Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-04-13 09:59:31 +00:00)
Merge patch series "Initial support for RK3576 UFS controller"
Shawn Lin <shawn.lin@rock-chips.com> says:

This patchset adds initial UFS controller support for the RK3576 SoC. Patch 1 adds the dt-bindings. Patches 2-4 deal with rpm and spm support, as suggested by Ulf. Patch 5 exports two new APIs for the host driver. Patches 6 and 7 add the host driver and dtsi support.

Link: https://lore.kernel.org/r/1738736156-119203-1-git-send-email-shawn.lin@rock-chips.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
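One convenient way to fetch this series for local review, assuming the b4 tool is installed (the URL is the Link: tag above):

    b4 am https://lore.kernel.org/r/1738736156-119203-1-git-send-email-shawn.lin@rock-chips.com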
Commit adc4fb9c81
265 changed files with 3682 additions and 1927 deletions
CREDITS (6 changed lines)
|
@ -2515,11 +2515,9 @@ D: SLS distribution
|
|||
D: Initial implementation of VC's, pty's and select()
|
||||
|
||||
N: Pavel Machek
|
||||
E: pavel@ucw.cz
|
||||
E: pavel@kernel.org
|
||||
P: 4096R/92DFCE96 4FA7 9EEF FCD4 C44F C585 B8C7 C060 2241 92DF CE96
|
||||
D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd,
|
||||
D: sun4/330 port, capabilities for elf, speedup for rm on ext2, USB,
|
||||
D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
|
||||
D: NBD, Sun4/330 port, USB, work on suspend-to-ram/disk,
|
||||
D: Altera SoCFPGA and Nokia N900 support.
|
||||
S: Czech Republic
|
||||
|
||||
|
|
|
@ -14,9 +14,8 @@ allOf:
|
|||
|
||||
description: |
|
||||
The Microchip LAN966x outband interrupt controller (OIC) maps the internal
|
||||
interrupt sources of the LAN966x device to an external interrupt.
|
||||
When the LAN966x device is used as a PCI device, the external interrupt is
|
||||
routed to the PCI interrupt.
|
||||
interrupt sources of the LAN966x device to a PCI interrupt when the LAN966x
|
||||
device is used as a PCI device.
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
|
|
Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml (new file, 105 lines)
|
@ -0,0 +1,105 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/ufs/rockchip,rk3576-ufshc.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Rockchip UFS Host Controller
|
||||
|
||||
maintainers:
|
||||
- Shawn Lin <shawn.lin@rock-chips.com>
|
||||
|
||||
allOf:
|
||||
- $ref: ufs-common.yaml
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: rockchip,rk3576-ufshc
|
||||
|
||||
reg:
|
||||
maxItems: 5
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: hci
|
||||
- const: mphy
|
||||
- const: hci_grf
|
||||
- const: mphy_grf
|
||||
- const: hci_apb
|
||||
|
||||
clocks:
|
||||
maxItems: 4
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: core
|
||||
- const: pclk
|
||||
- const: pclk_mphy
|
||||
- const: ref_out
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
resets:
|
||||
maxItems: 4
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
- const: biu
|
||||
- const: sys
|
||||
- const: ufs
|
||||
- const: grf
|
||||
|
||||
reset-gpios:
|
||||
maxItems: 1
|
||||
description: |
|
||||
GPIO specifiers for the host to reset the whole UFS device, including PHY and
memory. This GPIO is active low; per the UFS spec, choose a GPIO whose
high-level output voltage is lower than 1.5V.
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reg-names
|
||||
- clocks
|
||||
- clock-names
|
||||
- interrupts
|
||||
- power-domains
|
||||
- resets
|
||||
- reset-names
|
||||
- reset-gpios
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
|
||||
#include <dt-bindings/reset/rockchip,rk3576-cru.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/rockchip,rk3576-power.h>
|
||||
#include <dt-bindings/pinctrl/rockchip.h>
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
soc {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
ufshc: ufshc@2a2d0000 {
|
||||
compatible = "rockchip,rk3576-ufshc";
|
||||
reg = <0x0 0x2a2d0000 0x0 0x10000>,
|
||||
<0x0 0x2b040000 0x0 0x10000>,
|
||||
<0x0 0x2601f000 0x0 0x1000>,
|
||||
<0x0 0x2603c000 0x0 0x1000>,
|
||||
<0x0 0x2a2e0000 0x0 0x10000>;
|
||||
reg-names = "hci", "mphy", "hci_grf", "mphy_grf", "hci_apb";
|
||||
clocks = <&cru ACLK_UFS_SYS>, <&cru PCLK_USB_ROOT>, <&cru PCLK_MPHY>,
|
||||
<&cru CLK_REF_UFS_CLKOUT>;
|
||||
clock-names = "core", "pclk", "pclk_mphy", "ref_out";
|
||||
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&power RK3576_PD_USB>;
|
||||
resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>, <&cru SRST_A_UFS>,
|
||||
<&cru SRST_P_UFS_GRF>;
|
||||
reset-names = "biu", "sys", "ufs", "grf";
|
||||
reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
};
|
Documentation/filesystems/bcachefs/SubmittingPatches.rst (new file, 98 lines)
|
@ -0,0 +1,98 @@
|
|||
Submitting patches to bcachefs:
|
||||
===============================
|
||||
|
||||
Patches must be tested before being submitted, either with the xfstests suite
[0] or the full bcachefs test suite in ktest [1], depending on what's being
touched. Note that ktest wraps xfstests and will be the easier way to run it
for most users; it includes single-command wrappers for all the mainstream
in-kernel local filesystems.
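A minimal sketch of fetching the two test suites (URLs are taken from
references [0] and [1] at the end of this document; clone destinations are up
to you)::

  git clone git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
  git clone https://evilpiepirate.org/git/ktest.git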
|
||||
|
||||
Patches will undergo more testing after being merged (including
lockdep/kasan/preempt/etc. variants); these are not generally required to be
run by the submitter, but do put some thought into what you're changing and
which tests might be relevant. For example, if you're doing tricky memory
layout work, run kasan; if you're doing locking work, run lockdep. ktest
includes single-command variants for the debug build types you'll most likely
need.
|
||||
|
||||
The exception to this rule is incomplete WIP/RFC patches: if you're working on
|
||||
something nontrivial, it's encouraged to send out a WIP patch to let people
|
||||
know what you're doing and make sure you're on the right track. Just make sure
|
||||
it includes a brief note as to what's done and what's incomplete, to avoid
|
||||
confusion.
|
||||
|
||||
Rigorous checkpatch.pl adherence is not required (many of its warnings are
|
||||
considered out of date), but try not to deviate too much without reason.
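For reference, a typical checkpatch invocation on a patch file, run from the
top of a kernel tree (the patch filename here is just a placeholder)::

  ./scripts/checkpatch.pl 0001-my-change.patch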
|
||||
|
||||
Focus on writing code that reads well and is organized well; code should be
|
||||
aesthetically pleasing.
|
||||
|
||||
CI:
|
||||
===
|
||||
|
||||
Instead of running your tests locally, when running the full test suite it's
preferable to let a server farm do it in parallel, and then have the results
in a nice test dashboard (which can tell you which failures are new, and
presents results in a git log view, avoiding the need for most bisecting).
|
||||
|
||||
That exists [2], and community members may request an account. If you work for
|
||||
a big tech company, you'll need to help out with server costs to get access -
|
||||
but the CI is not restricted to running bcachefs tests: it runs any ktest test
|
||||
(which generally makes it easy to wrap other tests that can run in qemu).
|
||||
|
||||
Other things to think about:
|
||||
============================
|
||||
|
||||
- How will we debug this code? Is there sufficient introspection to diagnose
|
||||
when something starts acting wonky on a user machine?
|
||||
|
||||
We don't necessarily need every single field of every data structure visible
|
||||
with introspection, but having the important fields of all the core data
|
||||
types wired up makes debugging drastically easier - a bit of thoughtful
|
||||
foresight greatly reduces the need to have people build custom kernels with
|
||||
debug patches.
|
||||
|
||||
More broadly, think about all the debug tooling that might be needed.
|
||||
|
||||
- Does it make the codebase more or less of a mess? Can we try to do some
organizing, too?
|
||||
|
||||
- Do new tests need to be written? New assertions? How do we know and verify
|
||||
that the code is correct, and what happens if something goes wrong?
|
||||
|
||||
We don't yet have automated code coverage analysis or easy fault injection -
|
||||
but for now, pretend we did and ask what they might tell us.
|
||||
|
||||
Assertions are hugely important, given that we don't yet have a systems
|
||||
language that can do ergonomic embedded correctness proofs. Hitting an assert
|
||||
in testing is much better than wandering off into undefined behaviour la-la
|
||||
land - use them. Use them judiciously, and not as a replacement for proper
|
||||
error handling, but use them.
|
||||
|
||||
- Does it need to be performance tested? Should we add new performance counters?
|
||||
|
||||
bcachefs has a set of persistent runtime counters which can be viewed with
|
||||
the 'bcachefs fs top' command; this should give users a basic idea of what
|
||||
their filesystem is currently doing. If you're doing a new feature or looking
|
||||
at old code, think if anything should be added.
|
||||
|
||||
- If it's a new on disk format feature - have upgrades and downgrades been
tested? (Automated tests exist but aren't in the CI, due to the hassle of
disk image management; coordinate to have them run.)
|
||||
|
||||
Mailing list, IRC:
|
||||
==================
|
||||
|
||||
Patches should hit the list [3], but much discussion and code review happens on
|
||||
IRC as well [4]; many people appreciate the more conversational approach and
|
||||
quicker feedback.
|
||||
|
||||
Additionally, we have a lively user community doing excellent QA work, which
|
||||
exists primarily on IRC. Please make use of that resource; user feedback is
|
||||
important for any nontrivial feature, and documenting it in commit messages
|
||||
would be a good idea.
|
||||
|
||||
[0]: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
|
||||
[1]: https://evilpiepirate.org/git/ktest.git/
|
||||
[2]: https://evilpiepirate.org/~testdashboard/ci/
|
||||
[3]: linux-bcachefs@vger.kernel.org
|
||||
[4]: irc.oftc.net#bcache, #bcachefs-dev
|
|
@ -9,4 +9,5 @@ bcachefs Documentation
|
|||
:numbered:
|
||||
|
||||
CodingStyle
|
||||
SubmittingPatches
|
||||
errorcodes
|
||||
|
|
|
@ -1419,7 +1419,7 @@ fetch) is injected in the guest.
|
|||
S390:
|
||||
^^^^^
|
||||
|
||||
Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set.
|
||||
Returns -EINVAL or -EEXIST if the VM has the KVM_VM_S390_UCONTROL flag set.
|
||||
Returns -EINVAL if called on a protected VM.
|
||||
|
||||
4.36 KVM_SET_TSS_ADDR
|
||||
|
|
MAINTAINERS (57 changed lines)
|
@ -2209,7 +2209,6 @@ F: sound/soc/codecs/cs42l84.*
|
|||
F: sound/soc/codecs/ssm3515.c
|
||||
|
||||
ARM/APPLE MACHINE SUPPORT
|
||||
M: Hector Martin <marcan@marcan.st>
|
||||
M: Sven Peter <sven@svenpeter.dev>
|
||||
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
|
||||
L: asahi@lists.linux.dev
|
||||
|
@ -3955,6 +3954,7 @@ M: Kent Overstreet <kent.overstreet@linux.dev>
|
|||
L: linux-bcachefs@vger.kernel.org
|
||||
S: Supported
|
||||
C: irc://irc.oftc.net/bcache
|
||||
P: Documentation/filesystems/bcachefs/SubmittingPatches.rst
|
||||
T: git https://evilpiepirate.org/git/bcachefs.git
|
||||
F: fs/bcachefs/
|
||||
F: Documentation/filesystems/bcachefs/
|
||||
|
@ -9409,7 +9409,7 @@ F: fs/freevxfs/
|
|||
|
||||
FREEZER
|
||||
M: "Rafael J. Wysocki" <rafael@kernel.org>
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: Pavel Machek <pavel@kernel.org>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/power/freezing-of-tasks.rst
|
||||
|
@ -9869,7 +9869,7 @@ S: Maintained
|
|||
F: drivers/staging/gpib/
|
||||
|
||||
GPIO ACPI SUPPORT
|
||||
M: Mika Westerberg <mika.westerberg@linux.intel.com>
|
||||
M: Mika Westerberg <westeri@kernel.org>
|
||||
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
||||
L: linux-gpio@vger.kernel.org
|
||||
L: linux-acpi@vger.kernel.org
|
||||
|
@ -10244,7 +10244,7 @@ F: drivers/video/fbdev/hgafb.c
|
|||
|
||||
HIBERNATION (aka Software Suspend, aka swsusp)
|
||||
M: "Rafael J. Wysocki" <rafael@kernel.org>
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: Pavel Machek <pavel@kernel.org>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
B: https://bugzilla.kernel.org
|
||||
|
@ -13115,8 +13115,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
|
|||
F: scripts/leaking_addresses.pl
|
||||
|
||||
LED SUBSYSTEM
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: Lee Jones <lee@kernel.org>
|
||||
M: Pavel Machek <pavel@kernel.org>
|
||||
L: linux-leds@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/leds.git
|
||||
|
@ -16453,6 +16453,22 @@ F: include/net/dsa.h
|
|||
F: net/dsa/
|
||||
F: tools/testing/selftests/drivers/net/dsa/
|
||||
|
||||
NETWORKING [ETHTOOL]
|
||||
M: Andrew Lunn <andrew@lunn.ch>
|
||||
M: Jakub Kicinski <kuba@kernel.org>
|
||||
F: Documentation/netlink/specs/ethtool.yaml
|
||||
F: Documentation/networking/ethtool-netlink.rst
|
||||
F: include/linux/ethtool*
|
||||
F: include/uapi/linux/ethtool*
|
||||
F: net/ethtool/
|
||||
F: tools/testing/selftests/drivers/net/*/ethtool*
|
||||
|
||||
NETWORKING [ETHTOOL CABLE TEST]
|
||||
M: Andrew Lunn <andrew@lunn.ch>
|
||||
F: net/ethtool/cabletest.c
|
||||
F: tools/testing/selftests/drivers/net/*/ethtool*
|
||||
K: cable_test
|
||||
|
||||
NETWORKING [GENERAL]
|
||||
M: "David S. Miller" <davem@davemloft.net>
|
||||
M: Eric Dumazet <edumazet@google.com>
|
||||
|
@ -16612,6 +16628,7 @@ F: tools/testing/selftests/net/mptcp/
|
|||
NETWORKING [TCP]
|
||||
M: Eric Dumazet <edumazet@google.com>
|
||||
M: Neal Cardwell <ncardwell@google.com>
|
||||
R: Kuniyuki Iwashima <kuniyu@amazon.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/networking/net_cachelines/tcp_sock.rst
|
||||
|
@ -16639,6 +16656,31 @@ F: include/net/tls.h
|
|||
F: include/uapi/linux/tls.h
|
||||
F: net/tls/*
|
||||
|
||||
NETWORKING [SOCKETS]
|
||||
M: Eric Dumazet <edumazet@google.com>
|
||||
M: Kuniyuki Iwashima <kuniyu@amazon.com>
|
||||
M: Paolo Abeni <pabeni@redhat.com>
|
||||
M: Willem de Bruijn <willemb@google.com>
|
||||
S: Maintained
|
||||
F: include/linux/sock_diag.h
|
||||
F: include/linux/socket.h
|
||||
F: include/linux/sockptr.h
|
||||
F: include/net/sock.h
|
||||
F: include/net/sock_reuseport.h
|
||||
F: include/uapi/linux/socket.h
|
||||
F: net/core/*sock*
|
||||
F: net/core/scm.c
|
||||
F: net/socket.c
|
||||
|
||||
NETWORKING [UNIX SOCKETS]
|
||||
M: Kuniyuki Iwashima <kuniyu@amazon.com>
|
||||
S: Maintained
|
||||
F: include/net/af_unix.h
|
||||
F: include/net/netns/unix.h
|
||||
F: include/uapi/linux/unix_diag.h
|
||||
F: net/unix/
|
||||
F: tools/testing/selftests/net/af_unix/
|
||||
|
||||
NETXEN (1/10) GbE SUPPORT
|
||||
M: Manish Chopra <manishc@marvell.com>
|
||||
M: Rahul Verma <rahulv@marvell.com>
|
||||
|
@ -16772,7 +16814,7 @@ F: include/linux/tick.h
|
|||
F: kernel/time/tick*.*
|
||||
|
||||
NOKIA N900 CAMERA SUPPORT (ET8EK8 SENSOR, AD5820 FOCUS)
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: Pavel Machek <pavel@kernel.org>
|
||||
M: Sakari Ailus <sakari.ailus@iki.fi>
|
||||
L: linux-media@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -17704,6 +17746,7 @@ L: netdev@vger.kernel.org
|
|||
L: dev@openvswitch.org
|
||||
S: Maintained
|
||||
W: http://openvswitch.org
|
||||
F: Documentation/networking/openvswitch.rst
|
||||
F: include/uapi/linux/openvswitch.h
|
||||
F: net/openvswitch/
|
||||
F: tools/testing/selftests/net/openvswitch/
|
||||
|
@ -22797,7 +22840,7 @@ F: drivers/sh/
|
|||
SUSPEND TO RAM
|
||||
M: "Rafael J. Wysocki" <rafael@kernel.org>
|
||||
M: Len Brown <len.brown@intel.com>
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: Pavel Machek <pavel@kernel.org>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
B: https://bugzilla.kernel.org
|
||||
|
|
Makefile (2 changed lines)
|
@ -2,7 +2,7 @@
|
|||
VERSION = 6
|
||||
PATCHLEVEL = 14
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Baby Opossum Posse
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|||
/*
|
||||
* This is used to ensure we don't load something for the wrong architecture.
|
||||
*/
|
||||
#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
|
||||
#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
|
||||
|
||||
/*
|
||||
* These are used to set parameters in the core dumps.
|
||||
|
@ -137,10 +137,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
|
|||
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
|
||||
})
|
||||
|
||||
#define SET_PERSONALITY(EX) \
|
||||
set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
|
||||
? PER_LINUX_32BIT : PER_LINUX)
|
||||
|
||||
extern int alpha_l1i_cacheshape;
|
||||
extern int alpha_l1d_cacheshape;
|
||||
extern int alpha_l2_cacheshape;
|
||||
|
|
|
@ -360,7 +360,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
|
|||
|
||||
extern void paging_init(void);
|
||||
|
||||
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
|
||||
/* We have our own get_unmapped_area */
|
||||
#define HAVE_ARCH_UNMAPPED_AREA
|
||||
|
||||
#endif /* _ALPHA_PGTABLE_H */
|
||||
|
|
|
@ -8,23 +8,19 @@
|
|||
#ifndef __ASM_ALPHA_PROCESSOR_H
|
||||
#define __ASM_ALPHA_PROCESSOR_H
|
||||
|
||||
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
|
||||
|
||||
/*
|
||||
* We have a 42-bit user address space: 4TB user VM...
|
||||
*/
|
||||
#define TASK_SIZE (0x40000000000UL)
|
||||
|
||||
#define STACK_TOP \
|
||||
(current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
|
||||
#define STACK_TOP (0x00120000000UL)
|
||||
|
||||
#define STACK_TOP_MAX 0x00120000000UL
|
||||
|
||||
/* This decides where the kernel will search for a free chunk of vm
|
||||
* space during mmap's.
|
||||
*/
|
||||
#define TASK_UNMAPPED_BASE \
|
||||
((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
|
||||
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
|
||||
|
||||
/* This is dead. Everything has been moved to thread_info. */
|
||||
struct thread_struct { };
|
||||
|
|
|
@ -1210,8 +1210,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Get an address range which is currently unmapped. Similar to the
|
||||
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
|
||||
/* Get an address range which is currently unmapped. */
|
||||
|
||||
static unsigned long
|
||||
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
|
||||
|
@ -1230,13 +1229,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
unsigned long len, unsigned long pgoff,
|
||||
unsigned long flags, vm_flags_t vm_flags)
|
||||
{
|
||||
unsigned long limit;
|
||||
|
||||
/* "32 bit" actually means 31 bit, since pointers sign extend. */
|
||||
if (current->personality & ADDR_LIMIT_32BIT)
|
||||
limit = 0x80000000;
|
||||
else
|
||||
limit = TASK_SIZE;
|
||||
unsigned long limit = TASK_SIZE;
|
||||
|
||||
if (len > limit)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -1221,6 +1221,30 @@
|
|||
};
|
||||
};
|
||||
|
||||
ufshc: ufshc@2a2d0000 {
|
||||
compatible = "rockchip,rk3576-ufshc";
|
||||
reg = <0x0 0x2a2d0000 0x0 0x10000>,
|
||||
<0x0 0x2b040000 0x0 0x10000>,
|
||||
<0x0 0x2601f000 0x0 0x1000>,
|
||||
<0x0 0x2603c000 0x0 0x1000>,
|
||||
<0x0 0x2a2e0000 0x0 0x10000>;
|
||||
reg-names = "hci", "mphy", "hci_grf", "mphy_grf", "hci_apb";
|
||||
clocks = <&cru ACLK_UFS_SYS>, <&cru PCLK_USB_ROOT>, <&cru PCLK_MPHY>,
|
||||
<&cru CLK_REF_UFS_CLKOUT>;
|
||||
clock-names = "core", "pclk", "pclk_mphy", "ref_out";
|
||||
assigned-clocks = <&cru CLK_REF_OSC_MPHY>;
|
||||
assigned-clock-parents = <&cru CLK_REF_MPHY_26M>;
|
||||
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&power RK3576_PD_USB>;
|
||||
pinctrl-0 = <&ufs_refclk>;
|
||||
pinctrl-names = "default";
|
||||
resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>,
|
||||
<&cru SRST_A_UFS>, <&cru SRST_P_UFS_GRF>;
|
||||
reset-names = "biu", "sys", "ufs", "grf";
|
||||
reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
sdmmc: mmc@2a310000 {
|
||||
compatible = "rockchip,rk3576-dw-mshc";
|
||||
reg = <0x0 0x2a310000 0x0 0x4000>;
|
||||
|
|
|
@ -471,10 +471,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
|
|||
|
||||
trace_kvm_timer_emulate(ctx, should_fire);
|
||||
|
||||
if (should_fire != ctx->irq.level) {
|
||||
if (should_fire != ctx->irq.level)
|
||||
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
kvm_timer_update_status(ctx, should_fire);
|
||||
|
||||
|
@ -761,21 +759,6 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
|
|||
timer_irq(map->direct_ptimer),
|
||||
&arch_timer_irq_ops);
|
||||
WARN_ON_ONCE(ret);
|
||||
|
||||
/*
|
||||
* The virtual offset behaviour is "interesting", as it
|
||||
* always applies when HCR_EL2.E2H==0, but only when
|
||||
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
|
||||
* track E2H when putting the HV timer in "direct" mode.
|
||||
*/
|
||||
if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
|
||||
struct arch_timer_offset *offs = &map->direct_vtimer->offset;
|
||||
|
||||
if (vcpu_el2_e2h_is_set(vcpu))
|
||||
offs->vcpu_offset = NULL;
|
||||
else
|
||||
offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -976,31 +959,21 @@ void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
|
|||
* which allows trapping of the timer registers even with NV2.
|
||||
* Still, this is still worse than FEAT_NV on its own. Meh.
|
||||
*/
|
||||
if (!vcpu_el2_e2h_is_set(vcpu)) {
|
||||
if (cpus_have_final_cap(ARM64_HAS_ECV))
|
||||
return;
|
||||
|
||||
/*
|
||||
* A non-VHE guest hypervisor doesn't have any direct access
|
||||
* to its timers: the EL2 registers trap (and the HW is
|
||||
* fully emulated), while the EL0 registers access memory
|
||||
* despite the access being notionally direct. Boo.
|
||||
*
|
||||
* We update the hardware timer registers with the
|
||||
* latest value written by the guest to the VNCR page
|
||||
* and let the hardware take care of the rest.
|
||||
*/
|
||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
|
||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
|
||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL);
|
||||
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL);
|
||||
} else {
|
||||
if (!cpus_have_final_cap(ARM64_HAS_ECV)) {
|
||||
/*
|
||||
* For a VHE guest hypervisor, the EL2 state is directly
|
||||
* stored in the host EL1 timers, while the emulated EL0
|
||||
* stored in the host EL1 timers, while the emulated EL1
|
||||
* state is stored in the VNCR page. The latter could have
|
||||
* been updated behind our back, and we must reset the
|
||||
* emulation of the timers.
|
||||
*
|
||||
* A non-VHE guest hypervisor doesn't have any direct access
|
||||
* to its timers: the EL2 registers trap despite being
|
||||
* notionally direct (we use the EL1 HW, as for VHE), while
|
||||
* the EL1 registers access memory.
|
||||
*
|
||||
* In both cases, process the emulated timers on each guest
|
||||
* exit. Boo.
|
||||
*/
|
||||
struct timer_map map;
|
||||
get_timer_map(vcpu, &map);
|
||||
|
|
|
@ -2290,6 +2290,19 @@ static int __init init_subsystems(void)
|
|||
break;
|
||||
case -ENODEV:
|
||||
case -ENXIO:
|
||||
/*
|
||||
* No VGIC? No pKVM for you.
|
||||
*
|
||||
* Protected mode assumes that VGICv3 is present, so no point
|
||||
* in trying to hobble along if vgic initialization fails.
|
||||
*/
|
||||
if (is_protected_kvm_enabled())
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Otherwise, userspace could choose to implement a GIC for its
|
||||
* guest on non-cooperative hardware.
|
||||
*/
|
||||
vgic_present = false;
|
||||
err = 0;
|
||||
break;
|
||||
|
@ -2400,6 +2413,13 @@ static void kvm_hyp_init_symbols(void)
|
|||
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
|
||||
kvm_nvhe_sym(__icache_flags) = __icache_flags;
|
||||
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
|
||||
|
||||
/*
|
||||
* Flush entire BSS since part of its data containing init symbols is read
|
||||
* while the MMU is off.
|
||||
*/
|
||||
kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
|
||||
kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
|
||||
}
|
||||
|
||||
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
|
||||
|
|
|
@ -91,11 +91,34 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
|
|||
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||
}
|
||||
|
||||
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||
|
||||
hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;
|
||||
|
||||
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||
hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
|
||||
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||
hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
|
||||
}
|
||||
|
||||
static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||
|
||||
if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||
host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
|
||||
else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
|
||||
host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
|
||||
}
|
||||
|
||||
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||
|
||||
fpsimd_sve_flush();
|
||||
flush_debug_state(hyp_vcpu);
|
||||
|
||||
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
|
||||
|
||||
|
@ -123,6 +146,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
|||
unsigned int i;
|
||||
|
||||
fpsimd_sve_sync(&hyp_vcpu->vcpu);
|
||||
sync_debug_state(hyp_vcpu);
|
||||
|
||||
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
|
||||
|
||||
|
|
|
@ -67,26 +67,27 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
|
|||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
swap(kvm->arch.nested_mmus, tmp);
|
||||
|
||||
/*
|
||||
* If we went through a reallocation, adjust the MMU back-pointers in
|
||||
* the previously initialised kvm_pgtable structures.
|
||||
*/
|
||||
if (kvm->arch.nested_mmus != tmp)
|
||||
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
|
||||
tmp[i].pgt->mmu = &tmp[i];
|
||||
kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
|
||||
|
||||
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
|
||||
ret = init_nested_s2_mmu(kvm, &tmp[i]);
|
||||
ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
|
||||
|
||||
if (ret) {
|
||||
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
|
||||
kvm_free_stage2_pgd(&tmp[i]);
|
||||
kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
kvm->arch.nested_mmus_size = num_mmus;
|
||||
kvm->arch.nested_mmus = tmp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1452,6 +1452,16 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool access_hv_timer(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||
return undef_access(vcpu, p, r);
|
||||
|
||||
return access_arch_timer(vcpu, p, r);
|
||||
}
|
||||
|
||||
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
|
||||
s64 new, s64 cur)
|
||||
{
|
||||
|
@ -3103,9 +3113,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
|
||||
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
|
||||
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer },
|
||||
EL2_REG(CNTHV_CTL_EL2, access_arch_timer, reset_val, 0),
|
||||
EL2_REG(CNTHV_CVAL_EL2, access_arch_timer, reset_val, 0),
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
|
||||
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
|
||||
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
|
||||
|
||||
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
|
||||
|
||||
|
|
|
@ -75,7 +75,7 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
|
|||
srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
|
||||
cascade_virq = msi_data->cascade_array[srs]->virq;
|
||||
|
||||
seq_printf(p, " fsl-msi-%d", cascade_virq);
|
||||
seq_printf(p, "fsl-msi-%d", cascade_virq);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
/**
|
||||
* struct gmap_struct - guest address space
|
||||
* @list: list head for the mm->context gmap list
|
||||
* @crst_list: list of all crst tables used in the guest address space
|
||||
* @mm: pointer to the parent mm_struct
|
||||
* @guest_to_host: radix tree with guest to host address translation
|
||||
* @host_to_guest: radix tree with pointer to segment table entries
|
||||
|
@ -35,7 +34,6 @@
|
|||
* @guest_handle: protected virtual machine handle for the ultravisor
|
||||
* @host_to_rmap: radix tree with gmap_rmap lists
|
||||
* @children: list of shadow gmap structures
|
||||
* @pt_list: list of all page tables used in the shadow guest address space
|
||||
* @shadow_lock: spinlock to protect the shadow gmap list
|
||||
* @parent: pointer to the parent gmap for shadow guest address spaces
|
||||
* @orig_asce: ASCE for which the shadow page table has been created
|
||||
|
@ -45,7 +43,6 @@
|
|||
*/
|
||||
struct gmap {
|
||||
struct list_head list;
|
||||
struct list_head crst_list;
|
||||
struct mm_struct *mm;
|
||||
struct radix_tree_root guest_to_host;
|
||||
struct radix_tree_root host_to_guest;
|
||||
|
@ -61,7 +58,6 @@ struct gmap {
|
|||
/* Additional data for shadow guest address spaces */
|
||||
struct radix_tree_root host_to_rmap;
|
||||
struct list_head children;
|
||||
struct list_head pt_list;
|
||||
spinlock_t shadow_lock;
|
||||
struct gmap *parent;
|
||||
unsigned long orig_asce;
|
||||
|
@ -106,23 +102,21 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
|
|||
void gmap_remove(struct gmap *gmap);
|
||||
struct gmap *gmap_get(struct gmap *gmap);
|
||||
void gmap_put(struct gmap *gmap);
|
||||
void gmap_free(struct gmap *gmap);
|
||||
struct gmap *gmap_alloc(unsigned long limit);
|
||||
|
||||
int gmap_map_segment(struct gmap *gmap, unsigned long from,
|
||||
unsigned long to, unsigned long len);
|
||||
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
|
||||
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
|
||||
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
|
||||
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
|
||||
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
|
||||
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
|
||||
void __gmap_zap(struct gmap *, unsigned long gaddr);
|
||||
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
|
||||
|
||||
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
|
||||
|
||||
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
|
||||
int edat_level);
|
||||
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
|
||||
void gmap_unshadow(struct gmap *sg);
|
||||
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
|
||||
int fake);
|
||||
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
|
||||
|
@ -131,24 +125,22 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
|
|||
int fake);
|
||||
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
|
||||
int fake);
|
||||
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
|
||||
unsigned long *pgt, int *dat_protection, int *fake);
|
||||
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
|
||||
|
||||
void gmap_register_pte_notifier(struct gmap_notifier *);
|
||||
void gmap_unregister_pte_notifier(struct gmap_notifier *);
|
||||
|
||||
int gmap_mprotect_notify(struct gmap *, unsigned long start,
|
||||
unsigned long len, int prot);
|
||||
int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);
|
||||
|
||||
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
|
||||
unsigned long gaddr, unsigned long vmaddr);
|
||||
int s390_disable_cow_sharing(void);
|
||||
void s390_unlist_old_asce(struct gmap *gmap);
|
||||
int s390_replace_asce(struct gmap *gmap);
|
||||
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
|
||||
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
|
||||
unsigned long end, bool interruptible);
|
||||
int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
|
||||
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
|
||||
|
||||
/**
|
||||
* s390_uv_destroy_range - Destroy a range of pages in the given mm.
|
||||
|
|
|
@ -30,6 +30,8 @@
|
|||
#define KVM_S390_ESCA_CPU_SLOTS 248
|
||||
#define KVM_MAX_VCPUS 255
|
||||
|
||||
#define KVM_INTERNAL_MEM_SLOTS 1
|
||||
|
||||
/*
|
||||
* These seem to be used for allocating ->chip in the routing table, which we
|
||||
* don't use. 1 is as small as we can get to reduce the needed memory. If we
|
||||
|
@ -931,12 +933,14 @@ struct sie_page2 {
|
|||
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
|
||||
};
|
||||
|
||||
struct vsie_page;
|
||||
|
||||
struct kvm_s390_vsie {
|
||||
struct mutex mutex;
|
||||
struct radix_tree_root addr_to_page;
|
||||
int page_count;
|
||||
int next;
|
||||
struct page *pages[KVM_MAX_VCPUS];
|
||||
struct vsie_page *pages[KVM_MAX_VCPUS];
|
||||
};
|
||||
|
||||
struct kvm_s390_gisa_iam {
|
||||
|
|
|
@ -420,9 +420,10 @@ void setup_protection_map(void);
|
|||
#define PGSTE_HC_BIT 0x0020000000000000UL
|
||||
#define PGSTE_GR_BIT 0x0004000000000000UL
|
||||
#define PGSTE_GC_BIT 0x0002000000000000UL
|
||||
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
|
||||
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
|
||||
#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
|
||||
#define PGSTE_ST2_MASK 0x0000ffff00000000UL
|
||||
#define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */
|
||||
#define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */
|
||||
#define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */
|
||||
|
||||
/* Guest Page State used for virtualization */
|
||||
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
|
||||
|
@ -2007,4 +2008,18 @@ extern void s390_reset_cmma(struct mm_struct *mm);
|
|||
#define pmd_pgtable(pmd) \
|
||||
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
|
||||
|
||||
static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
|
||||
{
|
||||
unsigned long *pgstes, res;
|
||||
|
||||
pgstes = pgt + _PAGE_ENTRIES;
|
||||
|
||||
res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
|
||||
res |= pgstes[1] & PGSTE_ST2_MASK;
|
||||
res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
|
||||
res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif /* _S390_PAGE_H */
|
||||
|
|
|
@ -628,12 +628,12 @@ static inline int is_prot_virt_host(void)
|
|||
}
|
||||
|
||||
int uv_pin_shared(unsigned long paddr);
|
||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
|
||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
|
||||
int uv_destroy_folio(struct folio *folio);
|
||||
int uv_destroy_pte(pte_t pte);
|
||||
int uv_convert_from_secure_pte(pte_t pte);
|
||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
|
||||
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb);
|
||||
int uv_convert_from_secure(unsigned long paddr);
|
||||
int uv_convert_from_secure_folio(struct folio *folio);
|
||||
|
||||
void setup_uv(void);
|
||||
|
||||
|
|
|
@ -19,19 +19,6 @@
|
|||
#include <asm/sections.h>
|
||||
#include <asm/uv.h>
|
||||
|
||||
#if !IS_ENABLED(CONFIG_KVM)
|
||||
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
|
||||
unsigned int fault_flags)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
|
||||
int __bootdata_preserved(prot_virt_guest);
|
||||
EXPORT_SYMBOL(prot_virt_guest);
|
||||
|
@ -159,6 +146,7 @@ int uv_destroy_folio(struct folio *folio)
|
|||
folio_put(folio);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(uv_destroy_folio);
|
||||
|
||||
/*
|
||||
* The present PTE still indirectly holds a folio reference through the mapping.
|
||||
|
@ -175,7 +163,7 @@ int uv_destroy_pte(pte_t pte)
|
|||
*
|
||||
* @paddr: Absolute host address of page to be exported
|
||||
*/
|
||||
static int uv_convert_from_secure(unsigned long paddr)
|
||||
int uv_convert_from_secure(unsigned long paddr)
|
||||
{
|
||||
struct uv_cb_cfs uvcb = {
|
||||
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
|
||||
|
@ -187,11 +175,12 @@ static int uv_convert_from_secure(unsigned long paddr)
|
|||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(uv_convert_from_secure);
|
||||
|
||||
/*
|
||||
* The caller must already hold a reference to the folio.
|
||||
*/
|
||||
static int uv_convert_from_secure_folio(struct folio *folio)
|
||||
int uv_convert_from_secure_folio(struct folio *folio)
|
||||
{
|
||||
int rc;
|
||||
|
||||
|
@ -206,6 +195,7 @@ static int uv_convert_from_secure_folio(struct folio *folio)
|
|||
folio_put(folio);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);
|
||||
|
||||
/*
|
||||
* The present PTE still indirectly holds a folio reference through the mapping.
|
||||
|
@ -237,13 +227,33 @@ static int expected_folio_refs(struct folio *folio)
|
|||
return res;
|
||||
}
|
||||
|
||||
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
||||
/**
|
||||
* make_folio_secure() - make a folio secure
|
||||
* @folio: the folio to make secure
|
||||
* @uvcb: the uvcb that describes the UVC to be used
|
||||
*
|
||||
* The folio @folio will be made secure if possible, @uvcb will be passed
|
||||
* as-is to the UVC.
|
||||
*
|
||||
* Return: 0 on success;
|
||||
* -EBUSY if the folio is in writeback or has too many references;
|
||||
* -E2BIG if the folio is large;
|
||||
* -EAGAIN if the UVC needs to be attempted again;
|
||||
* -ENXIO if the address is not mapped;
|
||||
* -EINVAL if the UVC failed for other reasons.
|
||||
*
|
||||
* Context: The caller must hold exactly one extra reference on the folio
|
||||
* (it's the same logic as split_folio())
|
||||
*/
|
||||
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
||||
{
|
||||
int expected, cc = 0;
|
||||
|
||||
if (folio_test_large(folio))
|
||||
return -E2BIG;
|
||||
if (folio_test_writeback(folio))
|
||||
return -EAGAIN;
|
||||
expected = expected_folio_refs(folio);
|
||||
return -EBUSY;
|
||||
expected = expected_folio_refs(folio) + 1;
|
||||
if (!folio_ref_freeze(folio, expected))
|
||||
return -EBUSY;
|
||||
set_bit(PG_arch_1, &folio->flags);
|
||||
|
@ -267,251 +277,7 @@ static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
|
|||
return -EAGAIN;
|
||||
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* should_export_before_import - Determine whether an export is needed
|
||||
* before an import-like operation
|
||||
* @uvcb: the Ultravisor control block of the UVC to be performed
|
||||
* @mm: the mm of the process
|
||||
*
|
||||
* Returns whether an export is needed before every import-like operation.
|
||||
* This is needed for shared pages, which don't trigger a secure storage
|
||||
* exception when accessed from a different guest.
|
||||
*
|
||||
* Although considered as one, the Unpin Page UVC is not an actual import,
|
||||
* so it is not affected.
|
||||
*
|
||||
* No export is needed also when there is only one protected VM, because the
|
||||
* page cannot belong to the wrong VM in that case (there is no "other VM"
|
||||
* it can belong to).
|
||||
*
|
||||
* Return: true if an export is needed before every import, otherwise false.
|
||||
*/
|
||||
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* The misc feature indicates, among other things, that importing a
|
||||
* shared page from a different protected VM will automatically also
|
||||
* transfer its ownership.
|
||||
*/
|
||||
if (uv_has_feature(BIT_UV_FEAT_MISC))
|
||||
return false;
|
||||
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
|
||||
return false;
|
||||
return atomic_read(&mm->context.protected_count) > 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Drain LRU caches: the local one on first invocation and the ones of all
|
||||
* CPUs on successive invocations. Returns "true" on the first invocation.
|
||||
*/
|
||||
static bool drain_lru(bool *drain_lru_called)
|
||||
{
|
||||
/*
|
||||
* If we have tried a local drain and the folio refcount
|
||||
* still does not match our expected safe value, try with a
|
||||
* system wide drain. This is needed if the pagevecs holding
|
||||
* the page are on a different CPU.
|
||||
*/
|
||||
if (*drain_lru_called) {
|
||||
lru_add_drain_all();
|
||||
/* We give up here, don't retry immediately. */
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* We are here if the folio refcount does not match the
|
||||
* expected safe value. The main culprits are usually
|
||||
* pagevecs. With lru_add_drain() we drain the pagevecs
|
||||
* on the local CPU so that hopefully the refcount will
|
||||
* reach the expected safe value.
|
||||
*/
|
||||
lru_add_drain();
|
||||
*drain_lru_called = true;
|
||||
/* The caller should try again immediately */
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Requests the Ultravisor to make a page accessible to a guest.
|
||||
* If it's brought in the first time, it will be cleared. If
|
||||
* it has been exported before, it will be decrypted and integrity
|
||||
* checked.
|
||||
*/
|
||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
bool drain_lru_called = false;
|
||||
spinlock_t *ptelock;
|
||||
unsigned long uaddr;
|
||||
struct folio *folio;
|
||||
pte_t *ptep;
|
||||
int rc;
|
||||
|
||||
again:
|
||||
rc = -EFAULT;
|
||||
mmap_read_lock(gmap->mm);
|
||||
|
||||
uaddr = __gmap_translate(gmap, gaddr);
|
||||
if (IS_ERR_VALUE(uaddr))
|
||||
goto out;
|
||||
vma = vma_lookup(gmap->mm, uaddr);
|
||||
if (!vma)
|
||||
goto out;
|
||||
/*
|
||||
* Secure pages cannot be huge and userspace should not combine both.
|
||||
* In case userspace does it anyway this will result in an -EFAULT for
|
||||
* the unpack. The guest is thus never reaching secure mode. If
|
||||
* userspace is playing dirty tricky with mapping huge pages later
|
||||
* on this will result in a segmentation fault.
|
||||
*/
|
||||
if (is_vm_hugetlb_page(vma))
|
||||
goto out;
|
||||
|
||||
rc = -ENXIO;
|
||||
ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
|
||||
if (!ptep)
|
||||
goto out;
|
||||
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
|
||||
folio = page_folio(pte_page(*ptep));
|
||||
rc = -EAGAIN;
|
||||
if (folio_test_large(folio)) {
|
||||
rc = -E2BIG;
|
||||
} else if (folio_trylock(folio)) {
|
||||
if (should_export_before_import(uvcb, gmap->mm))
|
||||
uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
|
||||
rc = make_folio_secure(folio, uvcb);
|
||||
folio_unlock(folio);
|
||||
}
|
||||
|
||||
/*
|
||||
* Once we drop the PTL, the folio may get unmapped and
|
||||
* freed immediately. We need a temporary reference.
|
||||
*/
|
||||
if (rc == -EAGAIN || rc == -E2BIG)
|
||||
folio_get(folio);
|
||||
}
|
||||
pte_unmap_unlock(ptep, ptelock);
|
||||
out:
|
||||
mmap_read_unlock(gmap->mm);
|
||||
|
||||
switch (rc) {
|
||||
case -E2BIG:
|
||||
folio_lock(folio);
|
||||
rc = split_folio(folio);
|
||||
folio_unlock(folio);
|
||||
folio_put(folio);
|
||||
|
||||
switch (rc) {
|
||||
case 0:
|
||||
/* Splitting succeeded, try again immediately. */
|
||||
goto again;
|
||||
case -EAGAIN:
|
||||
/* Additional folio references. */
|
||||
if (drain_lru(&drain_lru_called))
|
||||
goto again;
|
||||
return -EAGAIN;
|
||||
case -EBUSY:
|
||||
/* Unexpected race. */
|
||||
return -EAGAIN;
|
||||
}
|
||||
WARN_ON_ONCE(1);
|
||||
return -ENXIO;
|
||||
case -EAGAIN:
|
||||
/*
|
||||
* If we are here because the UVC returned busy or partial
|
||||
* completion, this is just a useless check, but it is safe.
|
||||
*/
|
||||
folio_wait_writeback(folio);
|
||||
folio_put(folio);
|
||||
return -EAGAIN;
|
||||
case -EBUSY:
|
||||
/* Additional folio references. */
|
||||
if (drain_lru(&drain_lru_called))
|
||||
goto again;
|
||||
return -EAGAIN;
|
||||
case -ENXIO:
|
||||
if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
|
||||
return -EFAULT;
|
||||
return -EAGAIN;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gmap_make_secure);
|
||||
|
||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
|
||||
{
|
||||
struct uv_cb_cts uvcb = {
|
||||
.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
|
||||
.header.len = sizeof(uvcb),
|
||||
.guest_handle = gmap->guest_handle,
|
||||
.gaddr = gaddr,
|
||||
};
|
||||
|
||||
return gmap_make_secure(gmap, gaddr, &uvcb);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
|
||||
|
||||
/**
|
||||
* gmap_destroy_page - Destroy a guest page.
|
||||
* @gmap: the gmap of the guest
|
||||
* @gaddr: the guest address to destroy
|
||||
*
|
||||
* An attempt will be made to destroy the given guest page. If the attempt
|
||||
* fails, an attempt is made to export the page. If both attempts fail, an
|
||||
* appropriate error is returned.
|
||||
*/
|
||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct folio_walk fw;
|
||||
unsigned long uaddr;
|
||||
struct folio *folio;
|
||||
int rc;
|
||||
|
||||
rc = -EFAULT;
|
||||
mmap_read_lock(gmap->mm);
|
||||
|
||||
uaddr = __gmap_translate(gmap, gaddr);
|
||||
if (IS_ERR_VALUE(uaddr))
|
||||
goto out;
|
||||
vma = vma_lookup(gmap->mm, uaddr);
|
||||
if (!vma)
|
||||
goto out;
|
||||
/*
|
||||
* Huge pages should not be able to become secure
|
||||
*/
|
||||
if (is_vm_hugetlb_page(vma))
|
||||
goto out;
|
||||
|
||||
rc = 0;
|
||||
folio = folio_walk_start(&fw, vma, uaddr, 0);
|
||||
if (!folio)
|
||||
goto out;
|
||||
/*
|
||||
* See gmap_make_secure(): large folios cannot be secure. Small
|
||||
* folio implies FW_LEVEL_PTE.
|
||||
*/
|
||||
if (folio_test_large(folio) || !pte_write(fw.pte))
|
||||
goto out_walk_end;
|
||||
rc = uv_destroy_folio(folio);
|
||||
/*
|
||||
* Fault handlers can race; it is possible that two CPUs will fault
|
||||
* on the same secure page. One CPU can destroy the page, reboot,
|
||||
* re-enter secure mode and import it, while the second CPU was
|
||||
* stuck at the beginning of the handler. At some point the second
|
||||
* CPU will be able to progress, and it will not be able to destroy
|
||||
* the page. In that case we do not want to terminate the process,
|
||||
* we instead try to export the page.
|
||||
*/
|
||||
if (rc)
|
||||
rc = uv_convert_from_secure_folio(folio);
|
||||
out_walk_end:
|
||||
folio_walk_end(&fw, vma);
|
||||
out:
|
||||
mmap_read_unlock(gmap->mm);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gmap_destroy_page);
|
||||
EXPORT_SYMBOL_GPL(make_folio_secure);
|
||||
|
||||
/*
|
||||
* To be called with the folio locked or with an extra reference! This will
|
||||
|
|
|
@ -8,7 +8,7 @@ include $(srctree)/virt/kvm/Makefile.kvm
|
|||
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
|
||||
|
||||
kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
|
||||
kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
|
||||
kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap.o gmap-vsie.o
|
||||
|
||||
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
|
||||
obj-$(CONFIG_KVM) += kvm.o
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <asm/gmap.h>
|
||||
#include <asm/dat-bits.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gmap.h"
|
||||
#include "gaccess.h"
|
||||
|
||||
/*
|
||||
|
@ -1392,6 +1393,44 @@ shadow_pgt:
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* shadow_pgt_lookup() - find a shadow page table
|
||||
* @sg: pointer to the shadow guest address space structure
|
||||
* @saddr: the address in the shadow guest address space
|
||||
* @pgt: parent gmap address of the page table to get shadowed
|
||||
* @dat_protection: if the pgtable is marked as protected by dat
|
||||
* @fake: pgt references contiguous guest memory block, not a pgtable
|
||||
*
|
||||
* Returns 0 if the shadow page table was found and -EAGAIN if the page
|
||||
* table was not found.
|
||||
*
|
||||
* Called with sg->mm->mmap_lock in read.
|
||||
*/
|
||||
static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
|
||||
int *dat_protection, int *fake)
|
||||
{
|
||||
unsigned long pt_index;
|
||||
unsigned long *table;
|
||||
struct page *page;
|
||||
int rc;
|
||||
|
||||
spin_lock(&sg->guest_table_lock);
|
||||
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
|
||||
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
|
||||
/* Shadow page tables are full pages (pte+pgste) */
|
||||
page = pfn_to_page(*table >> PAGE_SHIFT);
|
||||
pt_index = gmap_pgste_get_pgt_addr(page_to_virt(page));
|
||||
*pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
|
||||
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
|
||||
*fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);
|
||||
rc = 0;
|
||||
} else {
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
spin_unlock(&sg->guest_table_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_shadow_fault - handle fault on a shadow page table
|
||||
* @vcpu: virtual cpu
|
||||
|
@ -1415,6 +1454,9 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
|
|||
int dat_protection, fake;
|
||||
int rc;
|
||||
|
||||
if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
|
||||
return -EFAULT;
|
||||
|
||||
mmap_read_lock(sg->mm);
|
||||
/*
|
||||
* We don't want any guest-2 tables to change - so the parent
|
||||
|
@ -1423,7 +1465,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
|
|||
*/
|
||||
ipte_lock(vcpu->kvm);
|
||||
|
||||
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
|
||||
rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
|
||||
if (rc)
|
||||
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
|
||||
&fake);
|
||||
|
|
arch/s390/kvm/gmap-vsie.c (new file, 142 lines)
|
@ -0,0 +1,142 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Guest memory management for KVM/s390 nested VMs.
|
||||
*
|
||||
* Copyright IBM Corp. 2008, 2020, 2024
|
||||
*
|
||||
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* David Hildenbrand <david@redhat.com>
|
||||
* Janosch Frank <frankja@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/mman.h>
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/uv.h>
|
||||
|
||||
#include "kvm-s390.h"
|
||||
#include "gmap.h"
|
||||
|
||||
/**
|
||||
* gmap_find_shadow - find a specific asce in the list of shadow tables
|
||||
* @parent: pointer to the parent gmap
|
||||
* @asce: ASCE for which the shadow table is created
|
||||
* @edat_level: edat level to be used for the shadow translation
|
||||
*
|
||||
* Returns the pointer to a gmap if a shadow table with the given asce is
|
||||
* already available, ERR_PTR(-EAGAIN) if another one is just being created,
|
||||
* otherwise NULL
|
||||
*
|
||||
* Context: Called with parent->shadow_lock held
|
||||
*/
|
||||
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce, int edat_level)
|
||||
{
|
||||
struct gmap *sg;
|
||||
|
||||
lockdep_assert_held(&parent->shadow_lock);
|
||||
list_for_each_entry(sg, &parent->children, list) {
|
||||
if (!gmap_shadow_valid(sg, asce, edat_level))
|
||||
continue;
|
||||
if (!sg->initialized)
|
||||
return ERR_PTR(-EAGAIN);
|
||||
refcount_inc(&sg->ref_count);
|
||||
return sg;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* gmap_shadow - create/find a shadow guest address space
|
||||
* @parent: pointer to the parent gmap
|
||||
* @asce: ASCE for which the shadow table is created
|
||||
* @edat_level: edat level to be used for the shadow translation
|
||||
*
|
||||
* The pages of the top level page table referred by the asce parameter
|
||||
* will be set to read-only and marked in the PGSTEs of the kvm process.
|
||||
* The shadow table will be removed automatically on any change to the
|
||||
* PTE mapping for the source table.
|
||||
*
|
||||
* Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
|
||||
* ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
|
||||
* parent gmap table could not be protected.
|
||||
*/
|
||||
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level)
|
||||
{
|
||||
struct gmap *sg, *new;
|
||||
unsigned long limit;
|
||||
int rc;
|
||||
|
||||
if (KVM_BUG_ON(parent->mm->context.allow_gmap_hpage_1m, (struct kvm *)parent->private) ||
|
||||
KVM_BUG_ON(gmap_is_shadow(parent), (struct kvm *)parent->private))
|
||||
return ERR_PTR(-EFAULT);
|
||||
spin_lock(&parent->shadow_lock);
|
||||
sg = gmap_find_shadow(parent, asce, edat_level);
|
||||
spin_unlock(&parent->shadow_lock);
|
||||
if (sg)
|
||||
return sg;
|
||||
/* Create a new shadow gmap */
|
||||
limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
|
||||
if (asce & _ASCE_REAL_SPACE)
|
||||
limit = -1UL;
|
||||
new = gmap_alloc(limit);
|
||||
if (!new)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
new->mm = parent->mm;
|
||||
new->parent = gmap_get(parent);
|
||||
new->private = parent->private;
|
||||
new->orig_asce = asce;
|
||||
new->edat_level = edat_level;
|
||||
new->initialized = false;
|
||||
spin_lock(&parent->shadow_lock);
|
||||
/* Recheck if another CPU created the same shadow */
|
||||
sg = gmap_find_shadow(parent, asce, edat_level);
|
||||
if (sg) {
|
||||
spin_unlock(&parent->shadow_lock);
|
||||
gmap_free(new);
|
||||
return sg;
|
||||
}
|
||||
if (asce & _ASCE_REAL_SPACE) {
|
||||
/* only allow one real-space gmap shadow */
|
||||
list_for_each_entry(sg, &parent->children, list) {
|
||||
if (sg->orig_asce & _ASCE_REAL_SPACE) {
|
||||
spin_lock(&sg->guest_table_lock);
|
||||
gmap_unshadow(sg);
|
||||
spin_unlock(&sg->guest_table_lock);
|
||||
list_del(&sg->list);
|
||||
gmap_put(sg);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
refcount_set(&new->ref_count, 2);
|
||||
list_add(&new->list, &parent->children);
|
||||
if (asce & _ASCE_REAL_SPACE) {
|
||||
/* nothing to protect, return right away */
|
||||
new->initialized = true;
|
||||
spin_unlock(&parent->shadow_lock);
|
||||
return new;
|
||||
}
|
||||
spin_unlock(&parent->shadow_lock);
|
||||
/* protect after insertion, so it will get properly invalidated */
|
||||
mmap_read_lock(parent->mm);
|
||||
rc = __kvm_s390_mprotect_many(parent, asce & _ASCE_ORIGIN,
|
||||
((asce & _ASCE_TABLE_LENGTH) + 1),
|
||||
PROT_READ, GMAP_NOTIFY_SHADOW);
|
||||
mmap_read_unlock(parent->mm);
|
||||
spin_lock(&parent->shadow_lock);
|
||||
new->initialized = true;
|
||||
if (rc) {
|
||||
list_del(&new->list);
|
||||
gmap_free(new);
|
||||
new = ERR_PTR(rc);
|
||||
}
|
||||
spin_unlock(&parent->shadow_lock);
|
||||
return new;
|
||||
}
|
212
arch/s390/kvm/gmap.c
Normal file

@@ -0,0 +1,212 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Guest memory management for KVM/s390
|
||||
*
|
||||
* Copyright IBM Corp. 2008, 2020, 2024
|
||||
*
|
||||
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* David Hildenbrand <david@redhat.com>
|
||||
* Janosch Frank <frankja@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/uv.h>
|
||||
|
||||
#include "gmap.h"
|
||||
|
||||
/**
|
||||
* should_export_before_import - Determine whether an export is needed
|
||||
* before an import-like operation
|
||||
* @uvcb: the Ultravisor control block of the UVC to be performed
|
||||
* @mm: the mm of the process
|
||||
*
|
||||
* Returns whether an export is needed before every import-like operation.
|
||||
* This is needed for shared pages, which don't trigger a secure storage
|
||||
* exception when accessed from a different guest.
|
||||
*
|
||||
* Although considered as one, the Unpin Page UVC is not an actual import,
|
||||
* so it is not affected.
|
||||
*
|
||||
* No export is needed also when there is only one protected VM, because the
|
||||
* page cannot belong to the wrong VM in that case (there is no "other VM"
|
||||
* it can belong to).
|
||||
*
|
||||
* Return: true if an export is needed before every import, otherwise false.
|
||||
*/
|
||||
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* The misc feature indicates, among other things, that importing a
|
||||
* shared page from a different protected VM will automatically also
|
||||
* transfer its ownership.
|
||||
*/
|
||||
if (uv_has_feature(BIT_UV_FEAT_MISC))
|
||||
return false;
|
||||
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
|
||||
return false;
|
||||
return atomic_read(&mm->context.protected_count) > 1;
|
||||
}
|
||||
|
||||
static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* Secure pages cannot be huge and userspace should not combine both.
|
||||
* In case userspace does it anyway this will result in an -EFAULT for
|
||||
* the unpack. The guest is thus never reaching secure mode.
|
||||
* If userspace plays dirty tricks and decides to map huge pages at a
|
||||
* later point in time, it will receive a segmentation fault or
|
||||
* KVM_RUN will return -EFAULT.
|
||||
*/
|
||||
if (folio_test_hugetlb(folio))
|
||||
return -EFAULT;
|
||||
if (folio_test_large(folio)) {
|
||||
mmap_read_unlock(gmap->mm);
|
||||
rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
|
||||
mmap_read_lock(gmap->mm);
|
||||
if (rc)
|
||||
return rc;
|
||||
folio = page_folio(page);
|
||||
}
|
||||
|
||||
if (!folio_trylock(folio))
|
||||
return -EAGAIN;
|
||||
if (should_export_before_import(uvcb, gmap->mm))
|
||||
uv_convert_from_secure(folio_to_phys(folio));
|
||||
rc = make_folio_secure(folio, uvcb);
|
||||
folio_unlock(folio);
|
||||
|
||||
/*
|
||||
* In theory a race is possible and the folio might have become
|
||||
* large again before the folio_trylock() above. In that case, no
|
||||
* action is performed and -EAGAIN is returned; the callers will
|
||||
* have to try again later.
|
||||
* In most cases this implies running the VM again, getting the same
|
||||
* exception again, and make another attempt in this function.
|
||||
* This is expected to happen extremely rarely.
|
||||
*/
|
||||
if (rc == -E2BIG)
|
||||
return -EAGAIN;
|
||||
/* The folio has too many references, try to shake some off */
|
||||
if (rc == -EBUSY) {
|
||||
mmap_read_unlock(gmap->mm);
|
||||
kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
|
||||
mmap_read_lock(gmap->mm);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* gmap_make_secure() - make one guest page secure
|
||||
* @gmap: the guest gmap
|
||||
* @gaddr: the guest address that needs to be made secure
|
||||
* @uvcb: the UVCB specifying which operation needs to be performed
|
||||
*
|
||||
* Context: needs to be called with kvm->srcu held.
|
||||
* Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
|
||||
*/
|
||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
|
||||
{
|
||||
struct kvm *kvm = gmap->private;
|
||||
struct page *page;
|
||||
int rc = 0;
|
||||
|
||||
lockdep_assert_held(&kvm->srcu);
|
||||
|
||||
page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
|
||||
mmap_read_lock(gmap->mm);
|
||||
if (page)
|
||||
rc = __gmap_make_secure(gmap, page, uvcb);
|
||||
kvm_release_page_clean(page);
|
||||
mmap_read_unlock(gmap->mm);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
|
||||
{
|
||||
struct uv_cb_cts uvcb = {
|
||||
.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
|
||||
.header.len = sizeof(uvcb),
|
||||
.guest_handle = gmap->guest_handle,
|
||||
.gaddr = gaddr,
|
||||
};
|
||||
|
||||
return gmap_make_secure(gmap, gaddr, &uvcb);
|
||||
}
|
||||
|
||||
/**
|
||||
* __gmap_destroy_page() - Destroy a guest page.
|
||||
* @gmap: the gmap of the guest
|
||||
* @page: the page to destroy
|
||||
*
|
||||
* An attempt will be made to destroy the given guest page. If the attempt
|
||||
* fails, an attempt is made to export the page. If both attempts fail, an
|
||||
* appropriate error is returned.
|
||||
*
|
||||
* Context: must be called holding the mm lock for gmap->mm
|
||||
*/
|
||||
static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* See gmap_make_secure(): large folios cannot be secure. Small
|
||||
* folio implies FW_LEVEL_PTE.
|
||||
*/
|
||||
if (folio_test_large(folio))
|
||||
return -EFAULT;
|
||||
|
||||
rc = uv_destroy_folio(folio);
|
||||
/*
|
||||
* Fault handlers can race; it is possible that two CPUs will fault
|
||||
* on the same secure page. One CPU can destroy the page, reboot,
|
||||
* re-enter secure mode and import it, while the second CPU was
|
||||
* stuck at the beginning of the handler. At some point the second
|
||||
* CPU will be able to progress, and it will not be able to destroy
|
||||
* the page. In that case we do not want to terminate the process,
|
||||
* we instead try to export the page.
|
||||
*/
|
||||
if (rc)
|
||||
rc = uv_convert_from_secure_folio(folio);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* gmap_destroy_page() - Destroy a guest page.
|
||||
* @gmap: the gmap of the guest
|
||||
* @gaddr: the guest address to destroy
|
||||
*
|
||||
* An attempt will be made to destroy the given guest page. If the attempt
|
||||
* fails, an attempt is made to export the page. If both attempts fail, an
|
||||
* appropriate error is returned.
|
||||
*
|
||||
* Context: may sleep.
|
||||
*/
|
||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
|
||||
{
|
||||
struct page *page;
|
||||
int rc = 0;
|
||||
|
||||
mmap_read_lock(gmap->mm);
|
||||
page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
|
||||
if (page)
|
||||
rc = __gmap_destroy_page(gmap, page);
|
||||
kvm_release_page_clean(page);
|
||||
mmap_read_unlock(gmap->mm);
|
||||
return rc;
|
||||
}
|
39
arch/s390/kvm/gmap.h
Normal file

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2025
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef ARCH_KVM_S390_GMAP_H
#define ARCH_KVM_S390_GMAP_H

#define GMAP_SHADOW_FAKE_TABLE 1ULL

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 *
 */
static inline int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}

#endif
@ -21,6 +21,7 @@
|
|||
#include "gaccess.h"
|
||||
#include "trace.h"
|
||||
#include "trace-s390.h"
|
||||
#include "gmap.h"
|
||||
|
||||
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -367,7 +368,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
|
|||
reg2, &srcaddr, GACC_FETCH, 0);
|
||||
if (rc)
|
||||
return kvm_s390_inject_prog_cond(vcpu, rc);
|
||||
rc = gmap_fault(vcpu->arch.gmap, srcaddr, 0);
|
||||
rc = kvm_s390_handle_dat_fault(vcpu, srcaddr, 0);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
|
||||
|
@ -376,7 +377,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
|
|||
reg1, &dstaddr, GACC_STORE, 0);
|
||||
if (rc)
|
||||
return kvm_s390_inject_prog_cond(vcpu, rc);
|
||||
rc = gmap_fault(vcpu->arch.gmap, dstaddr, FAULT_FLAG_WRITE);
|
||||
rc = kvm_s390_handle_dat_fault(vcpu, dstaddr, FOLL_WRITE);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
|
||||
|
@ -549,7 +550,7 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
|
|||
* If the unpin did not succeed, the guest will exit again for the UVC
|
||||
* and we will retry the unpin.
|
||||
*/
|
||||
if (rc == -EINVAL)
|
||||
if (rc == -EINVAL || rc == -ENXIO)
|
||||
return 0;
|
||||
/*
|
||||
* If we got -EAGAIN here, we simply return it. It will eventually
|
||||
|
|
|
@ -2893,7 +2893,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
|||
struct kvm_kernel_irq_routing_entry *e,
|
||||
const struct kvm_irq_routing_entry *ue)
|
||||
{
|
||||
u64 uaddr;
|
||||
u64 uaddr_s, uaddr_i;
|
||||
int idx;
|
||||
|
||||
switch (ue->type) {
|
||||
/* we store the userspace addresses instead of the guest addresses */
|
||||
|
@ -2901,14 +2902,16 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
|||
if (kvm_is_ucontrol(kvm))
|
||||
return -EINVAL;
|
||||
e->set = set_adapter_int;
|
||||
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
|
||||
if (uaddr == -EFAULT)
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
|
||||
uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
|
||||
if (kvm_is_error_hva(uaddr_s) || kvm_is_error_hva(uaddr_i))
|
||||
return -EFAULT;
|
||||
e->adapter.summary_addr = uaddr;
|
||||
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
|
||||
if (uaddr == -EFAULT)
|
||||
return -EFAULT;
|
||||
e->adapter.ind_addr = uaddr;
|
||||
e->adapter.summary_addr = uaddr_s;
|
||||
e->adapter.ind_addr = uaddr_i;
|
||||
e->adapter.summary_offset = ue->u.adapter.summary_offset;
|
||||
e->adapter.ind_offset = ue->u.adapter.ind_offset;
|
||||
e->adapter.adapter_id = ue->u.adapter.adapter_id;
|
||||
|
|
|
@ -50,6 +50,7 @@
|
|||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
#include "pci.h"
|
||||
#include "gmap.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "trace.h"
|
||||
|
@ -3428,8 +3429,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
VM_EVENT(kvm, 3, "vm created with type %lu", type);
|
||||
|
||||
if (type & KVM_VM_S390_UCONTROL) {
|
||||
struct kvm_userspace_memory_region2 fake_memslot = {
|
||||
.slot = KVM_S390_UCONTROL_MEMSLOT,
|
||||
.guest_phys_addr = 0,
|
||||
.userspace_addr = 0,
|
||||
.memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
|
||||
.flags = 0,
|
||||
};
|
||||
|
||||
kvm->arch.gmap = NULL;
|
||||
kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
|
||||
/* one flat fake memslot covering the whole address-space */
|
||||
mutex_lock(&kvm->slots_lock);
|
||||
KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
} else {
|
||||
if (sclp.hamax == U64_MAX)
|
||||
kvm->arch.mem_limit = TASK_SIZE_MAX;
|
||||
|
@ -4498,6 +4511,75 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
|
|||
return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
|
||||
}
|
||||
|
||||
static int __kvm_s390_fixup_fault_sync(struct gmap *gmap, gpa_t gaddr, unsigned int flags)
|
||||
{
|
||||
struct kvm *kvm = gmap->private;
|
||||
gfn_t gfn = gpa_to_gfn(gaddr);
|
||||
bool unlocked;
|
||||
hva_t vmaddr;
|
||||
gpa_t tmp;
|
||||
int rc;
|
||||
|
||||
if (kvm_is_ucontrol(kvm)) {
|
||||
tmp = __gmap_translate(gmap, gaddr);
|
||||
gfn = gpa_to_gfn(tmp);
|
||||
}
|
||||
|
||||
vmaddr = gfn_to_hva(kvm, gfn);
|
||||
rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
|
||||
if (!rc)
|
||||
rc = __gmap_link(gmap, gaddr, vmaddr);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* __kvm_s390_mprotect_many() - Apply specified protection to guest pages
|
||||
* @gmap: the gmap of the guest
|
||||
* @gpa: the starting guest address
|
||||
* @npages: how many pages to protect
|
||||
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
|
||||
* @bits: pgste notification bits to set
|
||||
*
|
||||
* Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
|
||||
*
|
||||
* Context: kvm->srcu and gmap->mm need to be held in read mode
|
||||
*/
|
||||
int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
|
||||
unsigned long bits)
|
||||
{
|
||||
unsigned int fault_flag = (prot & PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
|
||||
gpa_t end = gpa + npages * PAGE_SIZE;
|
||||
int rc;
|
||||
|
||||
for (; gpa < end; gpa = ALIGN(gpa + 1, rc)) {
|
||||
rc = gmap_protect_one(gmap, gpa, prot, bits);
|
||||
if (rc == -EAGAIN) {
|
||||
__kvm_s390_fixup_fault_sync(gmap, gpa, fault_flag);
|
||||
rc = gmap_protect_one(gmap, gpa, prot, bits);
|
||||
}
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kvm_s390_mprotect_notify_prefix(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
gpa_t gaddr = kvm_s390_get_prefix(vcpu);
|
||||
int idx, rc;
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
mmap_read_lock(vcpu->arch.gmap->mm);
|
||||
|
||||
rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT);
|
||||
|
||||
mmap_read_unlock(vcpu->arch.gmap->mm);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
retry:
|
||||
|
@ -4513,9 +4595,8 @@ retry:
|
|||
*/
|
||||
if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
|
||||
int rc;
|
||||
rc = gmap_mprotect_notify(vcpu->arch.gmap,
|
||||
kvm_s390_get_prefix(vcpu),
|
||||
PAGE_SIZE * 2, PROT_WRITE);
|
||||
|
||||
rc = kvm_s390_mprotect_notify_prefix(vcpu);
|
||||
if (rc) {
|
||||
kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
|
||||
return rc;
|
||||
|
@ -4766,11 +4847,111 @@ static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
|
|||
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
|
||||
}
|
||||
|
||||
static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
|
||||
"Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
}
|
||||
|
||||
/*
|
||||
* __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
|
||||
* @vcpu: the vCPU whose gmap is to be fixed up
|
||||
* @gfn: the guest frame number used for memslots (including fake memslots)
|
||||
* @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
|
||||
* @flags: FOLL_* flags
|
||||
*
|
||||
* Return: 0 on success, < 0 in case of error.
|
||||
* Context: The mm lock must not be held before calling. May sleep.
|
||||
*/
|
||||
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
|
||||
{
|
||||
struct kvm_memory_slot *slot;
|
||||
unsigned int fault_flags;
|
||||
bool writable, unlocked;
|
||||
unsigned long vmaddr;
|
||||
struct page *page;
|
||||
kvm_pfn_t pfn;
|
||||
int rc;
|
||||
|
||||
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
|
||||
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
|
||||
return vcpu_post_run_addressing_exception(vcpu);
|
||||
|
||||
fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
|
||||
if (vcpu->arch.gmap->pfault_enabled)
|
||||
flags |= FOLL_NOWAIT;
|
||||
vmaddr = __gfn_to_hva_memslot(slot, gfn);
|
||||
|
||||
try_again:
|
||||
pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);
|
||||
|
||||
/* Access outside memory, inject addressing exception */
|
||||
if (is_noslot_pfn(pfn))
|
||||
return vcpu_post_run_addressing_exception(vcpu);
|
||||
/* Signal pending: try again */
|
||||
if (pfn == KVM_PFN_ERR_SIGPENDING)
|
||||
return -EAGAIN;
|
||||
|
||||
/* Needs I/O, try to setup async pfault (only possible with FOLL_NOWAIT) */
|
||||
if (pfn == KVM_PFN_ERR_NEEDS_IO) {
|
||||
trace_kvm_s390_major_guest_pfault(vcpu);
|
||||
if (kvm_arch_setup_async_pf(vcpu))
|
||||
return 0;
|
||||
vcpu->stat.pfault_sync++;
|
||||
/* Could not setup async pfault, try again synchronously */
|
||||
flags &= ~FOLL_NOWAIT;
|
||||
goto try_again;
|
||||
}
|
||||
/* Any other error */
|
||||
if (is_error_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
/* Success */
|
||||
mmap_read_lock(vcpu->arch.gmap->mm);
|
||||
/* Mark the userspace PTEs as young and/or dirty, to avoid page fault loops */
|
||||
rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked);
|
||||
if (!rc)
|
||||
rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr);
|
||||
scoped_guard(spinlock, &vcpu->kvm->mmu_lock) {
|
||||
kvm_release_faultin_page(vcpu->kvm, page, false, writable);
|
||||
}
|
||||
mmap_read_unlock(vcpu->arch.gmap->mm);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
|
||||
{
|
||||
unsigned long gaddr_tmp;
|
||||
gfn_t gfn;
|
||||
|
||||
gfn = gpa_to_gfn(gaddr);
|
||||
if (kvm_is_ucontrol(vcpu->kvm)) {
|
||||
/*
|
||||
* This translates the per-vCPU guest address into a
|
||||
* fake guest address, which can then be used with the
|
||||
* fake memslots that are identity mapping userspace.
|
||||
* This allows ucontrol VMs to use the normal fault
|
||||
* resolution path, like normal VMs.
|
||||
*/
|
||||
mmap_read_lock(vcpu->arch.gmap->mm);
|
||||
gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr);
|
||||
mmap_read_unlock(vcpu->arch.gmap->mm);
|
||||
if (gaddr_tmp == -EFAULT) {
|
||||
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
|
||||
vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
|
||||
vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
|
||||
return -EREMOTE;
|
||||
}
|
||||
gfn = gpa_to_gfn(gaddr_tmp);
|
||||
}
|
||||
return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
|
||||
}
|
||||
|
||||
static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned int flags = 0;
|
||||
unsigned long gaddr;
|
||||
int rc = 0;
|
||||
|
||||
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
|
||||
if (kvm_s390_cur_gmap_fault_is_write())
|
||||
|
@ -4781,9 +4962,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
|||
vcpu->stat.exit_null++;
|
||||
break;
|
||||
case PGM_NON_SECURE_STORAGE_ACCESS:
|
||||
KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
|
||||
"Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
kvm_s390_assert_primary_as(vcpu);
|
||||
/*
|
||||
* This is normal operation; a page belonging to a protected
|
||||
* guest has not been imported yet. Try to import the page into
|
||||
|
@ -4794,9 +4973,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
|||
break;
|
||||
case PGM_SECURE_STORAGE_ACCESS:
|
||||
case PGM_SECURE_STORAGE_VIOLATION:
|
||||
KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
|
||||
"Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
kvm_s390_assert_primary_as(vcpu);
|
||||
/*
|
||||
* This can happen after a reboot with asynchronous teardown;
|
||||
* the new guest (normal or protected) will run on top of the
|
||||
|
@ -4825,40 +5002,15 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
|||
case PGM_REGION_FIRST_TRANS:
|
||||
case PGM_REGION_SECOND_TRANS:
|
||||
case PGM_REGION_THIRD_TRANS:
|
||||
KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
|
||||
"Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
if (vcpu->arch.gmap->pfault_enabled) {
|
||||
rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
|
||||
if (rc == -EFAULT)
|
||||
return vcpu_post_run_addressing_exception(vcpu);
|
||||
if (rc == -EAGAIN) {
|
||||
trace_kvm_s390_major_guest_pfault(vcpu);
|
||||
if (kvm_arch_setup_async_pf(vcpu))
|
||||
return 0;
|
||||
vcpu->stat.pfault_sync++;
|
||||
} else {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
|
||||
if (rc == -EFAULT) {
|
||||
if (kvm_is_ucontrol(vcpu->kvm)) {
|
||||
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
|
||||
vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
|
||||
vcpu->run->s390_ucontrol.pgm_code = 0x10;
|
||||
return -EREMOTE;
|
||||
}
|
||||
return vcpu_post_run_addressing_exception(vcpu);
|
||||
}
|
||||
break;
|
||||
kvm_s390_assert_primary_as(vcpu);
|
||||
return vcpu_dat_fault_handler(vcpu, gaddr, flags);
|
||||
default:
|
||||
KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
send_sig(SIGSEGV, current, 0);
|
||||
break;
|
||||
}
|
||||
return rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
|
||||
|
@ -5737,7 +5889,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||
}
|
||||
#endif
|
||||
case KVM_S390_VCPU_FAULT: {
|
||||
r = gmap_fault(vcpu->arch.gmap, arg, 0);
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
r = vcpu_dat_fault_handler(vcpu, arg, 0);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
break;
|
||||
}
|
||||
case KVM_ENABLE_CAP:
|
||||
|
@ -5853,7 +6007,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
{
|
||||
gpa_t size;
|
||||
|
||||
if (kvm_is_ucontrol(kvm))
|
||||
if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
|
||||
return -EINVAL;
|
||||
|
||||
/* When we are protected, we should not change the memory slots */
|
||||
|
@ -5905,6 +6059,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|||
{
|
||||
int rc = 0;
|
||||
|
||||
if (kvm_is_ucontrol(kvm))
|
||||
return;
|
||||
|
||||
switch (change) {
|
||||
case KVM_MR_DELETE:
|
||||
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
#include <asm/processor.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
#define KVM_S390_UCONTROL_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
|
||||
|
||||
static inline void kvm_s390_fpu_store(struct kvm_run *run)
|
||||
{
|
||||
fpu_stfpc(&run->s.regs.fpc);
|
||||
|
@ -279,6 +281,15 @@ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
|
|||
return gd;
|
||||
}
|
||||
|
||||
static inline hva_t gpa_to_hva(struct kvm *kvm, gpa_t gpa)
|
||||
{
|
||||
hva_t hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
|
||||
|
||||
if (!kvm_is_error_hva(hva))
|
||||
hva |= offset_in_page(gpa);
|
||||
return hva;
|
||||
}
|
||||
|
||||
/* implemented in pv.c */
|
||||
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
|
||||
|
@ -408,6 +419,14 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
|
|||
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
|
||||
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
|
||||
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);
|
||||
int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
|
||||
unsigned long bits);
|
||||
|
||||
static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
|
||||
{
|
||||
return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
|
||||
}
|
||||
|
||||
/* implemented in diag.c */
|
||||
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/sched/mm.h>
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gmap.h"
|
||||
|
||||
bool kvm_s390_pv_is_protected(struct kvm *kvm)
|
||||
{
|
||||
|
@ -638,10 +639,28 @@ static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
|
|||
.tweak[1] = offset,
|
||||
};
|
||||
int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
|
||||
unsigned long vmaddr;
|
||||
bool unlocked;
|
||||
|
||||
*rc = uvcb.header.rc;
|
||||
*rrc = uvcb.header.rrc;
|
||||
|
||||
if (ret == -ENXIO) {
|
||||
mmap_read_lock(kvm->mm);
|
||||
vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
|
||||
if (kvm_is_error_hva(vmaddr)) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
|
||||
if (!ret)
|
||||
ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
|
||||
}
|
||||
mmap_read_unlock(kvm->mm);
|
||||
if (!ret)
|
||||
return -EAGAIN;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret && ret != -EAGAIN)
|
||||
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
|
||||
uvcb.gaddr, *rc, *rrc);
|
||||
|
@ -660,6 +679,8 @@ int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
|
|||
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
|
||||
addr, size);
|
||||
|
||||
guard(srcu)(&kvm->srcu);
|
||||
|
||||
while (offset < size) {
|
||||
ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
|
||||
if (ret == -EAGAIN) {
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/bitmap.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/mman.h>
|
||||
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -22,6 +23,11 @@
|
|||
#include <asm/facility.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
#include "gmap.h"
|
||||
|
||||
enum vsie_page_flags {
|
||||
VSIE_PAGE_IN_USE = 0,
|
||||
};
|
||||
|
||||
struct vsie_page {
|
||||
struct kvm_s390_sie_block scb_s; /* 0x0000 */
|
||||
|
@ -46,7 +52,18 @@ struct vsie_page {
|
|||
gpa_t gvrd_gpa; /* 0x0240 */
|
||||
gpa_t riccbd_gpa; /* 0x0248 */
|
||||
gpa_t sdnx_gpa; /* 0x0250 */
|
||||
__u8 reserved[0x0700 - 0x0258]; /* 0x0258 */
|
||||
/*
|
||||
* guest address of the original SCB. Remains set for free vsie
|
||||
* pages, so we can properly look them up in our addr_to_page
|
||||
* radix tree.
|
||||
*/
|
||||
gpa_t scb_gpa; /* 0x0258 */
|
||||
/*
|
||||
* Flags: must be set/cleared atomically after the vsie page can be
|
||||
* looked up by other CPUs.
|
||||
*/
|
||||
unsigned long flags; /* 0x0260 */
|
||||
__u8 reserved[0x0700 - 0x0268]; /* 0x0268 */
|
||||
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
|
||||
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
|
||||
};
|
||||
|
@ -584,7 +601,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
|
|||
struct kvm *kvm = gmap->private;
|
||||
struct vsie_page *cur;
|
||||
unsigned long prefix;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
if (!gmap_is_shadow(gmap))
|
||||
|
@ -594,10 +610,9 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
|
|||
* therefore we can safely reference them all the time.
|
||||
*/
|
||||
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
|
||||
page = READ_ONCE(kvm->arch.vsie.pages[i]);
|
||||
if (!page)
|
||||
cur = READ_ONCE(kvm->arch.vsie.pages[i]);
|
||||
if (!cur)
|
||||
continue;
|
||||
cur = page_to_virt(page);
|
||||
if (READ_ONCE(cur->gmap) != gmap)
|
||||
continue;
|
||||
prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
|
||||
|
@ -1345,6 +1360,20 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/* Try getting a given vsie page, returning "true" on success. */
|
||||
static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
|
||||
{
|
||||
if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
|
||||
return false;
|
||||
return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
|
||||
}
|
||||
|
||||
/* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
|
||||
static void put_vsie_page(struct vsie_page *vsie_page)
|
||||
{
|
||||
clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get or create a vsie page for a scb address.
|
||||
*
|
||||
|
@ -1355,16 +1384,21 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
|
|||
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
|
||||
{
|
||||
struct vsie_page *vsie_page;
|
||||
struct page *page;
|
||||
int nr_vcpus;
|
||||
|
||||
rcu_read_lock();
|
||||
page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
|
||||
vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
|
||||
rcu_read_unlock();
|
||||
if (page) {
|
||||
if (page_ref_inc_return(page) == 2)
|
||||
return page_to_virt(page);
|
||||
page_ref_dec(page);
|
||||
if (vsie_page) {
|
||||
if (try_get_vsie_page(vsie_page)) {
|
||||
if (vsie_page->scb_gpa == addr)
|
||||
return vsie_page;
|
||||
/*
|
||||
* We raced with someone reusing + putting this vsie
|
||||
* page before we grabbed it.
|
||||
*/
|
||||
put_vsie_page(vsie_page);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1375,36 +1409,40 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
|
|||
|
||||
mutex_lock(&kvm->arch.vsie.mutex);
|
||||
if (kvm->arch.vsie.page_count < nr_vcpus) {
|
||||
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
|
||||
if (!page) {
|
||||
vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
|
||||
if (!vsie_page) {
|
||||
mutex_unlock(&kvm->arch.vsie.mutex);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
page_ref_inc(page);
|
||||
kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
|
||||
__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
|
||||
kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
|
||||
kvm->arch.vsie.page_count++;
|
||||
} else {
|
||||
/* reuse an existing entry that belongs to nobody */
|
||||
while (true) {
|
||||
page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
|
||||
if (page_ref_inc_return(page) == 2)
|
||||
vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
|
||||
if (try_get_vsie_page(vsie_page))
|
||||
break;
|
||||
page_ref_dec(page);
|
||||
kvm->arch.vsie.next++;
|
||||
kvm->arch.vsie.next %= nr_vcpus;
|
||||
}
|
||||
radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
|
||||
if (vsie_page->scb_gpa != ULONG_MAX)
|
||||
radix_tree_delete(&kvm->arch.vsie.addr_to_page,
|
||||
vsie_page->scb_gpa >> 9);
|
||||
}
|
||||
page->index = addr;
|
||||
/* double use of the same address */
|
||||
if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
|
||||
page_ref_dec(page);
|
||||
/* Mark it as invalid until it resides in the tree. */
|
||||
vsie_page->scb_gpa = ULONG_MAX;
|
||||
|
||||
/* Double use of the same address or allocation failure. */
|
||||
if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9,
|
||||
vsie_page)) {
|
||||
put_vsie_page(vsie_page);
|
||||
mutex_unlock(&kvm->arch.vsie.mutex);
|
||||
return NULL;
|
||||
}
|
||||
vsie_page->scb_gpa = addr;
|
||||
mutex_unlock(&kvm->arch.vsie.mutex);
|
||||
|
||||
vsie_page = page_to_virt(page);
|
||||
memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
|
||||
release_gmap_shadow(vsie_page);
|
||||
vsie_page->fault_addr = 0;
|
||||
|
@ -1412,14 +1450,6 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
|
|||
return vsie_page;
|
||||
}
|
||||
|
||||
/* put a vsie page acquired via get_vsie_page */
|
||||
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
|
||||
{
|
||||
struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
|
||||
|
||||
page_ref_dec(page);
|
||||
}
|
||||
|
||||
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vsie_page *vsie_page;
|
||||
|
@ -1470,7 +1500,7 @@ out_unshadow:
|
|||
out_unpin_scb:
|
||||
unpin_scb(vcpu, vsie_page, scb_addr);
|
||||
out_put:
|
||||
put_vsie_page(vcpu->kvm, vsie_page);
|
||||
put_vsie_page(vsie_page);
|
||||
|
||||
return rc < 0 ? rc : 0;
|
||||
}
|
||||
|
@ -1486,18 +1516,18 @@ void kvm_s390_vsie_init(struct kvm *kvm)
|
|||
void kvm_s390_vsie_destroy(struct kvm *kvm)
|
||||
{
|
||||
struct vsie_page *vsie_page;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
mutex_lock(&kvm->arch.vsie.mutex);
|
||||
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
|
||||
page = kvm->arch.vsie.pages[i];
|
||||
vsie_page = kvm->arch.vsie.pages[i];
|
||||
kvm->arch.vsie.pages[i] = NULL;
|
||||
vsie_page = page_to_virt(page);
|
||||
release_gmap_shadow(vsie_page);
|
||||
/* free the radix tree entry */
|
||||
radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
|
||||
__free_page(page);
|
||||
if (vsie_page->scb_gpa != ULONG_MAX)
|
||||
radix_tree_delete(&kvm->arch.vsie.addr_to_page,
|
||||
vsie_page->scb_gpa >> 9);
|
||||
free_page((unsigned long)vsie_page);
|
||||
}
|
||||
kvm->arch.vsie.page_count = 0;
|
||||
mutex_unlock(&kvm->arch.vsie.mutex);
|
||||
|
|
File diff suppressed because it is too large
|
@@ -176,8 +176,6 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;

@@ -25,6 +25,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
# avoid errors with '-march=i386', and future flags may depend on the target to
# be valid.
KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
KBUILD_CFLAGS += -std=gnu11
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING

@@ -1180,7 +1180,7 @@ void kvm_set_cpu_caps(void)
		SYNTHESIZED_F(SBPB),
		SYNTHESIZED_F(IBPB_BRTYPE),
		SYNTHESIZED_F(SRSO_NO),
		SYNTHESIZED_F(SRSO_USER_KERNEL_NO),
		F(SRSO_USER_KERNEL_NO),
	);

	kvm_cpu_cap_init(CPUID_8000_0022_EAX,

@ -7120,6 +7120,19 @@ static void mmu_destroy_caches(void)
|
|||
kmem_cache_destroy(mmu_page_header_cache);
|
||||
}
|
||||
|
||||
static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
|
||||
{
|
||||
/*
|
||||
* The NX recovery thread is spawned on-demand at the first KVM_RUN and
|
||||
* may not be valid even though the VM is globally visible. Do nothing,
|
||||
* as such a VM can't have any possible NX huge pages.
|
||||
*/
|
||||
struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
|
||||
|
||||
if (nx_thread)
|
||||
vhost_task_wake(nx_thread);
|
||||
}
|
||||
|
||||
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
|
||||
{
|
||||
if (nx_hugepage_mitigation_hard_disabled)
|
||||
|
@ -7180,7 +7193,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
|
|||
kvm_mmu_zap_all_fast(kvm);
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
|
||||
vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
|
||||
kvm_wake_nx_recovery_thread(kvm);
|
||||
}
|
||||
mutex_unlock(&kvm_lock);
|
||||
}
|
||||
|
@ -7315,7 +7328,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
|
|||
mutex_lock(&kvm_lock);
|
||||
|
||||
list_for_each_entry(kvm, &vm_list, vm_list)
|
||||
vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
|
||||
kvm_wake_nx_recovery_thread(kvm);
|
||||
|
||||
mutex_unlock(&kvm_lock);
|
||||
}
|
||||
|
@ -7451,14 +7464,20 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
|
|||
{
|
||||
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
|
||||
struct kvm *kvm = container_of(ka, struct kvm, arch);
|
||||
struct vhost_task *nx_thread;
|
||||
|
||||
kvm->arch.nx_huge_page_last = get_jiffies_64();
|
||||
kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
|
||||
kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
|
||||
kvm, "kvm-nx-lpage-recovery");
|
||||
nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
|
||||
kvm_nx_huge_page_recovery_worker_kill,
|
||||
kvm, "kvm-nx-lpage-recovery");
|
||||
|
||||
if (kvm->arch.nx_huge_page_recovery_thread)
|
||||
vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
|
||||
if (!nx_thread)
|
||||
return;
|
||||
|
||||
vhost_task_start(nx_thread);
|
||||
|
||||
/* Make the task visible only once it is fully started. */
|
||||
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
|
||||
}
|
||||
|
||||
int kvm_mmu_post_init_vm(struct kvm *kvm)
|
||||
|
|
|
@ -12741,6 +12741,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
"does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
|
||||
}
|
||||
|
||||
once_init(&kvm->arch.nx_once);
|
||||
return 0;
|
||||
|
||||
out_uninit_mmu:
|
||||
|
@ -12750,12 +12751,6 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int kvm_arch_post_init_vm(struct kvm *kvm)
|
||||
{
|
||||
once_init(&kvm->arch.nx_once);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu_load(vcpu);
|
||||
|
|
|
@ -100,9 +100,6 @@ SYM_FUNC_START(xen_hypercall_hvm)
|
|||
push %r10
|
||||
push %r9
|
||||
push %r8
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
pushq $0 /* Dummy push for stack alignment. */
|
||||
#endif
|
||||
#endif
|
||||
/* Set the vendor specific function. */
|
||||
call __xen_hypercall_setfunc
|
||||
|
@ -117,11 +114,8 @@ SYM_FUNC_START(xen_hypercall_hvm)
|
|||
pop %ebx
|
||||
pop %eax
|
||||
#else
|
||||
lea xen_hypercall_amd(%rip), %rbx
|
||||
cmp %rax, %rbx
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
pop %rax /* Dummy pop. */
|
||||
#endif
|
||||
lea xen_hypercall_amd(%rip), %rcx
|
||||
cmp %rax, %rcx
|
||||
pop %r8
|
||||
pop %r9
|
||||
pop %r10
|
||||
|
@ -132,6 +126,7 @@ SYM_FUNC_START(xen_hypercall_hvm)
|
|||
pop %rcx
|
||||
pop %rax
|
||||
#endif
|
||||
FRAME_END
|
||||
/* Use correct hypercall function. */
|
||||
jz xen_hypercall_amd
|
||||
jmp xen_hypercall_intel
|
||||
|
|
|
@@ -21,6 +21,11 @@

#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */

MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");

/*
 * Bind the driver base on (vendor_id, device_id) pair and later use the
 * (device_id, rev_id) pair as a key to select the devices. The devices with

@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
|
|||
if (ivpu_fw_is_cold_boot(vdev)) {
|
||||
ret = ivpu_pm_dct_init(vdev);
|
||||
if (ret)
|
||||
goto err_diagnose_failure;
|
||||
goto err_disable_ipc;
|
||||
|
||||
ret = ivpu_hw_sched_init(vdev);
|
||||
if (ret)
|
||||
goto err_diagnose_failure;
|
||||
goto err_disable_ipc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_disable_ipc:
|
||||
ivpu_ipc_disable(vdev);
|
||||
ivpu_hw_irq_disable(vdev);
|
||||
disable_irq(vdev->irq);
|
||||
err_diagnose_failure:
|
||||
ivpu_hw_diagnose_failure(vdev);
|
||||
ivpu_mmu_evtq_dump(vdev);
|
||||
|
|
|
@ -115,41 +115,57 @@ err_power_down:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void ivpu_pm_recovery_work(struct work_struct *work)
|
||||
static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
|
||||
struct ivpu_device *vdev = pm->vdev;
|
||||
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
|
||||
int ret;
|
||||
|
||||
ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
|
||||
|
||||
ret = pm_runtime_resume_and_get(vdev->drm.dev);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
|
||||
|
||||
ivpu_jsm_state_dump(vdev);
|
||||
ivpu_dev_coredump(vdev);
|
||||
pm_runtime_disable(vdev->drm.dev);
|
||||
|
||||
atomic_inc(&vdev->pm->reset_counter);
|
||||
atomic_set(&vdev->pm->reset_pending, 1);
|
||||
down_write(&vdev->pm->reset_lock);
|
||||
}
|
||||
|
||||
static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ivpu_suspend(vdev);
|
||||
ivpu_pm_prepare_cold_boot(vdev);
|
||||
ivpu_jobs_abort_all(vdev);
|
||||
ivpu_ms_cleanup_all(vdev);
|
||||
|
||||
ret = ivpu_resume(vdev);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
|
||||
pm_runtime_set_suspended(vdev->drm.dev);
|
||||
} else {
|
||||
pm_runtime_set_active(vdev->drm.dev);
|
||||
}
|
||||
|
||||
up_write(&vdev->pm->reset_lock);
|
||||
atomic_set(&vdev->pm->reset_pending, 0);
|
||||
|
||||
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
|
||||
pm_runtime_mark_last_busy(vdev->drm.dev);
|
||||
pm_runtime_put_autosuspend(vdev->drm.dev);
|
||||
pm_runtime_enable(vdev->drm.dev);
|
||||
}
|
||||
|
||||
static void ivpu_pm_recovery_work(struct work_struct *work)
|
||||
{
|
||||
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
|
||||
struct ivpu_device *vdev = pm->vdev;
|
||||
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
|
||||
|
||||
ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
|
||||
|
||||
ivpu_pm_reset_begin(vdev);
|
||||
|
||||
if (!pm_runtime_status_suspended(vdev->drm.dev)) {
|
||||
ivpu_jsm_state_dump(vdev);
|
||||
ivpu_dev_coredump(vdev);
|
||||
ivpu_suspend(vdev);
|
||||
}
|
||||
|
||||
ivpu_pm_reset_complete(vdev);
|
||||
|
||||
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
|
||||
}
|
||||
|
||||
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
|
||||
|
@ -309,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
|
|||
int ret;
|
||||
|
||||
ret = pm_runtime_resume_and_get(vdev->drm.dev);
|
||||
drm_WARN_ON(&vdev->drm, ret < 0);
|
||||
if (ret < 0) {
|
||||
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
|
||||
pm_runtime_set_suspended(vdev->drm.dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -325,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
|
|||
struct ivpu_device *vdev = pci_get_drvdata(pdev);
|
||||
|
||||
ivpu_dbg(vdev, PM, "Pre-reset..\n");
|
||||
atomic_inc(&vdev->pm->reset_counter);
|
||||
atomic_set(&vdev->pm->reset_pending, 1);
|
||||
|
||||
pm_runtime_get_sync(vdev->drm.dev);
|
||||
down_write(&vdev->pm->reset_lock);
|
||||
ivpu_prepare_for_reset(vdev);
|
||||
ivpu_hw_reset(vdev);
|
||||
ivpu_pm_prepare_cold_boot(vdev);
|
||||
ivpu_jobs_abort_all(vdev);
|
||||
ivpu_ms_cleanup_all(vdev);
|
||||
ivpu_pm_reset_begin(vdev);
|
||||
|
||||
if (!pm_runtime_status_suspended(vdev->drm.dev)) {
|
||||
ivpu_prepare_for_reset(vdev);
|
||||
ivpu_hw_reset(vdev);
|
||||
}
|
||||
|
||||
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
|
||||
}
|
||||
|
@ -342,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
|
|||
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
|
||||
{
|
||||
struct ivpu_device *vdev = pci_get_drvdata(pdev);
|
||||
int ret;
|
||||
|
||||
ivpu_dbg(vdev, PM, "Post-reset..\n");
|
||||
ret = ivpu_resume(vdev);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
|
||||
up_write(&vdev->pm->reset_lock);
|
||||
atomic_set(&vdev->pm->reset_pending, 0);
|
||||
ivpu_dbg(vdev, PM, "Post-reset done.\n");
|
||||
|
||||
pm_runtime_mark_last_busy(vdev->drm.dev);
|
||||
pm_runtime_put_autosuspend(vdev->drm.dev);
|
||||
ivpu_pm_reset_complete(vdev);
|
||||
|
||||
ivpu_dbg(vdev, PM, "Post-reset done.\n");
|
||||
}
|
||||
|
||||
void ivpu_pm_init(struct ivpu_device *vdev)
|
||||
|
|
|
@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
		if (!handler || !module)
			goto invalid_guid;

		if (!handler->handler_addr ||
		    !handler->static_data_buffer_addr ||
		    !handler->acpi_param_buffer_addr) {
		if (!handler->handler_addr) {
			buffer->prm_status = PRM_HANDLER_ERROR;
			return AE_OK;
		}

@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
|
|||
}
|
||||
break;
|
||||
}
|
||||
if (nval == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (obj->type == ACPI_TYPE_BUFFER) {
|
||||
if (proptype != DEV_PROP_U8)
|
||||
|
@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
|
|||
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
|
||||
break;
|
||||
case DEV_PROP_STRING:
|
||||
ret = acpi_copy_property_array_string(
|
||||
items, (char **)val,
|
||||
min_t(u32, nval, obj->package.count));
|
||||
nval = min_t(u32, nval, obj->package.count);
|
||||
if (nval == 0)
|
||||
return -ENODATA;
|
||||
|
||||
ret = acpi_copy_property_array_string(items, (char **)val, nval);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
|
|
@@ -563,6 +563,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
			DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
			DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
		},
	},
	{
		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
		.matches = {

@ -1191,24 +1191,18 @@ static pm_message_t resume_event(pm_message_t sleep_state)
|
|||
return PMSG_ON;
|
||||
}
|
||||
|
||||
static void dpm_superior_set_must_resume(struct device *dev, bool set_active)
|
||||
static void dpm_superior_set_must_resume(struct device *dev)
|
||||
{
|
||||
struct device_link *link;
|
||||
int idx;
|
||||
|
||||
if (dev->parent) {
|
||||
if (dev->parent)
|
||||
dev->parent->power.must_resume = true;
|
||||
if (set_active)
|
||||
dev->parent->power.set_active = true;
|
||||
}
|
||||
|
||||
idx = device_links_read_lock();
|
||||
|
||||
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
|
||||
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
|
||||
link->supplier->power.must_resume = true;
|
||||
if (set_active)
|
||||
link->supplier->power.set_active = true;
|
||||
}
|
||||
|
||||
device_links_read_unlock(idx);
|
||||
}
|
||||
|
@ -1287,9 +1281,12 @@ Skip:
|
|||
dev->power.must_resume = true;
|
||||
|
||||
if (dev->power.must_resume) {
|
||||
dev->power.set_active = dev->power.set_active ||
|
||||
dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
|
||||
dpm_superior_set_must_resume(dev, dev->power.set_active);
|
||||
if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
|
||||
dev->power.set_active = true;
|
||||
if (dev->parent && !dev->parent->power.ignore_children)
|
||||
dev->parent->power.set_active = true;
|
||||
}
|
||||
dpm_superior_set_must_resume(dev);
|
||||
}
|
||||
|
||||
Complete:
|
||||
|
|
|
@@ -1127,8 +1127,8 @@ static void vdc_queue_drain(struct vdc_port *port)

	spin_lock_irq(&port->vio.lock);
	port->drain = 0;
	blk_mq_unquiesce_queue(q, memflags);
	blk_mq_unfreeze_queue(q);
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q, memflags);
}

static void vdc_ldc_reset_timer_work(struct work_struct *work)

@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)

	id = moxtet->modules[pos->idx];

	seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
	seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
		   pos->bit);
}

@@ -17,7 +17,8 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM

config ARM_AIROHA_SOC_CPUFREQ
	tristate "Airoha EN7581 SoC CPUFreq support"
	depends on (ARCH_AIROHA && OF) || COMPILE_TEST
	depends on ARCH_AIROHA || COMPILE_TEST
	depends on OF
	select PM_OPP
	default ARCH_AIROHA
	help

@ -699,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
|
|||
if (min_perf < lowest_nonlinear_perf)
|
||||
min_perf = lowest_nonlinear_perf;
|
||||
|
||||
max_perf = cap_perf;
|
||||
max_perf = cpudata->max_limit_perf;
|
||||
if (max_perf < min_perf)
|
||||
max_perf = min_perf;
|
||||
|
||||
|
@ -747,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
|
|||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
|
||||
ret = amd_pstate_cpu_boost_update(policy, state);
|
||||
policy->boost_enabled = !ret ? state : false;
|
||||
refresh_frequency_limits(policy);
|
||||
|
||||
return ret;
|
||||
|
@ -822,25 +821,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
|
|||
|
||||
static void amd_pstate_update_limits(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
|
||||
struct cpufreq_policy *policy = NULL;
|
||||
struct amd_cpudata *cpudata;
|
||||
u32 prev_high = 0, cur_high = 0;
|
||||
int ret;
|
||||
bool highest_perf_changed = false;
|
||||
|
||||
if (!amd_pstate_prefcore)
|
||||
return;
|
||||
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (!policy)
|
||||
return;
|
||||
|
||||
cpudata = policy->driver_data;
|
||||
|
||||
if (!amd_pstate_prefcore)
|
||||
return;
|
||||
|
||||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
|
||||
ret = amd_get_highest_perf(cpu, &cur_high);
|
||||
if (ret)
|
||||
goto free_cpufreq_put;
|
||||
if (ret) {
|
||||
cpufreq_cpu_put(policy);
|
||||
return;
|
||||
}
|
||||
|
||||
prev_high = READ_ONCE(cpudata->prefcore_ranking);
|
||||
highest_perf_changed = (prev_high != cur_high);
|
||||
|
@ -850,8 +852,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
|
|||
if (cur_high < CPPC_MAX_PERF)
|
||||
sched_set_itmt_core_prio((int)cur_high, cpu);
|
||||
}
|
||||
|
||||
free_cpufreq_put:
|
||||
cpufreq_cpu_put(policy);
|
||||
|
||||
if (!highest_perf_changed)
|
||||
|
|
|
@ -1571,7 +1571,8 @@ static int cpufreq_online(unsigned int cpu)
|
|||
policy->cdev = of_cpufreq_cooling_register(policy);
|
||||
|
||||
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
|
||||
if (policy->boost_enabled != cpufreq_boost_enabled()) {
|
||||
if (cpufreq_driver->set_boost &&
|
||||
policy->boost_enabled != cpufreq_boost_enabled()) {
|
||||
policy->boost_enabled = cpufreq_boost_enabled();
|
||||
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
|
||||
if (ret) {
|
||||
|
|
|
@@ -106,7 +106,7 @@ config ISCSI_IBFT
	select ISCSI_BOOT_SYSFS
	select ISCSI_IBFT_FIND if X86
	depends on ACPI && SCSI && SCSI_LOWLEVEL
	default n
	default n
	help
	  This option enables support for detection and exposing of iSCSI
	  Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to

@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
		str += sprintf_ipaddr(str, nic->ip_addr);
		break;
	case ISCSI_BOOT_ETH_SUBNET_MASK:
		val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
		if (nic->subnet_mask_prefix > 32)
			val = cpu_to_be32(~0);
		else
			val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
		str += sprintf(str, "%pI4", &val);
		break;
	case ISCSI_BOOT_ETH_PREFIX_LEN:

@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS

config GPIO_GRGPIO
	tristate "Aeroflex Gaisler GRGPIO support"
	depends on OF || COMPILE_TEST
	select GPIO_GENERIC
	select IRQ_DOMAIN
	help

@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
|
|||
DECLARE_BITMAP(trigger, MAX_LINE);
|
||||
int ret;
|
||||
|
||||
if (chip->driver_data & PCA_PCAL) {
|
||||
/* Read the current interrupt status from the device */
|
||||
ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
/* Check latched inputs and clear interrupt status */
|
||||
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
/* Apply filter for rising/falling edge selection */
|
||||
bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
|
||||
|
||||
bitmap_and(pending, new_stat, trigger, gc->ngpio);
|
||||
|
||||
return !bitmap_empty(pending, gc->ngpio);
|
||||
}
|
||||
|
||||
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
|
||||
if (ret)
|
||||
return false;
|
||||
|
|
|
@ -1028,20 +1028,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
|
|||
struct configfs_subsystem *subsys = dev->group.cg_subsys;
|
||||
struct gpio_sim_bank *bank;
|
||||
struct gpio_sim_line *line;
|
||||
struct config_item *item;
|
||||
|
||||
/*
|
||||
* The device only needs to depend on leaf line entries. This is
|
||||
* The device only needs to depend on leaf entries. This is
|
||||
* sufficient to lock up all the configfs entries that the
|
||||
* instantiated, alive device depends on.
|
||||
*/
|
||||
list_for_each_entry(bank, &dev->bank_list, siblings) {
|
||||
list_for_each_entry(line, &bank->line_list, siblings) {
|
||||
item = line->hog ? &line->hog->item
|
||||
: &line->group.cg_item;
|
||||
|
||||
if (lock)
|
||||
WARN_ON(configfs_depend_item_unlocked(
|
||||
subsys, &line->group.cg_item));
|
||||
WARN_ON(configfs_depend_item_unlocked(subsys,
|
||||
item));
|
||||
else
|
||||
configfs_undepend_item_unlocked(
|
||||
&line->group.cg_item);
|
||||
configfs_undepend_item_unlocked(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -119,9 +119,10 @@
 * - 3.57.0 - Compute tunneling on GFX10+
 * - 3.58.0 - Add GFX12 DCC support
 * - 3.59.0 - Cleared VRAM
 * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
 */
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 59
#define KMS_DRIVER_MINOR 60
#define KMS_DRIVER_PATCHLEVEL 0

/*

@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
uint32_t num_type, data_format, max_com;
uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;

/* Never copy more than 256MiB at once to avoid a timeout */

@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
write_compress_disable =
AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
write_compress_disable));
}

r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,

@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1

#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)

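The new WRITE_COMPRESS_DISABLE copy flag slots into the same shift/mask packing scheme as the existing fields above. As a rough illustration of how the SET macro (and its GET counterpart, defined next to it in the real header) round-trip a field, here is a small standalone C sketch; it only mirrors the shifts and masks shown in the hunk and is not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the shift/mask pairs from the hunk above. */
#define COPY_FLAGS_DATA_FORMAT_SHIFT            8
#define COPY_FLAGS_DATA_FORMAT_MASK             0x3f
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK  0x1

/* Pack a field value into the flags word: mask first, then shift into place. */
#define COPY_FLAGS_SET(field, value) \
	(((uint32_t)(value) & COPY_FLAGS_##field##_MASK) << COPY_FLAGS_##field##_SHIFT)

/* Unpack: shift back down, then mask off the neighbouring fields. */
#define COPY_FLAGS_GET(flags, field) \
	(((uint32_t)(flags) >> COPY_FLAGS_##field##_SHIFT) & COPY_FLAGS_##field##_MASK)

int main(void)
{
	uint32_t flags = 0;

	flags |= COPY_FLAGS_SET(DATA_FORMAT, 0x2a);
	flags |= COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE, 1);

	/* Prints 0x2a and 1: each field round-trips independently because
	 * DATA_FORMAT occupies bits 8..13 and WRITE_COMPRESS_DISABLE bit 14. */
	printf("data_format=0x%x write_compress_disable=%u\n",
	       COPY_FLAGS_GET(flags, DATA_FORMAT),
	       COPY_FLAGS_GET(flags, WRITE_COMPRESS_DISABLE));
	return 0;
}
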
@@ -1741,11 +1741,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
uint32_t num_type, data_format, max_com;
uint32_t num_type, data_format, max_com, write_cm;

max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;

ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |

@@ -1762,7 +1763,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;

@@ -2133,7 +2133,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c

dc_enable_stereo(dc, context, dc_streams, context->stream_count);

if (context->stream_count > get_seamless_boot_stream_count(context) ||
if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);

@@ -63,8 +63,7 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,

bool should_use_dmub_lock(struct dc_link *link)
{
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
link->psr_settings.psr_version == DC_PSR_VERSION_1)
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;

if (link->replay_settings.replay_feature_enabled)

@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)

ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
frame_warn_flag := -Wframe-larger-than=3072
else
frame_warn_flag := -Wframe-larger-than=2048
endif
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
frame_warn_limit := 3072
else
frame_warn_limit := 2048
endif

ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
endif
endif

CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)

@@ -28,15 +28,19 @@ dml2_ccflags := $(CC_FLAGS_FPU)
dml2_rcflags := $(CC_FLAGS_NO_FPU)

ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
frame_warn_flag := -Wframe-larger-than=4096
else
frame_warn_flag := -Wframe-larger-than=3072
endif
else
frame_warn_flag := -Wframe-larger-than=2048
endif
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
frame_warn_limit := 4096
else
frame_warn_limit := 3072
endif
else
frame_warn_limit := 2048
endif

ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
endif
endif

subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2

@@ -1017,7 +1017,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;

ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);

@@ -1042,7 +1042,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;

ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);

populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);

@@ -786,7 +786,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:

@@ -1343,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;

ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);

populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);

@@ -1383,7 +1383,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;

ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);

populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(

@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);

hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

return wm_pending;
}

@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}

@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

@@ -500,6 +500,8 @@ void hubp3_init(struct hubp *hubp)
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);

REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);

hubp_reset(hubp);
}

@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);

REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,

@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
}

hws->funcs.init_pipes(dc, dc->current_state);
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}

@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
if (!formats) {
kfree(kwb_conn);
return -ENOMEM;
}

err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,

@@ -195,7 +195,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;

for (i = 0; i < 200; ++i) {
for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,

@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;

#ifndef CONFIG_MEDIA_CEC_RC
/*
* CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
* cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
*
* Do this here as well to ensure the tests against cec_caps are
* correct.
*/
cec_caps &= ~CEC_CAP_RC;
#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);

mutex_lock(&aux->cec.lock);

@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;

if (aux->cec.adap) {
if (aux->cec.adap->capabilities == cec_caps &&
/* Check if the adapter properties have changed */
if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
(cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);

@@ -41,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;

WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
if (WARN_ON(source_min >= source_max) ||
WARN_ON(target_min > target_max))
return target_min;

/* defensive */
source_val = clamp(source_val, source_min, source_max);

@@ -1791,7 +1791,7 @@ int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
if (DISPLAY_VER(display) == 11)
return 10;

return 0;
return intel_dp_dsc_min_src_input_bpc();
}

int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,

@@ -2072,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
if (valid_dsc_bpp[i] < dsc_min_bpp)
for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
if (valid_dsc_bpp[i] < dsc_min_bpp ||
valid_dsc_bpp[i] > dsc_max_bpp)
continue;
if (valid_dsc_bpp[i] > dsc_max_bpp)
break;

ret = dsc_compute_link_config(intel_dp,
pipe_config,

@@ -2829,7 +2828,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,

crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);

/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;

@@ -2840,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}

@@ -341,6 +341,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,

break;
}

/* Allow using zero step to indicate one try */
if (!step)
break;
}

if (slots < 0) {

@@ -41,7 +41,7 @@ intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
u32 rekey_bit = 0;

/* Here we assume HDMI is in TMDS mode of operation */
if (encoder->type != INTEL_OUTPUT_HDMI)
if (!intel_encoder_is_hdmi(encoder))
return;

if (DISPLAY_VER(display) >= 30) {

@@ -2188,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)

drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");

ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
true);
goto out;
}

drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
connector->base.base.id, connector->base.name,
ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",

@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
};

static const u32 icl_sdr_uv_plane_formats[] = {

@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
};

static const u32 icl_hdr_plane_formats[] = {

@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
struct sgt_iter sgt_iter;
struct page *page;
int ret;

/*

@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
shmem_sg_free_table(st, mapping, false, false);
kfree(st);

max_segment = PAGE_SIZE;

@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}

static void __update_guc_busyness_running_state(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long flags;

spin_lock_irqsave(&guc->timestamp.lock, flags);
for_each_engine(engine, gt, id)
engine->stats.guc.running = false;
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}

static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);

@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;

/* Assume no engines are running and set running state to false */
__update_guc_busyness_running_state(guc);

/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker

@@ -5519,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
ce->ring->head,
ce->lrc_reg_state[CTX_RING_HEAD]);
drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
ce->ring->tail,
ce->lrc_reg_state[CTX_RING_TAIL]);
if (intel_context_pin_if_active(ce)) {
drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
ce->ring->head,
ce->lrc_reg_state[CTX_RING_HEAD]);
drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
ce->ring->tail,
ce->lrc_reg_state[CTX_RING_TAIL]);
intel_context_unpin(ce);
} else {
drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
ce->ring->head);
drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
ce->ring->tail);
}
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",

@@ -51,6 +51,10 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
#define OAG_OACONTROL_USED_BITS \
(OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)

#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)

@@ -78,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
#define OAM_OACONTROL_USED_BITS \
(OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)

@@ -119,11 +119,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);

/*
* Don't add a new section header here because the mesa debug decoder
* tool expects the context information to be in the 'GuC CT' section.
*/
/* drm_puts(&p, "\n**** Contexts ****\n"); */
drm_puts(&p, "\n**** Contexts ****\n");
xe_guc_exec_queue_snapshot_print(ss->ge, &p);

drm_puts(&p, "\n**** Job ****\n");

@@ -395,42 +391,34 @@ int xe_devcoredump_init(struct xe_device *xe)
/**
* xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
*
* The output is split to multiple lines because some print targets, e.g. dmesg
* cannot handle arbitrarily long lines. Note also that printing to dmesg in
* piece-meal fashion is not possible, each separate call to drm_puts() has a
* line-feed automatically added! Therefore, the entire output line must be
* constructed in a local buffer first, then printed in one atomic output call.
* The output is split into multiple calls to drm_puts() because some print
* targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
* add newlines, as is the case with dmesg: each drm_puts() call creates a
* separate line.
*
* There is also a scheduler yield call to prevent the 'task has been stuck for
* 120s' kernel hang check feature from firing when printing to a slow target
* such as dmesg over a serial port.
*
* TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
*
* @p: the printer object to output to
* @prefix: optional prefix to add to output string
* @suffix: optional suffix to add at the end. 0 disables it and is
* not added to the output, which is useful when using multiple calls
* to dump data to @p
* @blob: the Binary Large OBject to dump out
* @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
* @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
*/
void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size)
{
const u32 *blob32 = (const u32 *)blob;
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;

/*
* Splitting blobs across multiple lines is not compatible with the mesa
* debug decoder tool. Note that even dropping the explicit '\n' below
* doesn't help because the GuC log is so big some underlying implementation
* still splits the lines at 512K characters. So just bail completely for
* the moment.
*/
return;

#define DMESG_MAX_LINE_LEN 800
#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
/* Always leave space for the suffix char and the \0 */
#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */

if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);

@@ -462,7 +450,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
line_pos += strlen(line_buff + line_pos);

if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;

drm_puts(p, line_buff);

@@ -474,10 +461,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
}
}

if (line_pos) {
line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
if (suffix)
line_buff[line_pos++] = suffix;

if (line_pos) {
line_buff[line_pos++] = 0;
drm_puts(p, line_buff);
}

@@ -29,7 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif

void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size);

#endif

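The reworked kernel-doc above describes the line-building scheme: each 32-bit word is ASCII85-encoded into a small scratch buffer, appended to a line buffer, and the line is flushed once fewer than MIN_SPACE characters remain, with the caller-supplied suffix emitted only after the final word. A minimal userspace sketch of that scheme follows; encode85() mirrors the helper in include/linux/ascii85.h, while MAX_LINE_LEN, print_blob() and the fputs() stand-in for drm_puts() are invented for the example and deliberately tiny so the wrapping is visible:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASCII85_BUFSZ 6           /* 5 output chars + NUL */
#define MAX_LINE_LEN  32          /* tiny on purpose, so the example wraps */
#define MIN_SPACE     (ASCII85_BUFSZ + 2)

/* Minimal ASCII85 word encoder, modelled on include/linux/ascii85.h:
 * a zero word collapses to "z", anything else becomes five digits
 * in base 85, most significant first, offset from '!'. */
static const char *encode85(uint32_t in, char out[ASCII85_BUFSZ])
{
	if (in == 0)
		return "z";
	out[5] = '\0';
	for (int i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}
	return out;
}

/* Accumulate encoded words into a line buffer and flush it when full,
 * appending the caller's suffix (if any) only after the final word. */
static void print_blob(const uint32_t *blob, size_t words, char suffix)
{
	char buff[ASCII85_BUFSZ], line[MAX_LINE_LEN + 2];
	size_t pos = 0;

	for (size_t i = 0; i < words; i++) {
		strcpy(line + pos, encode85(blob[i], buff));
		pos += strlen(line + pos);

		if (pos + MIN_SPACE >= MAX_LINE_LEN) {
			fputs(line, stdout);      /* stand-in for drm_puts() */
			fputc('\n', stdout);
			pos = 0;
		}
	}

	if (suffix)
		line[pos++] = suffix;
	if (pos) {
		line[pos] = '\0';
		fputs(line, stdout);
	}
}

int main(void)
{
	uint32_t blob[] = { 0, 0xdeadbeef, 42, 7, 0xffffffff, 1, 2, 3 };

	print_blob(blob, sizeof(blob) / sizeof(blob[0]), '\n');
	return 0;
}
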
@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

if (IS_SRIOV_PF(gt_to_xe(gt)))
if (IS_SRIOV_PF(gt_to_xe(gt))) {
xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
}

xe_force_wake_put(gt_to_fw(gt), fw_ref);

@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}

/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
*
* Late one-time initialization of the PF data.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
return xe_gt_sriov_pf_migration_init(gt);
}

static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;

@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
pf_enable_ggtt_guest_update(gt);

xe_gt_sriov_pf_service_update(gt);
xe_gt_sriov_pf_migration_init(gt);
}

static u32 pf_get_vf_regs_stride(struct xe_device *xe)

@@ -10,6 +10,7 @@ struct xe_gt;

#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);

@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}

static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
return 0;
}

static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}

@@ -1724,7 +1724,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
snapshot->g2h_outstanding);

if (snapshot->ctb)
xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
xe_print_blob_ascii85(p, "CTB data", '\n',
snapshot->ctb, 0, snapshot->ctb_size);
} else {
drm_puts(p, "CT disabled\n");
}

@@ -211,8 +211,10 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
const char *prefix = i ? NULL : "Log data";
char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;

xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
remain -= size;
}
}

@@ -237,7 +237,6 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
unsigned long flags;
bool pollin;

spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

@@ -282,11 +281,11 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;

available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
pollin = available >= stream->wait_num_reports * report_size;
stream->pollin = available >= stream->wait_num_reports * report_size;

spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

return pollin;
return stream->pollin;
}

static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)

@@ -294,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);

if (xe_oa_buffer_check_unlocked(stream)) {
stream->pollin = true;
if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
}

hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));

@@ -452,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}

static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
{
return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
}

static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;

@@ -472,14 +475,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;

xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}

static void xe_oa_disable(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;

xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,

@@ -2534,6 +2537,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}

xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);

/* Ensure MMIO trigger remains disabled till there is a stream */
xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));

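The xe_oa hunks above switch OACONTROL programming from xe_mmio_write32() to xe_mmio_rmw32() with a per-unit used-bits mask, so enabling or disabling a stream only touches the bits the driver owns and leaves the rest of the register alone. Below is a minimal sketch of that read-modify-write pattern; the fake register, the helper names, and the OACONTROL_USED_BITS value are invented for illustration and stand in for the real OAG/OAM definitions and xe_mmio helpers:

#include <stdint.h>
#include <stdio.h>

/* A fake register with some bits owned by other logic (the top nibble). */
static uint32_t fake_reg = 0xf0000000;

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* Read-modify-write: clear only the bits in @clr, then OR in @set. */
static uint32_t reg_rmw(uint32_t clr, uint32_t set)
{
	uint32_t old = reg_read();

	reg_write((old & ~clr) | set);
	return old;
}

#define OACONTROL_USED_BITS 0x000003ff	/* example mask of driver-owned bits */

int main(void)
{
	/* A plain write of 0x5 would wipe the 0xf0000000 bits; masking with
	 * the used-bits set leaves them untouched. */
	reg_rmw(OACONTROL_USED_BITS, 0x5);
	printf("reg = 0x%08x\n", fake_reg);	/* prints 0xf0000005 */

	/* Disabling clears only the driver-owned bits. */
	reg_rmw(OACONTROL_USED_BITS, 0);
	printf("reg = 0x%08x\n", fake_reg);	/* prints 0xf0000000 */
	return 0;
}
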
@@ -1300,12 +1300,14 @@ new_device_store(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}

info.flags |= I2C_CLIENT_USER;

client = i2c_new_client_device(adap, &info);
if (IS_ERR(client))
return PTR_ERR(client);

/* Keep track of the added device */
mutex_lock(&adap->userspace_clients_lock);
list_add_tail(&client->detected, &adap->userspace_clients);
mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);

@@ -1313,15 +1315,6 @@ new_device_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(new_device);

static int __i2c_find_user_addr(struct device *dev, const void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
unsigned short addr = *(unsigned short *)addrp;

return client && client->flags & I2C_CLIENT_USER &&
i2c_encode_flags_to_addr(client) == addr;
}

/*
* And of course let the users delete the devices they instantiated, if
* they got it wrong. This interface can only be used to delete devices

@@ -1336,7 +1329,7 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
struct device *child_dev;
struct i2c_client *client, *next;
unsigned short addr;
char end;
int res;

@@ -1352,19 +1345,28 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}

mutex_lock(&core_lock);
/* Make sure the device was added through sysfs */
child_dev = device_find_child(&adap->dev, &addr, __i2c_find_user_addr);
if (child_dev) {
i2c_unregister_device(i2c_verify_client(child_dev));
put_device(child_dev);
} else {
dev_err(dev, "Can't find userspace-created device at %#x\n", addr);
count = -ENOENT;
}
mutex_unlock(&core_lock);
res = -ENOENT;
mutex_lock_nested(&adap->userspace_clients_lock,
i2c_adapter_depth(adap));
list_for_each_entry_safe(client, next, &adap->userspace_clients,
detected) {
if (i2c_encode_flags_to_addr(client) == addr) {
dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
"delete_device", client->name, client->addr);

return count;
list_del(&client->detected);
i2c_unregister_device(client);
res = count;
break;
}
}
mutex_unlock(&adap->userspace_clients_lock);

if (res < 0)
dev_err(dev, "%s: Can't find device in list\n",
"delete_device");
return res;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
delete_device_store);

@@ -1535,6 +1537,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);

/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)

@@ -1700,6 +1704,23 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);

static void i2c_do_del_adapter(struct i2c_driver *driver,
struct i2c_adapter *adapter)
{
struct i2c_client *client, *_n;

/* Remove the devices we created ourselves as the result of hardware
* probing (using a driver's detect method) */
list_for_each_entry_safe(client, _n, &driver->clients, detected) {
if (client->adapter == adapter) {
dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
client->name, client->addr);
list_del(&client->detected);
i2c_unregister_device(client);
}
}
}

static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);

@@ -1715,6 +1736,12 @@ static int __unregister_dummy(struct device *dev, void *dummy)
return 0;
}

static int __process_removed_adapter(struct device_driver *d, void *data)
{
i2c_do_del_adapter(to_i2c_driver(d), data);
return 0;
}

/**
* i2c_del_adapter - unregister I2C adapter
* @adap: the adapter being unregistered

@@ -1726,6 +1753,7 @@ static int __unregister_dummy(struct device *dev, void *dummy)
void i2c_del_adapter(struct i2c_adapter *adap)
{
struct i2c_adapter *found;
struct i2c_client *client, *next;

/* First make sure that this adapter was ever added */
mutex_lock(&core_lock);

@@ -1737,16 +1765,31 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}

i2c_acpi_remove_space_handler(adap);
/* Tell drivers about this removal */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap,
__process_removed_adapter);
mutex_unlock(&core_lock);

/* Remove devices instantiated from sysfs */
mutex_lock_nested(&adap->userspace_clients_lock,
i2c_adapter_depth(adap));
list_for_each_entry_safe(client, next, &adap->userspace_clients,
detected) {
dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
client->addr);
list_del(&client->detected);
i2c_unregister_device(client);
}
mutex_unlock(&adap->userspace_clients_lock);

/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
mutex_lock(&core_lock);
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
mutex_unlock(&core_lock);

/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);

@@ -1966,6 +2009,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
INIT_LIST_HEAD(&driver->clients);

/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.

@@ -1983,13 +2027,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
}
EXPORT_SYMBOL(i2c_register_driver);

static int __i2c_unregister_detected_client(struct device *dev, void *argp)
static int __process_removed_driver(struct device *dev, void *data)
{
struct i2c_client *client = i2c_verify_client(dev);

if (client && client->flags & I2C_CLIENT_AUTO)
i2c_unregister_device(client);

if (dev->type == &i2c_adapter_type)
i2c_do_del_adapter(data, to_i2c_adapter(dev));
return 0;
}

@@ -2000,12 +2041,7 @@ static int __i2c_unregister_detected_client(struct device *dev, void *argp)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
mutex_lock(&core_lock);
/* Satisfy __must_check, function can't fail */
if (driver_for_each_device(&driver->driver, NULL, NULL,
__i2c_unregister_detected_client)) {
}
mutex_unlock(&core_lock);
i2c_for_each_dev(driver, __process_removed_driver);

driver_unregister(&driver->driver);
pr_debug("driver [%s] unregistered\n", driver->driver.name);

@@ -2432,7 +2468,6 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
info.flags = I2C_CLIENT_AUTO;
err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it

@@ -2459,7 +2494,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_client_device(adapter, &info);
if (IS_ERR(client))
if (!IS_ERR(client))
list_add_tail(&client->detected, &driver->clients);
else
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
info.type, info.addr);
}

@@ -169,6 +169,7 @@ config IXP4XX_IRQ

config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
depends on MCHP_LAN966X_PCI || COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help

@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}

if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))

@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
struct mvebu_icu_msi_data *msi_data = d->host_data;
struct msi_domain_info *info = d->host_data;
struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;

/* Check the count of the parameters in dt */

@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}

static struct irq_chip partition_irq_chip = {