mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.14-rc6).

Conflicts:

tools/testing/selftests/drivers/net/ping.py
  75cc19c8ff ("selftests: drv-net: add xdp cases for ping.py")
  de94e86974 ("selftests: drv-net: store addresses in dict indexed by ipver")
https://lore.kernel.org/netdev/20250311115758.17a1d414@canb.auug.org.au/

net/core/devmem.c
  a70f891e0f ("net: devmem: do not WARN conditionally after netdev_rx_queue_restart()")
  1d22d3060b ("net: drop rtnl_lock for queue_mgmt operations")
https://lore.kernel.org/netdev/20250313114929.43744df1@canb.auug.org.au/

Adjacent changes:

tools/testing/selftests/net/Makefile
  6f50175cca ("selftests: Add IPv6 link-local address generation tests for GRE devices.")
  2e5584e0f9 ("selftests/net: expand cmsg_ipv6.sh with ipv4")

drivers/net/ethernet/broadcom/bnxt/bnxt.c
  661958552e ("eth: bnxt: do not use BNXT_VNIC_NTUPLE unconditionally in queue restart logic")
  fe96d717d3 ("bnxt_en: Extend queue stop/start for TX rings")

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 941defcea7
315 changed files with 3545 additions and 1364 deletions
.mailmap | 1

@@ -691,6 +691,7 @@ Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org>
Sumit Semwal <sumit.semwal@ti.com>
Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
Sven Eckelmann <sven@narfation.org> <seckelmann@datto.com>
@@ -176,7 +176,7 @@ Configuring the kernel
values without prompting.

"make defconfig"   Create a ./.config file by using the default
-                   symbol values from either arch/$ARCH/defconfig
+                   symbol values from either arch/$ARCH/configs/defconfig
                    or arch/$ARCH/configs/${PLATFORM}_defconfig,
                    depending on the architecture.
@@ -212,6 +212,17 @@ pid>/``).
This value defaults to 0.

+core_sort_vma
+=============
+
+The default coredump writes VMAs in address order. By setting
+``core_sort_vma`` to 1, VMAs will be written from smallest size
+to largest size. This is known to break at least elfutils, but
+can be handy when dealing with very large (and truncated)
+coredumps where the more useful debugging details are included
+in the smaller VMAs.
+
core_uses_pid
=============
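As a quick illustration of the new knob (not part of the patch itself), a minimal C sketch that enables size-ordered VMA coredumps at run time; it assumes the conventional /proc/sys/kernel/core_sort_vma path introduced above and root privileges:

/*
 * Illustrative sketch, not from the patch: enable size-ordered VMA
 * coredumps for the running system by writing "1" to the sysctl file.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_sort_vma", "w");

	if (!f) {
		perror("core_sort_vma");	/* kernel too old, or /proc not mounted */
		return 1;
	}
	fputs("1\n", f);	/* 0 = address order (default), 1 = smallest VMA first */
	return fclose(f) ? 1 : 0;
}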
@@ -146,6 +146,7 @@ properties:
    maxItems: 2

  pwm-names:
+    minItems: 1
    items:
      - const: convst1
      - const: convst2
@@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system:
* sched_rt_period_us takes values from 1 to INT_MAX.
* sched_rt_runtime_us takes values from -1 to sched_rt_period_us.
* A run time of -1 specifies runtime == period, i.e. no limit.
+* sched_rt_runtime_us/sched_rt_period_us > 0.05 in order to preserve
+  bandwidth for the fair dl_server. For an accurate value, check the average
+  of runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/

2.2 Default behaviour
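For illustration only (not from the patch), a small C sketch, assuming the standard /proc/sys/kernel files, that reads the two settings above and reports the resulting RT bandwidth ratio:

/*
 * Illustrative sketch: read the system-wide RT bandwidth settings and
 * report the runtime/period ratio. A runtime of -1 means "no limit"
 * (runtime == period).
 */
#include <stdio.h>

static long read_long(const char *path)
{
	long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	long period = read_long("/proc/sys/kernel/sched_rt_period_us");
	long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");

	if (runtime < 0)
		printf("RT runtime unlimited (runtime == period = %ld us)\n", period);
	else if (period > 0)
		printf("RT bandwidth: %ld / %ld us = %.2f of each period\n",
		       runtime, period, (double)runtime / period);
	return 0;
}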
MAINTAINERS | 52
@@ -124,6 +124,7 @@ F: include/net/ieee80211_radiotap.h
F: include/net/iw_handler.h
F: include/net/wext.h
F: include/uapi/linux/nl80211.h
+N: include/uapi/linux/nl80211-.*
F: include/uapi/linux/wireless.h
F: net/wireless/

@@ -514,7 +515,7 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-F: drivers/net/wireless/admtek/adm8211.*
+F: drivers/net/wireless/admtek/

ADP1050 HARDWARE MONITOR DRIVER
M: Radu Sabau <radu.sabau@analog.com>

@@ -6207,7 +6208,7 @@ F: Documentation/process/cve.rst
CW1200 WLAN driver
S: Orphan
-F: drivers/net/wireless/st/cw1200/
+F: drivers/net/wireless/st/
F: include/linux/platform_data/net-cw1200.h

CX18 VIDEO4LINUX DRIVER

@@ -9443,14 +9444,11 @@ F: include/linux/fscrypt.h
F: include/uapi/linux/fscrypt.h

FSI SUBSYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
-M: Joel Stanley <joel@jms.id.au>
-R: Alistar Popple <alistair@popple.id.au>
-R: Eddie James <eajames@linux.ibm.com>
+M: Eddie James <eajames@linux.ibm.com>
+R: Ninad Palsule <ninad@linux.ibm.com>
L: linux-fsi@lists.ozlabs.org
S: Supported
Q: http://patchwork.ozlabs.org/project/linux-fsi/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/fsi.git
F: drivers/fsi/
F: include/linux/fsi*.h
F: include/trace/events/fsi*.h
@@ -12876,7 +12874,7 @@ F: include/keys/trusted_dcp.h
F: security/keys/trusted-keys/trusted_dcp.c

KEYS-TRUSTED-TEE
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported

@@ -13999,6 +13997,7 @@ MARVELL LIBERTAS WIRELESS DRIVER
L: libertas-dev@lists.infradead.org
S: Orphan
F: drivers/net/wireless/marvell/libertas/
+F: drivers/net/wireless/marvell/libertas_tf/

MARVELL MACCHIATOBIN SUPPORT
M: Russell King <linux@armlinux.org.uk>

@@ -15668,7 +15667,7 @@ M: Ajay Singh <ajay.kathat@microchip.com>
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
L: linux-wireless@vger.kernel.org
S: Supported
-F: drivers/net/wireless/microchip/wilc1000/
+F: drivers/net/wireless/microchip/

MICROSEMI MIPS SOCS
M: Alexandre Belloni <alexandre.belloni@bootlin.com>

@@ -16454,6 +16453,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
+X: drivers/net/wireless/ath/
+X: drivers/net/wireless/broadcom/
+X: drivers/net/wireless/intel/
+X: drivers/net/wireless/intersil/
+X: drivers/net/wireless/marvell/
+X: drivers/net/wireless/mediatek/mt76/
+X: drivers/net/wireless/mediatek/mt7601u/
+X: drivers/net/wireless/microchip/
+X: drivers/net/wireless/purelifi/
+X: drivers/net/wireless/quantenna/
+X: drivers/net/wireless/ralink/
+X: drivers/net/wireless/realtek/
+X: drivers/net/wireless/rsi/
+X: drivers/net/wireless/silabs/
+X: drivers/net/wireless/st/
+X: drivers/net/wireless/ti/
+X: drivers/net/wireless/zydas/

NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
@@ -17677,7 +17693,7 @@ F: Documentation/ABI/testing/sysfs-bus-optee-devices
F: drivers/tee/optee/

OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c

@@ -17838,7 +17854,7 @@ M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
-F: drivers/net/wireless/intersil/p54/
+F: drivers/net/wireless/intersil/

PACKET SOCKETS
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>

@@ -19115,7 +19131,7 @@ PURELIFI PLFXLC DRIVER
M: Srinivasan Raju <srini.raju@purelifi.com>
L: linux-wireless@vger.kernel.org
S: Supported
-F: drivers/net/wireless/purelifi/plfxlc/
+F: drivers/net/wireless/purelifi/

PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely@pobox.com>

@@ -19666,7 +19682,7 @@ M: Igor Mitsyanko <imitsyanko@quantenna.com>
R: Sergey Matyukevich <geomatsi@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-F: drivers/net/wireless/quantenna
+F: drivers/net/wireless/quantenna/

RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>

@@ -19746,7 +19762,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
M: Stanislaw Gruszka <stf_xl@wp.pl>
L: linux-wireless@vger.kernel.org
S: Maintained
-F: drivers/net/wireless/ralink/rt2x00/
+F: drivers/net/wireless/ralink/

RAMDISK RAM BLOCK DEVICE DRIVER
M: Jens Axboe <axboe@kernel.dk>
@@ -21094,6 +21110,7 @@ F: include/linux/clk/samsung.h

SAMSUNG SPI DRIVERS
M: Andi Shyti <andi.shyti@kernel.org>
+R: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-spi@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained

@@ -21504,7 +21521,6 @@ F: include/linux/slimbus.h

SFC NETWORK DRIVER
M: Edward Cree <ecree.xilinx@gmail.com>
M: Martin Habets <habetsm.xilinx@gmail.com>
L: netdev@vger.kernel.org
L: linux-net-drivers@amd.com
S: Maintained

@@ -21713,7 +21729,7 @@ SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
S: Supported
F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
-F: drivers/net/wireless/silabs/wfx/
+F: drivers/net/wireless/silabs/

SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>

@@ -23290,7 +23306,7 @@ F: include/media/i2c/tw9910.h

TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander@linaro.org>
-R: Sumit Garg <sumit.garg@linaro.org>
+R: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-tee

@@ -26213,7 +26229,7 @@ F: mm/zbud.c
ZD1211RW WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-F: drivers/net/wireless/zydas/zd1211rw/
+F: drivers/net/wireless/zydas/

ZD1301 MEDIA DRIVER
L: linux-media@vger.kernel.org
Makefile | 7

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -1123,6 +1123,11 @@ endif
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))

+# userspace programs are linked via the compiler, use the correct linker
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+KBUILD_USERLDFLAGS += --ld-path=$(LD)
+endif
+
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
|
|||
}
|
||||
|
||||
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
|
||||
unsigned long pfn, struct vm_fault *vmf)
|
||||
unsigned long pfn, bool need_lock)
|
||||
{
|
||||
spinlock_t *ptl;
|
||||
pgd_t *pgd;
|
||||
|
@ -99,12 +99,11 @@ again:
|
|||
if (!pte)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If we are using split PTE locks, then we need to take the page
|
||||
* lock here. Otherwise we are using shared mm->page_table_lock
|
||||
* which is already locked, thus cannot take it.
|
||||
*/
|
||||
if (ptl != vmf->ptl) {
|
||||
if (need_lock) {
|
||||
/*
|
||||
* Use nested version here to indicate that we are already
|
||||
* holding one similar spinlock.
|
||||
*/
|
||||
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
|
||||
if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
|
||||
pte_unmap_unlock(pte, ptl);
|
||||
|
@ -114,7 +113,7 @@ again:
|
|||
|
||||
ret = do_adjust_pte(vma, address, pfn, pte);
|
||||
|
||||
if (ptl != vmf->ptl)
|
||||
if (need_lock)
|
||||
spin_unlock(ptl);
|
||||
pte_unmap(pte);
|
||||
|
||||
|
@ -123,9 +122,10 @@ again:
|
|||
|
||||
static void
|
||||
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep, unsigned long pfn,
|
||||
struct vm_fault *vmf)
|
||||
unsigned long addr, pte_t *ptep, unsigned long pfn)
|
||||
{
|
||||
const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
|
||||
const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct vm_area_struct *mpnt;
|
||||
unsigned long offset;
|
||||
|
@ -141,6 +141,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
|
|||
*/
|
||||
flush_dcache_mmap_lock(mapping);
|
||||
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
|
||||
/*
|
||||
* If we are using split PTE locks, then we need to take the pte
|
||||
* lock. Otherwise we are using shared mm->page_table_lock which
|
||||
* is already locked, thus cannot take it.
|
||||
*/
|
||||
bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS);
|
||||
unsigned long mpnt_addr;
|
||||
|
||||
/*
|
||||
* If this VMA is not in our MM, we can ignore it.
|
||||
* Note that we intentionally mask out the VMA
|
||||
|
@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
|
|||
if (!(mpnt->vm_flags & VM_MAYSHARE))
|
||||
continue;
|
||||
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
|
||||
aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf);
|
||||
mpnt_addr = mpnt->vm_start + offset;
|
||||
|
||||
/* Avoid deadlocks by not grabbing the same PTE lock again. */
|
||||
if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr)
|
||||
need_lock = false;
|
||||
aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
|
||||
}
|
||||
flush_dcache_mmap_unlock(mapping);
|
||||
if (aliases)
|
||||
|
@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
|
|||
__flush_dcache_folio(mapping, folio);
|
||||
if (mapping) {
|
||||
if (cache_is_vivt())
|
||||
make_coherent(mapping, vma, addr, ptep, pfn, vmf);
|
||||
make_coherent(mapping, vma, addr, ptep, pfn);
|
||||
else if (vma->vm_flags & VM_EXEC)
|
||||
__flush_icache_all();
|
||||
}
|
||||
|
|
|
@ -16,6 +16,32 @@
|
|||
#include <asm/sysreg.h>
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
|
||||
.macro init_el2_hcr val
|
||||
mov_q x0, \val
|
||||
|
||||
/*
|
||||
* Compliant CPUs advertise their VHE-onlyness with
|
||||
* ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
|
||||
* can reset into an UNKNOWN state and might not read as 1 until it has
|
||||
* been initialized explicitly.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*
|
||||
* Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
|
||||
* indicating whether the CPU is running in E2H mode.
|
||||
*/
|
||||
mrs_s x1, SYS_ID_AA64MMFR4_EL1
|
||||
sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
|
||||
cmp x1, #0
|
||||
b.ge .LnVHE_\@
|
||||
|
||||
orr x0, x0, #HCR_E2H
|
||||
.LnVHE_\@:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
.endm
|
||||
|
||||
.macro __init_el2_sctlr
|
||||
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
|
||||
msr sctlr_el2, x0
|
||||
|
@ -244,11 +270,6 @@
|
|||
.Lskip_gcs_\@:
|
||||
.endm
|
||||
|
||||
.macro __init_el2_nvhe_prepare_eret
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
.endm
|
||||
|
||||
.macro __init_el2_mpam
|
||||
/* Memory Partitioning And Monitoring: disable EL2 traps */
|
||||
mrs x1, id_aa64pfr0_el1
|
||||
|
|
|
@ -298,25 +298,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
|||
msr sctlr_el2, x0
|
||||
isb
|
||||
0:
|
||||
mov_q x0, HCR_HOST_NVHE_FLAGS
|
||||
|
||||
/*
|
||||
* Compliant CPUs advertise their VHE-onlyness with
|
||||
* ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
|
||||
* RES1 in that case. Publish the E2H bit early so that
|
||||
* it can be picked up by the init_el2_state macro.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*/
|
||||
mrs_s x1, SYS_ID_AA64MMFR4_EL1
|
||||
tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
|
||||
|
||||
orr x0, x0, #HCR_E2H
|
||||
1:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
|
||||
init_el2_hcr HCR_HOST_NVHE_FLAGS
|
||||
init_el2_state
|
||||
|
||||
/* Hypervisor stub */
|
||||
|
@ -339,7 +322,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
|||
msr sctlr_el1, x1
|
||||
mov x2, xzr
|
||||
3:
|
||||
__init_el2_nvhe_prepare_eret
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
orr x0, x0, x2
|
||||
|
|
|
@ -73,8 +73,12 @@ __do_hyp_init:
|
|||
eret
|
||||
SYM_CODE_END(__kvm_hyp_init)
|
||||
|
||||
/*
|
||||
* Initialize EL2 CPU state to sane values.
|
||||
*
|
||||
* HCR_EL2.E2H must have been initialized already.
|
||||
*/
|
||||
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
|
||||
/* Initialize EL2 CPU state to sane values. */
|
||||
init_el2_state // Clobbers x0..x2
|
||||
finalise_el2_state
|
||||
ret
|
||||
|
@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
|
|||
|
||||
2: msr SPsel, #1 // We want to use SP_EL{1,2}
|
||||
|
||||
bl __kvm_init_el2_state
|
||||
init_el2_hcr 0
|
||||
|
||||
__init_el2_nvhe_prepare_eret
|
||||
bl __kvm_init_el2_state
|
||||
|
||||
/* Enable MMU, set vectors and stack. */
|
||||
mov x0, x28
|
||||
|
|
|
@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
|
|||
if (is_cpu_on)
|
||||
release_boot_args(boot_args);
|
||||
|
||||
write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
|
||||
write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
|
||||
|
||||
__host_enter(host_ctxt);
|
||||
}
|
||||
|
||||
|
|
|
@ -249,18 +249,6 @@ static __init int setup_node(int pxm)
|
|||
return acpi_map_pxm_to_node(pxm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
|
||||
* I/O localities since SRAT does not list them. I/O localities are
|
||||
* not supported at this point.
|
||||
*/
|
||||
unsigned int numa_distance_cnt;
|
||||
|
||||
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
|
||||
{
|
||||
return slit->locality_count;
|
||||
}
|
||||
|
||||
void __init numa_set_distance(int from, int to, int distance)
|
||||
{
|
||||
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
|
||||
|
|
|
@ -126,14 +126,14 @@ void kexec_reboot(void)
|
|||
/* All secondary cpus go to kexec_smp_wait */
|
||||
if (smp_processor_id() > 0) {
|
||||
relocated_kexec_smp_wait(NULL);
|
||||
unreachable();
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
do_kexec = (void *)reboot_code_buffer;
|
||||
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
|
||||
|
||||
unreachable();
|
||||
BUG();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void)
|
|||
*/
|
||||
static void __init arch_mem_init(char **cmdline_p)
|
||||
{
|
||||
/* Recalculate max_low_pfn for "mem=xxx" */
|
||||
max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
|
||||
|
||||
if (usermem)
|
||||
pr_info("User-defined physical RAM map overwrite\n");
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/tracepoint.h>
|
||||
|
@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
|
|||
mb();
|
||||
}
|
||||
|
||||
void __noreturn arch_cpu_idle_dead(void)
|
||||
static void __noreturn idle_play_dead(void)
|
||||
{
|
||||
register uint64_t addr;
|
||||
register void (*init_fn)(void);
|
||||
|
@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
|
|||
BUG();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
static void __noreturn poll_play_dead(void)
|
||||
{
|
||||
register uint64_t addr;
|
||||
register void (*init_fn)(void);
|
||||
|
||||
idle_task_exit();
|
||||
__this_cpu_write(cpu_state, CPU_DEAD);
|
||||
|
||||
__smp_mb();
|
||||
do {
|
||||
__asm__ __volatile__("nop\n\t");
|
||||
addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
|
||||
} while (addr == 0);
|
||||
|
||||
init_fn = (void *)TO_CACHE(addr);
|
||||
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
|
||||
|
||||
init_fn();
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
static void (*play_dead)(void) = idle_play_dead;
|
||||
|
||||
void __noreturn arch_cpu_idle_dead(void)
|
||||
{
|
||||
play_dead();
|
||||
BUG(); /* play_dead() doesn't return */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
int hibernate_resume_nonboot_cpu_disable(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
play_dead = poll_play_dead;
|
||||
ret = suspend_disable_secondary_cpus();
|
||||
play_dead = idle_play_dead;
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
|
|||
struct kvm_run *run = vcpu->run;
|
||||
unsigned long badv = vcpu->arch.badv;
|
||||
|
||||
/* Inject ADE exception if exceed max GPA size */
|
||||
if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
|
||||
kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
|
||||
return RESUME_GUEST;
|
||||
}
|
||||
|
||||
ret = kvm_handle_mm_fault(vcpu, badv, write);
|
||||
if (ret) {
|
||||
/* Treat as MMIO */
|
||||
|
|
|
@ -317,6 +317,13 @@ int kvm_arch_enable_virtualization_cpu(void)
|
|||
kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
|
||||
read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
|
||||
|
||||
/*
|
||||
* HW Guest CSR registers are lost after CPU suspend and resume.
|
||||
* Clear last_vcpu so that Guest CSR registers forced to reload
|
||||
* from vCPU SW state.
|
||||
*/
|
||||
this_cpu_ptr(vmcs)->last_vcpu = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
{
|
||||
int ret = RESUME_GUEST;
|
||||
unsigned long estat = vcpu->arch.host_estat;
|
||||
u32 intr = estat & 0x1fff; /* Ignore NMI */
|
||||
u32 intr = estat & CSR_ESTAT_IS;
|
||||
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
|
||||
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
|
|
|
@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
if (kvm_pvtime_supported())
|
||||
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
|
||||
|
||||
kvm->arch.gpa_size = BIT(cpu_vabits - 1);
|
||||
/*
|
||||
* cpu_vabits means user address space only (a half of total).
|
||||
* GPA size of VM is the same with the size of user address space.
|
||||
*/
|
||||
kvm->arch.gpa_size = BIT(cpu_vabits);
|
||||
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
|
||||
kvm->arch.invalid_ptes[0] = 0;
|
||||
kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#include <linux/export.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|||
}
|
||||
|
||||
info.length = len;
|
||||
info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
if (filp && is_file_hugepages(filp))
|
||||
info.align_mask = huge_page_mask_align(filp);
|
||||
else
|
||||
info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
|
||||
|
||||
if (dir == DOWN) {
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
|
|
|
@ -44,8 +44,10 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
|
|||
pgd_t *new_pgd;
|
||||
|
||||
new_pgd = __pgd_alloc(mm, 0);
|
||||
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
|
||||
memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
|
||||
if (likely(new_pgd != NULL)) {
|
||||
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
|
||||
memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
|
||||
}
|
||||
return new_pgd;
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ config ARCH_SOPHGO
|
|||
|
||||
config ARCH_SPACEMIT
|
||||
bool "SpacemiT SoCs"
|
||||
select PINCTRL
|
||||
help
|
||||
This enables support for SpacemiT SoC platform hardware.
|
||||
|
||||
|
|
|
@ -266,12 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
|
|||
struct ftrace_ops *op, struct ftrace_regs *fregs)
|
||||
{
|
||||
unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
|
||||
unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15];
|
||||
|
||||
if (unlikely(ftrace_graph_is_dead()))
|
||||
return;
|
||||
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
|
||||
return;
|
||||
if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
|
||||
if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs))
|
||||
*parent = (unsigned long)&return_to_handler;
|
||||
}
|
||||
|
||||
|
|
|
@ -285,10 +285,10 @@ static void __init test_monitor_call(void)
|
|||
return;
|
||||
asm volatile(
|
||||
" mc 0,0\n"
|
||||
"0: xgr %0,%0\n"
|
||||
"0: lhi %[val],0\n"
|
||||
"1:\n"
|
||||
EX_TABLE(0b,1b)
|
||||
: "+d" (val));
|
||||
EX_TABLE(0b, 1b)
|
||||
: [val] "+d" (val));
|
||||
if (!val)
|
||||
panic("Monitor call doesn't work!\n");
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "misc.h"
|
||||
#include <asm/bootparam.h>
|
||||
#include <asm/bootparam_utils.h>
|
||||
#include <asm/e820/types.h>
|
||||
#include <asm/processor.h>
|
||||
#include "pgtable.h"
|
||||
|
@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
|
|||
bool l5_required = false;
|
||||
|
||||
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
|
||||
sanitize_boot_params(bp);
|
||||
boot_params_ptr = bp;
|
||||
|
||||
/*
|
||||
|
|
|
@ -2853,19 +2853,8 @@ struct snp_msg_desc *snp_msg_alloc(void)
|
|||
if (!mdesc->response)
|
||||
goto e_free_request;
|
||||
|
||||
mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE);
|
||||
if (!mdesc->certs_data)
|
||||
goto e_free_response;
|
||||
|
||||
/* initial the input address for guest request */
|
||||
mdesc->input.req_gpa = __pa(mdesc->request);
|
||||
mdesc->input.resp_gpa = __pa(mdesc->response);
|
||||
mdesc->input.data_gpa = __pa(mdesc->certs_data);
|
||||
|
||||
return mdesc;
|
||||
|
||||
e_free_response:
|
||||
free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
|
||||
e_free_request:
|
||||
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
|
||||
e_unmap:
|
||||
|
@ -2885,7 +2874,6 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
|
|||
kfree(mdesc->ctx);
|
||||
free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
|
||||
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
|
||||
free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
|
||||
iounmap((__force void __iomem *)mdesc->secrets);
|
||||
|
||||
memset(mdesc, 0, sizeof(*mdesc));
|
||||
|
@ -3054,7 +3042,7 @@ retry_request:
|
|||
* sequence number must be incremented or the VMPCK must be deleted to
|
||||
* prevent reuse of the IV.
|
||||
*/
|
||||
rc = snp_issue_guest_request(req, &mdesc->input, rio);
|
||||
rc = snp_issue_guest_request(req, &req->input, rio);
|
||||
switch (rc) {
|
||||
case -ENOSPC:
|
||||
/*
|
||||
|
@ -3064,7 +3052,7 @@ retry_request:
|
|||
* order to increment the sequence number and thus avoid
|
||||
* IV reuse.
|
||||
*/
|
||||
override_npages = mdesc->input.data_npages;
|
||||
override_npages = req->input.data_npages;
|
||||
req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
|
||||
|
||||
/*
|
||||
|
@ -3120,7 +3108,7 @@ retry_request:
|
|||
}
|
||||
|
||||
if (override_npages)
|
||||
mdesc->input.data_npages = override_npages;
|
||||
req->input.data_npages = override_npages;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -3158,6 +3146,11 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
|
|||
*/
|
||||
memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
|
||||
|
||||
/* Initialize the input address for guest request */
|
||||
req->input.req_gpa = __pa(mdesc->request);
|
||||
req->input.resp_gpa = __pa(mdesc->response);
|
||||
req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
|
||||
|
||||
rc = __handle_guest_request(mdesc, req, rio);
|
||||
if (rc) {
|
||||
if (rc == -EIO &&
|
||||
|
|
|
@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void)
|
|||
x86_platform.realmode_init = x86_init_noop;
|
||||
x86_init.irqs.pre_vector_init = x86_init_noop;
|
||||
x86_init.timers.timer_init = x86_init_noop;
|
||||
x86_init.resources.probe_roms = x86_init_noop;
|
||||
|
||||
/* Avoid searching for BIOS MP tables */
|
||||
x86_init.mpparse.find_mptable = x86_init_noop;
|
||||
|
|
|
@ -464,7 +464,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
|
|||
enum hv_mem_host_visibility visibility)
|
||||
{
|
||||
struct hv_gpa_range_for_visibility *input;
|
||||
u16 pages_processed;
|
||||
u64 hv_status;
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -493,7 +492,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
|
|||
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
|
||||
hv_status = hv_do_rep_hypercall(
|
||||
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
|
||||
0, input, &pages_processed);
|
||||
0, input, NULL);
|
||||
local_irq_restore(flags);
|
||||
|
||||
if (hv_result_success(hv_status))
|
||||
|
|
|
@ -780,6 +780,7 @@ struct kvm_vcpu_arch {
|
|||
u32 pkru;
|
||||
u32 hflags;
|
||||
u64 efer;
|
||||
u64 host_debugctl;
|
||||
u64 apic_base;
|
||||
struct kvm_lapic *apic; /* kernel irqchip context */
|
||||
bool load_eoi_exitmap_pending;
|
||||
|
|
|
@ -198,9 +198,8 @@
|
|||
.endm
|
||||
|
||||
/*
|
||||
* Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
|
||||
* to the retpoline thunk with a CS prefix when the register requires
|
||||
* a RAX prefix byte to encode. Also see apply_retpolines().
|
||||
* Emits a conditional CS prefix that is compatible with
|
||||
* -mindirect-branch-cs-prefix.
|
||||
*/
|
||||
.macro __CS_PREFIX reg:req
|
||||
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
|
||||
|
@ -420,20 +419,27 @@ static inline void call_depth_return_thunk(void) {}
|
|||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
/*
|
||||
* Emits a conditional CS prefix that is compatible with
|
||||
* -mindirect-branch-cs-prefix.
|
||||
*/
|
||||
#define __CS_PREFIX(reg) \
|
||||
".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
|
||||
".ifc \\rs," reg "\n" \
|
||||
".byte 0x2e\n" \
|
||||
".endif\n" \
|
||||
".endr\n"
|
||||
|
||||
/*
|
||||
* Inline asm uses the %V modifier which is only in newer GCC
|
||||
* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
|
||||
*/
|
||||
# define CALL_NOSPEC \
|
||||
ALTERNATIVE_2( \
|
||||
ANNOTATE_RETPOLINE_SAFE \
|
||||
"call *%[thunk_target]\n", \
|
||||
"call __x86_indirect_thunk_%V[thunk_target]\n", \
|
||||
X86_FEATURE_RETPOLINE, \
|
||||
"lfence;\n" \
|
||||
ANNOTATE_RETPOLINE_SAFE \
|
||||
"call *%[thunk_target]\n", \
|
||||
X86_FEATURE_RETPOLINE_LFENCE)
|
||||
#ifdef CONFIG_MITIGATION_RETPOLINE
|
||||
#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
|
||||
"call __x86_indirect_thunk_%V[thunk_target]\n"
|
||||
#else
|
||||
#define CALL_NOSPEC "call *%[thunk_target]\n"
|
||||
#endif
|
||||
|
||||
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
|
||||
|
||||
|
|
|
@ -23,17 +23,17 @@ typedef union {
|
|||
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
|
||||
|
||||
/*
|
||||
* traditional i386 two-level paging structure:
|
||||
* Traditional i386 two-level paging structure:
|
||||
*/
|
||||
|
||||
#define PGDIR_SHIFT 22
|
||||
#define PTRS_PER_PGD 1024
|
||||
|
||||
|
||||
/*
|
||||
* the i386 is two-level, so we don't really have any
|
||||
* PMD directory physically.
|
||||
* The i386 is two-level, so we don't really have any
|
||||
* PMD directory physically:
|
||||
*/
|
||||
#define PTRS_PER_PMD 1
|
||||
|
||||
#define PTRS_PER_PTE 1024
|
||||
|
||||
|
|
|
@ -203,6 +203,9 @@ struct snp_guest_req {
|
|||
unsigned int vmpck_id;
|
||||
u8 msg_version;
|
||||
u8 msg_type;
|
||||
|
||||
struct snp_req_data input;
|
||||
void *certs_data;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -263,9 +266,6 @@ struct snp_msg_desc {
|
|||
struct snp_guest_msg secret_request, secret_response;
|
||||
|
||||
struct snp_secrets_page *secrets;
|
||||
struct snp_req_data input;
|
||||
|
||||
void *certs_data;
|
||||
|
||||
struct aesgcm_ctx *ctx;
|
||||
|
||||
|
|
|
@ -143,7 +143,6 @@ bool __init early_is_amd_nb(u32 device)
|
|||
|
||||
struct resource *amd_get_mmconfig_range(struct resource *res)
|
||||
{
|
||||
u32 address;
|
||||
u64 base, msr;
|
||||
unsigned int segn_busn_bits;
|
||||
|
||||
|
@ -151,13 +150,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
|
|||
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
|
||||
return NULL;
|
||||
|
||||
/* assume all cpus from fam10h have mmconfig */
|
||||
if (boot_cpu_data.x86 < 0x10)
|
||||
/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
|
||||
if (boot_cpu_data.x86 < 0x10 ||
|
||||
rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
|
||||
return NULL;
|
||||
|
||||
address = MSR_FAM10H_MMIO_CONF_BASE;
|
||||
rdmsrl(address, msr);
|
||||
|
||||
/* mmconfig is not enabled */
|
||||
if (!(msr & FAM10H_MMIO_CONF_ENABLE))
|
||||
return NULL;
|
||||
|
|
|
@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|||
cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
|
||||
|
||||
/* If bit 31 is set, this is an unknown format */
|
||||
for (j = 0 ; j < 3 ; j++)
|
||||
for (j = 0 ; j < 4 ; j++)
|
||||
if (regs[j] & (1 << 31))
|
||||
regs[j] = 0;
|
||||
|
||||
|
|
|
@ -635,26 +635,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
|
|||
}
|
||||
#endif
|
||||
|
||||
#define TLB_INST_4K 0x01
|
||||
#define TLB_INST_4M 0x02
|
||||
#define TLB_INST_2M_4M 0x03
|
||||
#define TLB_INST_4K 0x01
|
||||
#define TLB_INST_4M 0x02
|
||||
#define TLB_INST_2M_4M 0x03
|
||||
|
||||
#define TLB_INST_ALL 0x05
|
||||
#define TLB_INST_1G 0x06
|
||||
#define TLB_INST_ALL 0x05
|
||||
#define TLB_INST_1G 0x06
|
||||
|
||||
#define TLB_DATA_4K 0x11
|
||||
#define TLB_DATA_4M 0x12
|
||||
#define TLB_DATA_2M_4M 0x13
|
||||
#define TLB_DATA_4K_4M 0x14
|
||||
#define TLB_DATA_4K 0x11
|
||||
#define TLB_DATA_4M 0x12
|
||||
#define TLB_DATA_2M_4M 0x13
|
||||
#define TLB_DATA_4K_4M 0x14
|
||||
|
||||
#define TLB_DATA_1G 0x16
|
||||
#define TLB_DATA_1G 0x16
|
||||
#define TLB_DATA_1G_2M_4M 0x17
|
||||
|
||||
#define TLB_DATA0_4K 0x21
|
||||
#define TLB_DATA0_4M 0x22
|
||||
#define TLB_DATA0_2M_4M 0x23
|
||||
#define TLB_DATA0_4K 0x21
|
||||
#define TLB_DATA0_4M 0x22
|
||||
#define TLB_DATA0_2M_4M 0x23
|
||||
|
||||
#define STLB_4K 0x41
|
||||
#define STLB_4K_2M 0x42
|
||||
#define STLB_4K 0x41
|
||||
#define STLB_4K_2M 0x42
|
||||
|
||||
/*
|
||||
* All of leaf 0x2's one-byte TLB descriptors implies the same number of
|
||||
* entries for their respective TLB types. The 0x63 descriptor is an
|
||||
* exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries
|
||||
* for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for
|
||||
* 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the
|
||||
* intel_tlb_table[] mapping.
|
||||
*/
|
||||
#define TLB_0x63_2M_4M_ENTRIES 32
|
||||
|
||||
static const struct _tlb_table intel_tlb_table[] = {
|
||||
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
|
||||
|
@ -676,7 +687,8 @@ static const struct _tlb_table intel_tlb_table[] = {
|
|||
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
|
||||
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
|
||||
{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
|
||||
{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
|
||||
{ 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative"
|
||||
" (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
|
||||
{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
|
||||
{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
|
||||
{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
|
||||
|
@ -776,6 +788,12 @@ static void intel_tlb_lookup(const unsigned char desc)
|
|||
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
|
||||
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
|
||||
break;
|
||||
case TLB_DATA_1G_2M_4M:
|
||||
if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
|
||||
tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
|
||||
if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
|
||||
tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
|
||||
fallthrough;
|
||||
case TLB_DATA_1G:
|
||||
if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
|
||||
tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
|
||||
|
@ -799,7 +817,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
|
|||
cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
|
||||
|
||||
/* If bit 31 is set, this is an unknown format */
|
||||
for (j = 0 ; j < 3 ; j++)
|
||||
for (j = 0 ; j < 4 ; j++)
|
||||
if (regs[j] & (1 << 31))
|
||||
regs[j] = 0;
|
||||
|
||||
|
|
|
@ -175,23 +175,29 @@ static bool need_sha_check(u32 cur_rev)
|
|||
{
|
||||
switch (cur_rev >> 8) {
|
||||
case 0x80012: return cur_rev <= 0x800126f; break;
|
||||
case 0x80082: return cur_rev <= 0x800820f; break;
|
||||
case 0x83010: return cur_rev <= 0x830107c; break;
|
||||
case 0x86001: return cur_rev <= 0x860010e; break;
|
||||
case 0x86081: return cur_rev <= 0x8608108; break;
|
||||
case 0x87010: return cur_rev <= 0x8701034; break;
|
||||
case 0x8a000: return cur_rev <= 0x8a0000a; break;
|
||||
case 0xa0010: return cur_rev <= 0xa00107a; break;
|
||||
case 0xa0011: return cur_rev <= 0xa0011da; break;
|
||||
case 0xa0012: return cur_rev <= 0xa001243; break;
|
||||
case 0xa0082: return cur_rev <= 0xa00820e; break;
|
||||
case 0xa1011: return cur_rev <= 0xa101153; break;
|
||||
case 0xa1012: return cur_rev <= 0xa10124e; break;
|
||||
case 0xa1081: return cur_rev <= 0xa108109; break;
|
||||
case 0xa2010: return cur_rev <= 0xa20102f; break;
|
||||
case 0xa2012: return cur_rev <= 0xa201212; break;
|
||||
case 0xa4041: return cur_rev <= 0xa404109; break;
|
||||
case 0xa5000: return cur_rev <= 0xa500013; break;
|
||||
case 0xa6012: return cur_rev <= 0xa60120a; break;
|
||||
case 0xa7041: return cur_rev <= 0xa704109; break;
|
||||
case 0xa7052: return cur_rev <= 0xa705208; break;
|
||||
case 0xa7080: return cur_rev <= 0xa708009; break;
|
||||
case 0xa70c0: return cur_rev <= 0xa70C009; break;
|
||||
case 0xaa001: return cur_rev <= 0xaa00116; break;
|
||||
case 0xaa002: return cur_rev <= 0xaa00218; break;
|
||||
default: break;
|
||||
}
|
||||
|
@ -1068,7 +1074,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
|
|||
if (ret != UCODE_OK)
|
||||
return ret;
|
||||
|
||||
for_each_node(nid) {
|
||||
for_each_node_with_cpus(nid) {
|
||||
cpu = cpumask_first(cpumask_of_node(nid));
|
||||
c = &cpu_data(cpu);
|
||||
|
||||
|
|
|
@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
|
|||
u64 xfrm_mask;
|
||||
int ret;
|
||||
|
||||
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
|
||||
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
|
||||
pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
if (!(eax & 1)) {
|
||||
pr_err("SGX disabled: SGX1 instruction support not available.\n");
|
||||
pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
|
|||
}
|
||||
|
||||
ret = misc_register(&sgx_dev_enclave);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
|
|||
struct file *backing;
|
||||
long ret;
|
||||
|
||||
/*
|
||||
* ECREATE would detect this too, but checking here also ensures
|
||||
* that the 'encl_size' calculations below can never overflow.
|
||||
*/
|
||||
if (!is_power_of_2(secs->size))
|
||||
return -EINVAL;
|
||||
|
||||
va_page = sgx_encl_grow(encl, true);
|
||||
if (IS_ERR(va_page))
|
||||
return PTR_ERR(va_page);
|
||||
|
|
|
@ -1763,7 +1763,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
|
|||
|
||||
entry->ecx = entry->edx = 0;
|
||||
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
|
||||
entry->eax = entry->ebx;
|
||||
entry->eax = entry->ebx = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -4590,6 +4590,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
|
|||
|
||||
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
|
||||
{
|
||||
struct kvm *kvm = svm->vcpu.kvm;
|
||||
|
||||
/*
|
||||
* All host state for SEV-ES guests is categorized into three swap types
|
||||
* based on how it is handled by hardware during a world switch:
|
||||
|
@ -4613,14 +4615,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
|
|||
|
||||
/*
|
||||
* If DebugSwap is enabled, debug registers are loaded but NOT saved by
|
||||
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
|
||||
* saves and loads debug registers (Type-A).
|
||||
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
|
||||
* not save or load debug registers. Sadly, KVM can't prevent SNP
|
||||
* guests from lying about DebugSwap on secondary vCPUs, i.e. the
|
||||
* SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
|
||||
* the guest has actually enabled (or not!) in the VMSA.
|
||||
*
|
||||
* If DebugSwap is *possible*, save the masks so that they're restored
|
||||
* if the guest enables DebugSwap. But for the DRs themselves, do NOT
|
||||
* rely on the CPU to restore the host values; KVM will restore them as
|
||||
* needed in common code, via hw_breakpoint_restore(). Note, KVM does
|
||||
* NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
|
||||
* don't need to be restored per se, KVM just needs to ensure they are
|
||||
* loaded with the correct values *if* the CPU writes the MSRs.
|
||||
*/
|
||||
if (sev_vcpu_has_debug_swap(svm)) {
|
||||
hostsa->dr0 = native_get_debugreg(0);
|
||||
hostsa->dr1 = native_get_debugreg(1);
|
||||
hostsa->dr2 = native_get_debugreg(2);
|
||||
hostsa->dr3 = native_get_debugreg(3);
|
||||
if (sev_vcpu_has_debug_swap(svm) ||
|
||||
(sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
|
||||
hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
|
||||
hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
|
||||
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
|
||||
|
|
|
@ -3165,6 +3165,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* AMD changed the architectural behavior of bits 5:2. On CPUs
|
||||
* without BusLockTrap, bits 5:2 control "external pins", but
|
||||
* on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
|
||||
* and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
|
||||
* the guest to set bits 5:2 despite not actually virtualizing
|
||||
* Performance-Monitoring/Breakpoint external pins. Drop bits
|
||||
* 5:2 for backwards compatibility.
|
||||
*/
|
||||
data &= ~GENMASK(5, 2);
|
||||
|
||||
/*
|
||||
* Suppress BTF as KVM doesn't virtualize BTF, but there's no
|
||||
* way to communicate lack of support to the guest.
|
||||
*/
|
||||
if (data & DEBUGCTLMSR_BTF) {
|
||||
kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
|
||||
data &= ~DEBUGCTLMSR_BTF;
|
||||
}
|
||||
|
||||
if (data & DEBUGCTL_RESERVED_BITS)
|
||||
return 1;
|
||||
|
||||
|
@ -4189,6 +4210,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
|
|||
|
||||
guest_state_enter_irqoff();
|
||||
|
||||
/*
|
||||
* Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
|
||||
* VMRUN controls whether or not physical IRQs are masked (KVM always
|
||||
* runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
|
||||
* temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
|
||||
* into guest state if delivery of an event during VMRUN triggers a
|
||||
* #VMEXIT, and the guest_state transitions already tell lockdep that
|
||||
* IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
|
||||
* this path, so IRQs aren't actually unmasked while running host code.
|
||||
*/
|
||||
raw_local_irq_enable();
|
||||
|
||||
amd_clear_divider();
|
||||
|
||||
if (sev_es_guest(vcpu->kvm))
|
||||
|
@ -4197,6 +4230,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
|
|||
else
|
||||
__svm_vcpu_run(svm, spec_ctrl_intercepted);
|
||||
|
||||
raw_local_irq_disable();
|
||||
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
|
||||
|
@ -4253,6 +4288,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
|
|||
clgi();
|
||||
kvm_load_guest_xsave_state(vcpu);
|
||||
|
||||
/*
|
||||
* Hardware only context switches DEBUGCTL if LBR virtualization is
|
||||
* enabled. Manually load DEBUGCTL if necessary (and restore it after
|
||||
* VM-Exit), as running with the host's DEBUGCTL can negatively affect
|
||||
* guest state and can even be fatal, e.g. due to Bus Lock Detect.
|
||||
*/
|
||||
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
|
||||
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
|
||||
update_debugctlmsr(svm->vmcb->save.dbgctl);
|
||||
|
||||
kvm_wait_lapic_expire(vcpu);
|
||||
|
||||
/*
|
||||
|
@ -4280,6 +4325,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
|
|||
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
|
||||
kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
|
||||
|
||||
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
|
||||
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
|
||||
update_debugctlmsr(vcpu->arch.host_debugctl);
|
||||
|
||||
kvm_load_host_xsave_state(vcpu);
|
||||
stgi();
|
||||
|
||||
|
|
|
@ -584,7 +584,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
|
|||
/* svm.c */
|
||||
#define MSR_INVALID 0xffffffffU
|
||||
|
||||
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
|
||||
#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
|
||||
|
||||
extern bool dump_invalid_vmcb;
|
||||
|
||||
|
|
|
@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
|
|||
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
|
||||
|
||||
/* Enter guest mode */
|
||||
sti
|
||||
|
||||
3: vmrun %_ASM_AX
|
||||
4:
|
||||
cli
|
||||
|
||||
/* Pop @svm to RAX while it's the only available register. */
|
||||
pop %_ASM_AX
|
||||
|
||||
|
@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
|
|||
mov KVM_VMCB_pa(%rax), %rax
|
||||
|
||||
/* Enter guest mode */
|
||||
sti
|
||||
|
||||
1: vmrun %rax
|
||||
|
||||
2: cli
|
||||
|
||||
2:
|
||||
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
|
||||
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
|
||||
|
||||
|
|
|
@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
|
|||
*/
|
||||
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
||||
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
|
||||
shrink_ple_window(vcpu);
|
||||
|
||||
vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
|
||||
|
||||
vmx_vcpu_pi_load(vcpu, cpu);
|
||||
|
||||
vmx->host_debugctlmsr = get_debugctlmsr();
|
||||
}
|
||||
|
||||
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
|
@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
|
|||
}
|
||||
|
||||
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
|
||||
if (vmx->host_debugctlmsr)
|
||||
update_debugctlmsr(vmx->host_debugctlmsr);
|
||||
if (vcpu->arch.host_debugctl)
|
||||
update_debugctlmsr(vcpu->arch.host_debugctl);
|
||||
|
||||
#ifndef CONFIG_X86_64
|
||||
/*
|
||||
|
|
|
@ -340,8 +340,6 @@ struct vcpu_vmx {
|
|||
/* apic deadline value in host tsc */
|
||||
u64 hv_deadline_tsc;
|
||||
|
||||
unsigned long host_debugctlmsr;
|
||||
|
||||
/*
|
||||
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
|
||||
* msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
|
||||
|
|
|
@ -10968,6 +10968,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||
set_debugreg(0, 7);
|
||||
}
|
||||
|
||||
vcpu->arch.host_debugctl = get_debugctlmsr();
|
||||
|
||||
guest_timing_enter_irqoff();
|
||||
|
||||
for (;;) {
|
||||
|
|
|
@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
|
|||
out[size] = 0;
|
||||
|
||||
while (i < size) {
|
||||
u8 c = le16_to_cpu(in[i]) & 0xff;
|
||||
u8 c = le16_to_cpu(in[i]) & 0x7f;
|
||||
|
||||
if (c && !isprint(c))
|
||||
c = '!';
|
||||
|
|
|
@ -21,9 +21,15 @@ struct platform_profile_handler {
|
|||
struct device dev;
|
||||
int minor;
|
||||
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
|
||||
unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
|
||||
const struct platform_profile_ops *ops;
|
||||
};
|
||||
|
||||
struct aggregate_choices_data {
|
||||
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
|
||||
int count;
|
||||
};
|
||||
|
||||
static const char * const profile_names[] = {
|
||||
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
|
||||
[PLATFORM_PROFILE_COOL] = "cool",
|
||||
|
@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
|
|||
|
||||
lockdep_assert_held(&profile_lock);
|
||||
handler = to_pprof_handler(dev);
|
||||
if (!test_bit(*bit, handler->choices))
|
||||
if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return handler->ops->profile_set(dev, *bit);
|
||||
|
@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
|
|||
/**
|
||||
* _aggregate_choices - Aggregate the available profile choices
|
||||
* @dev: The device
|
||||
* @data: The available profile choices
|
||||
* @arg: struct aggregate_choices_data
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
static int _aggregate_choices(struct device *dev, void *data)
|
||||
static int _aggregate_choices(struct device *dev, void *arg)
|
||||
{
|
||||
unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
|
||||
struct aggregate_choices_data *data = arg;
|
||||
struct platform_profile_handler *handler;
|
||||
unsigned long *aggregate = data;
|
||||
|
||||
lockdep_assert_held(&profile_lock);
|
||||
handler = to_pprof_handler(dev);
|
||||
if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
|
||||
bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
|
||||
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
|
||||
if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
|
||||
bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
|
||||
else
|
||||
bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
|
||||
bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
|
||||
data->count++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* _remove_hidden_choices - Remove hidden choices from aggregate data
|
||||
* @dev: The device
|
||||
* @arg: struct aggregate_choices_data
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
static int _remove_hidden_choices(struct device *dev, void *arg)
|
||||
{
|
||||
struct aggregate_choices_data *data = arg;
|
||||
struct platform_profile_handler *handler;
|
||||
|
||||
lockdep_assert_held(&profile_lock);
|
||||
handler = to_pprof_handler(dev);
|
||||
bitmap_andnot(data->aggregate, handler->choices,
|
||||
handler->hidden_choices, PLATFORM_PROFILE_LAST);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int err;

set_bit(PLATFORM_PROFILE_LAST, aggregate);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
aggregate, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;
if (data.count == 1) {
err = class_for_each_device(&platform_profile_class, NULL,
&data, _remove_hidden_choices);
if (err)
return err;
}
}

/* no profile handler registered any more */
if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;

return _commmon_choices_show(aggregate, buf);
return _commmon_choices_show(data.aggregate, buf);
}

/**
@@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int ret;
int i;

@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (ret)
return ret;
if (!test_bit(i, choices))
if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;

ret = class_for_each_device(&platform_profile_class, NULL, &i,
@@ -453,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;

set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
@@ -470,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;

err = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;

/* never iterate into a custom if all drivers supported it */
clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);

next = find_next_bit_wrap(choices,
next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);

@@ -532,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}

if (ops->hidden_choices) {
err = ops->hidden_choices(drvdata, pprof->hidden_choices);
if (err) {
dev_err(dev, "platform_profile hidden_choices failed\n");
return ERR_PTR(err);
}
}

guard(mutex)(&profile_lock);

/* create class interface for individual handler */
@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);

if (refcount_dec_and_test(&device->ref)) {
hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
put_device(con_dev);
put_device(par_dev);
return ret;
}
@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);

/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
if (test_bit(UB_STATE_USED, &ub->state)) {
/*
* Parameters can only be changed when device hasn't
* been started yet
*/
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
@@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.

config BT_HCIBTUSB_AUTO_ISOC_ALT
bool "Automatically adjust alternate setting for Isoc endpoints"
depends on BT_HCIBTUSB
default y if CHROME_PLATFORMS
help
Say Y here to automatically adjusting the alternate setting for
HCI_USER_CHANNEL whenever a SCO link is established.

When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
and configures isoc endpoint alternate setting automatically when
HCI_USER_CHANNEL is in use.

config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
@@ -34,6 +34,7 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);

static struct usb_driver btusb_driver;

@@ -1085,6 +1086,42 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}

static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
struct hci_ev_sync_conn_complete *ev =
(void *) skb->data + sizeof(*hdr);
struct hci_dev *hdev = data->hdev;
unsigned int notify_air_mode;

if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
return;

if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
return;

if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
return;

switch (ev->air_mode) {
case BT_CODEC_CVSD:
notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
break;

case BT_CODEC_TRANSPARENT:
notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
break;

default:
return;
}

bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
data->sco_num = 1;
data->air_mode = notify_air_mode;
schedule_work(&data->work);
}

static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1092,6 +1129,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}

/* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
btusb_sco_connected(data, skb);

return data->recv_event(data->hdev, skb);
}
@@ -1095,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
if (pci_reset_function(pdev))
dev_err(&pdev->dev, "Recovery failed\n");
err = pci_try_reset_function(pdev);
if (err)
dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}

static void health_check(struct timer_list *t)
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}

static int simple_pm_bus_suspend(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);

if (!bus)
return 0;

return pm_runtime_force_suspend(dev);
}

static int simple_pm_bus_resume(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);

if (!bus)
return 0;

return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};

#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
@@ -473,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
ssize_t len;

return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
device_lock(dev);
len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
misc_minor_free(misc->minor);
if (is_dynamic) {
misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
@@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,

pipe_lock(pipe);
ret = 0;
if (pipe_empty(pipe->head, pipe->tail))
if (pipe_is_empty(pipe))
goto error_out;

ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;

occupancy = pipe_occupancy(pipe->head, pipe->tail);
occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);

if (!buf) {
@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;

if (!try_module_get(THIS_MODULE))
return -ENOENT;

/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
if (!aggr)
return -ENOMEM;
if (!aggr) {
res = -ENOMEM;
goto put_module;
}

memcpy(aggr->args, buf, count + 1);

@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}

aggr->pdev = pdev;
module_put(THIS_MODULE);
return count;

remove_table:
@@ -175,6 +181,8 @@ free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
put_module:
module_put(THIS_MODULE);
return res;
}

@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;

if (!try_module_get(THIS_MODULE))
return -ENOENT;

mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
if (!aggr)
if (!aggr) {
module_put(THIS_MODULE);
return -ENOENT;
}

gpio_aggregator_free(aggr);
module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
@@ -40,7 +40,7 @@ struct gpio_rcar_info {

struct gpio_rcar_priv {
void __iomem *base;
spinlock_t lock;
raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);

/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));

spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);

/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);

spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);

bits[0] = val;
return 0;
@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;

ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
if (ret) {
*npins = RCAR_MAX_GPIO_PER_BANK;
} else {
*npins = args.args[2];
of_node_put(args.np);
}

if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;

p->dev = dev;
spin_lock_init(&p->lock);
raw_spin_lock_init(&p->lock);

/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
|
|||
/* EOP buffer is not required for all ASICs */
|
||||
if (properties->eop_ring_buffer_address) {
|
||||
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
|
||||
pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
|
||||
properties->eop_buf_bo->tbo.base.size,
|
||||
pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
|
||||
properties->eop_ring_buffer_size,
|
||||
topo_dev->node_props.eop_buffer_size);
|
||||
err = -EINVAL;
|
||||
goto out_err_unreserve;
|
||||
|
|
|
@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
|
|||
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
|
||||
|
||||
/* Invalid input */
|
||||
if (!plane_state->dst_rect.width ||
|
||||
if (!plane_state ||
|
||||
!plane_state->dst_rect.width ||
|
||||
!plane_state->dst_rect.height ||
|
||||
!plane_state->src_rect.width ||
|
||||
!plane_state->src_rect.height) {
|
||||
|
|
|
@ -1895,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
|
|||
NULL);
|
||||
}
|
||||
|
||||
static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
|
||||
ret = smu_v14_0_allow_ih_interrupt(smu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
@ -1916,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
return smu_v14_0_process_pending_interrupt(smu);
|
||||
return smu_v14_0_allow_ih_interrupt(smu);
|
||||
}
|
||||
|
||||
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
|
||||
|
|
|
@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
|
|||
return 0;
|
||||
|
||||
err_free_mmio:
|
||||
iounmap(hv->vram);
|
||||
vmbus_free_mmio(hv->mem->start, hv->fb_size);
|
||||
err_vmbus_close:
|
||||
vmbus_close(hdev->channel);
|
||||
|
@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
|
|||
vmbus_close(hdev->channel);
|
||||
hv_set_drvdata(hdev, NULL);
|
||||
|
||||
iounmap(hv->vram);
|
||||
vmbus_free_mmio(hv->mem->start, hv->fb_size);
|
||||
}
|
||||
|
||||
|
|
|
@ -1867,7 +1867,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
|
|||
/* create encoders */
|
||||
mst_stream_encoders_create(dig_port);
|
||||
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
|
||||
&intel_dp->aux, 16, 3, conn_base_id);
|
||||
&intel_dp->aux, 16,
|
||||
INTEL_NUM_PIPES(display), conn_base_id);
|
||||
if (ret) {
|
||||
intel_dp->mst_mgr.cbs = NULL;
|
||||
return ret;
|
||||
|
|
|
@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
|
|||
static void
|
||||
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
|
||||
{
|
||||
pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
|
||||
fw_obj->fw_mm_node.size);
|
||||
struct pvr_gem_object *pvr_obj = fw_obj->gem;
|
||||
|
||||
pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
|
||||
fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
|
|
@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
|
|||
if (sf_id == ROGUE_FW_SF_LAST)
|
||||
return -EINVAL;
|
||||
|
||||
timestamp = read_fw_trace(trace_seq_data, 1) |
|
||||
((u64)read_fw_trace(trace_seq_data, 2) << 32);
|
||||
timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
|
||||
read_fw_trace(trace_seq_data, 2);
|
||||
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
|
||||
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
|
||||
|
||||
|
|
|
@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
|
|||
return PVR_DRIVER_NAME;
|
||||
}
|
||||
|
||||
static void pvr_queue_fence_release_work(struct work_struct *w)
|
||||
{
|
||||
struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
|
||||
|
||||
pvr_context_put(fence->queue->ctx);
|
||||
dma_fence_free(&fence->base);
|
||||
}
|
||||
|
||||
static void pvr_queue_fence_release(struct dma_fence *f)
|
||||
{
|
||||
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
|
||||
struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
|
||||
|
||||
pvr_context_put(fence->queue->ctx);
|
||||
dma_fence_free(f);
|
||||
queue_work(pvr_dev->sched_wq, &fence->release_work);
|
||||
}
|
||||
|
||||
static const char *
|
||||
|
@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
|
|||
|
||||
pvr_context_get(queue->ctx);
|
||||
fence->queue = queue;
|
||||
INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
|
||||
dma_fence_init(&fence->base, fence_ops,
|
||||
&fence_ctx->lock, fence_ctx->id,
|
||||
atomic_inc_return(&fence_ctx->seqno));
|
||||
|
@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
|
|||
static void
|
||||
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
|
||||
{
|
||||
pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
|
||||
&queue->job_fence_ctx);
|
||||
if (!fence->ops)
|
||||
pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
|
||||
&queue->job_fence_ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#define PVR_QUEUE_H
|
||||
|
||||
#include <drm/gpu_scheduler.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "pvr_cccb.h"
|
||||
#include "pvr_device.h"
|
||||
|
@ -63,6 +64,9 @@ struct pvr_queue_fence {
|
|||
|
||||
/** @queue: Queue that created this fence. */
|
||||
struct pvr_queue *queue;
|
||||
|
||||
/** @release_work: Fence release work structure. */
|
||||
struct work_struct release_work;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -293,8 +293,9 @@ err_bind_op_fini:
|
|||
|
||||
static int
|
||||
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
|
||||
struct pvr_vm_context *vm_ctx, u64 device_addr,
|
||||
u64 size)
|
||||
struct pvr_vm_context *vm_ctx,
|
||||
struct pvr_gem_object *pvr_obj,
|
||||
u64 device_addr, u64 size)
|
||||
{
|
||||
int err;
|
||||
|
||||
|
@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
|
|||
goto err_bind_op_fini;
|
||||
}
|
||||
|
||||
bind_op->pvr_obj = pvr_obj;
|
||||
bind_op->vm_ctx = vm_ctx;
|
||||
bind_op->device_addr = device_addr;
|
||||
bind_op->size = size;
|
||||
|
@ -597,20 +599,6 @@ err_free:
|
|||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
|
||||
* @vm_ctx: Target VM context.
|
||||
*
|
||||
* This function ensures that no mappings are left dangling by unmapping them
|
||||
* all in order of ascending device-virtual address.
|
||||
*/
|
||||
void
|
||||
pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
|
||||
{
|
||||
WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
|
||||
vm_ctx->gpuvm_mgr.mm_range));
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_context_release() - Teardown a VM context.
|
||||
* @ref_count: Pointer to reference counter of the VM context.
|
||||
|
@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
|
|||
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
|
||||
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
|
||||
|
||||
/* Unmap operations don't have an object to lock. */
|
||||
if (!pvr_obj)
|
||||
return 0;
|
||||
|
||||
/* Acquire lock on the GEM being mapped. */
|
||||
/* Acquire lock on the GEM object being mapped/unmapped. */
|
||||
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
|
||||
}
|
||||
|
||||
|
@ -772,8 +756,10 @@ err_cleanup:
|
|||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
|
||||
* pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
|
||||
* memory.
|
||||
* @vm_ctx: Target VM context.
|
||||
* @pvr_obj: Target PowerVR memory object.
|
||||
* @device_addr: Virtual device address at the start of the target mapping.
|
||||
* @size: Size of the target mapping.
|
||||
*
|
||||
|
@ -784,9 +770,13 @@ err_cleanup:
|
|||
* * Any error encountered while performing internal operations required to
|
||||
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
|
||||
* pvr_vm_gpuva_remap).
|
||||
*
|
||||
* The vm_ctx->lock must be held when calling this function.
|
||||
*/
|
||||
int
|
||||
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
|
||||
static int
|
||||
pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
|
||||
struct pvr_gem_object *pvr_obj,
|
||||
u64 device_addr, u64 size)
|
||||
{
|
||||
struct pvr_vm_bind_op bind_op = {0};
|
||||
struct drm_gpuvm_exec vm_exec = {
|
||||
|
@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
|
|||
},
|
||||
};
|
||||
|
||||
int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
|
||||
size);
|
||||
int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
|
||||
device_addr, size);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pvr_gem_object_get(pvr_obj);
|
||||
|
||||
err = drm_gpuvm_exec_lock(&vm_exec);
|
||||
if (err)
|
||||
goto err_cleanup;
|
||||
|
@ -818,6 +810,96 @@ err_cleanup:
|
|||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
|
||||
* memory.
|
||||
* @vm_ctx: Target VM context.
|
||||
* @pvr_obj: Target PowerVR memory object.
|
||||
* @device_addr: Virtual device address at the start of the target mapping.
|
||||
* @size: Size of the target mapping.
|
||||
*
|
||||
* Return:
|
||||
* * 0 on success,
|
||||
* * Any error encountered by pvr_vm_unmap_obj_locked.
|
||||
*/
|
||||
int
|
||||
pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
|
||||
u64 device_addr, u64 size)
|
||||
{
|
||||
int err;
|
||||
|
||||
mutex_lock(&vm_ctx->lock);
|
||||
err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
|
||||
mutex_unlock(&vm_ctx->lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
|
||||
* @vm_ctx: Target VM context.
|
||||
* @device_addr: Virtual device address at the start of the target mapping.
|
||||
* @size: Size of the target mapping.
|
||||
*
|
||||
* Return:
|
||||
* * 0 on success,
|
||||
* * Any error encountered by drm_gpuva_find,
|
||||
* * Any error encountered by pvr_vm_unmap_obj_locked.
|
||||
*/
|
||||
int
|
||||
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
|
||||
{
|
||||
struct pvr_gem_object *pvr_obj;
|
||||
struct drm_gpuva *va;
|
||||
int err;
|
||||
|
||||
mutex_lock(&vm_ctx->lock);
|
||||
|
||||
va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
|
||||
if (va) {
|
||||
pvr_obj = gem_to_pvr_gem(va->gem.obj);
|
||||
err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
|
||||
va->va.addr, va->va.range);
|
||||
} else {
|
||||
err = -ENOENT;
|
||||
}
|
||||
|
||||
mutex_unlock(&vm_ctx->lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
|
||||
* @vm_ctx: Target VM context.
|
||||
*
|
||||
* This function ensures that no mappings are left dangling by unmapping them
|
||||
* all in order of ascending device-virtual address.
|
||||
*/
|
||||
void
|
||||
pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
|
||||
{
|
||||
mutex_lock(&vm_ctx->lock);
|
||||
|
||||
for (;;) {
|
||||
struct pvr_gem_object *pvr_obj;
|
||||
struct drm_gpuva *va;
|
||||
|
||||
va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
|
||||
vm_ctx->gpuvm_mgr.mm_start,
|
||||
vm_ctx->gpuvm_mgr.mm_range);
|
||||
if (!va)
|
||||
break;
|
||||
|
||||
pvr_obj = gem_to_pvr_gem(va->gem.obj);
|
||||
|
||||
WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
|
||||
va->va.addr, va->va.range));
|
||||
}
|
||||
|
||||
mutex_unlock(&vm_ctx->lock);
|
||||
}
|
||||
|
||||
/* Static data areas are determined by firmware. */
|
||||
static const struct drm_pvr_static_data_area static_data_areas[] = {
|
||||
{
|
||||
|
|
|
@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
|
|||
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
|
||||
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
|
||||
u64 device_addr, u64 size);
|
||||
int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
|
||||
struct pvr_gem_object *pvr_obj,
|
||||
u64 device_addr, u64 size);
|
||||
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
|
||||
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ config DRM_NOUVEAU
|
|||
depends on DRM && PCI && MMU
|
||||
select IOMMU_API
|
||||
select FW_LOADER
|
||||
select FW_CACHE if PM_SLEEP
|
||||
select DRM_CLIENT_SELECTION
|
||||
select DRM_DISPLAY_DP_HELPER
|
||||
select DRM_DISPLAY_HDMI_HELPER
|
||||
|
|
|
@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
|
|||
return -1;
|
||||
}
|
||||
|
||||
static void r300_gpu_init(struct radeon_device *rdev)
|
||||
/* rs400_gpu_init also calls this! */
|
||||
void r300_gpu_init(struct radeon_device *rdev)
|
||||
{
|
||||
uint32_t gb_tile_config, tmp;
|
||||
|
||||
|
|
|
@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
|
|||
*/
|
||||
extern int r300_init(struct radeon_device *rdev);
|
||||
extern void r300_fini(struct radeon_device *rdev);
|
||||
extern void r300_gpu_init(struct radeon_device *rdev);
|
||||
extern int r300_suspend(struct radeon_device *rdev);
|
||||
extern int r300_resume(struct radeon_device *rdev);
|
||||
extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
|
||||
|
|
|
@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
|
|||
|
||||
static void rs400_gpu_init(struct radeon_device *rdev)
|
||||
{
|
||||
/* FIXME: is this correct ? */
|
||||
r420_pipes_init(rdev);
|
||||
/* Earlier code was calling r420_pipes_init and then
|
||||
* rs400_mc_wait_for_idle(rdev). The problem is that
|
||||
* at least on my Mobility Radeon Xpress 200M RC410 card
|
||||
* that ends up in this code path ends up num_gb_pipes == 3
|
||||
* while the card seems to have only one pipe. With the
|
||||
* r420 pipe initialization method.
|
||||
*
|
||||
* Problems shown up as HyperZ glitches, see:
|
||||
* https://bugs.freedesktop.org/show_bug.cgi?id=110897
|
||||
*
|
||||
* Delegating initialization to r300 code seems to work
|
||||
* and results in proper pipe numbers. The rs400 cards
|
||||
* are said to be not r400, but r300 kind of cards.
|
||||
*/
|
||||
r300_gpu_init(rdev);
|
||||
|
||||
if (rs400_mc_wait_for_idle(rdev)) {
|
||||
pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
|
||||
RREG32(RADEON_MC_STATUS));
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _GPU_SCHED_TRACE_H_
|
||||
|
||||
#include <linux/stringify.h>
|
||||
|
@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
|
|||
__entry->seqno)
|
||||
);
|
||||
|
||||
#endif
|
||||
#endif /* _GPU_SCHED_TRACE_H_ */
|
||||
|
||||
/* This part must be outside protection */
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
|
|
|
@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
|
|||
bochs->xres, bochs->yres, bochs->bpp,
|
||||
bochs->yres_virtual);
|
||||
|
||||
bochs_hw_blank(bochs, false);
|
||||
|
||||
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
|
||||
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
|
||||
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
|
||||
|
@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
|
|||
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct bochs_device *bochs = to_bochs_device(crtc->dev);
|
||||
|
||||
bochs_hw_blank(bochs, false);
|
||||
}
|
||||
|
||||
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
|
||||
|
|
|
@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
|
|||
to_intel_plane(crtc->base.primary);
|
||||
struct intel_plane_state *plane_state =
|
||||
to_intel_plane_state(plane->base.state);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
struct drm_framebuffer *fb;
|
||||
struct i915_vma *vma;
|
||||
|
||||
|
@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
|
|||
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
|
||||
|
||||
plane_config->vma = vma;
|
||||
|
||||
/*
|
||||
* Flip to the newly created mapping ASAP, so we can re-use the
|
||||
* first part of GGTT for WOPCM, prevent flickering, and prevent
|
||||
* the lookup of sysmem scratch pages.
|
||||
*/
|
||||
plane->check_plane(crtc_state, plane_state);
|
||||
plane->async_flip(NULL, plane, crtc_state, plane_state, true);
|
||||
return;
|
||||
|
||||
nofb:
|
||||
|
|
|
@ -380,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
xe_wa_process_gt(gt);
|
||||
xe_wa_process_oob(gt);
|
||||
xe_tuning_process_gt(gt);
|
||||
|
||||
xe_force_wake_init_gt(gt, gt_to_fw(gt));
|
||||
spin_lock_init(>->global_invl_lock);
|
||||
|
@ -474,6 +472,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
|
|||
}
|
||||
|
||||
xe_gt_mcr_set_implicit_defaults(gt);
|
||||
xe_wa_process_gt(gt);
|
||||
xe_tuning_process_gt(gt);
|
||||
xe_reg_sr_apply_mmio(>->reg_sr, gt);
|
||||
|
||||
err = xe_gt_clock_init(gt);
|
||||
|
|
|
@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
|
|||
return (end - start) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* xe_mark_range_accessed() - mark a range is accessed, so core mm
|
||||
* have such information for memory eviction or write back to
|
||||
* hard disk
|
||||
*
|
||||
* @range: the range to mark
|
||||
* @write: if write to this range, we mark pages in this range
|
||||
* as dirty
|
||||
|
@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
|
||||
struct hmm_range *range, struct rw_semaphore *notifier_sem)
|
||||
{
|
||||
unsigned long i, npages, hmm_pfn;
|
||||
unsigned long num_chunks = 0;
|
||||
int ret;
|
||||
|
||||
/* HMM docs says this is needed. */
|
||||
ret = down_read_interruptible(notifier_sem);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
|
||||
up_read(notifier_sem);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
npages = xe_npages_in_range(range->start, range->end);
|
||||
for (i = 0; i < npages;) {
|
||||
unsigned long len;
|
||||
|
||||
hmm_pfn = range->hmm_pfns[i];
|
||||
xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
|
||||
|
||||
len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
|
||||
|
||||
/* If order > 0 the page may extend beyond range->start */
|
||||
len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
|
||||
i += len;
|
||||
num_chunks++;
|
||||
}
|
||||
up_read(notifier_sem);
|
||||
|
||||
return sg_alloc_table(st, num_chunks, GFP_KERNEL);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
|
||||
* in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
|
||||
* and will be used to program GPU page table later.
|
||||
*
|
||||
* @xe: the xe device who will access the dma-address in sg table
|
||||
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
|
||||
* has the pfn numbers of pages that back up this hmm address range.
|
||||
* @st: pointer to the sg table.
|
||||
* @notifier_sem: The xe notifier lock.
|
||||
* @write: whether we write to this range. This decides dma map direction
|
||||
* for system pages. If write we map it bi-diretional; otherwise
|
||||
* DMA_TO_DEVICE
|
||||
|
@ -78,43 +113,84 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
|
|||
* Returns 0 if successful; -ENOMEM if fails to allocate memory
|
||||
*/
|
||||
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
|
||||
struct sg_table *st, bool write)
|
||||
struct sg_table *st,
|
||||
struct rw_semaphore *notifier_sem,
|
||||
bool write)
|
||||
{
|
||||
unsigned long npages = xe_npages_in_range(range->start, range->end);
|
||||
struct device *dev = xe->drm.dev;
|
||||
struct page **pages;
|
||||
u64 i, npages;
|
||||
int ret;
|
||||
struct scatterlist *sgl;
|
||||
struct page *page;
|
||||
unsigned long i, j;
|
||||
|
||||
npages = xe_npages_in_range(range->start, range->end);
|
||||
pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
lockdep_assert_held(notifier_sem);
|
||||
|
||||
for (i = 0; i < npages; i++) {
|
||||
pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
|
||||
xe_assert(xe, !is_device_private_page(pages[i]));
|
||||
i = 0;
|
||||
for_each_sg(st->sgl, sgl, st->nents, j) {
|
||||
unsigned long hmm_pfn, size;
|
||||
|
||||
hmm_pfn = range->hmm_pfns[i];
|
||||
page = hmm_pfn_to_page(hmm_pfn);
|
||||
xe_assert(xe, !is_device_private_page(page));
|
||||
|
||||
size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
|
||||
size -= page_to_pfn(page) & (size - 1);
|
||||
i += size;
|
||||
|
||||
if (unlikely(j == st->nents - 1)) {
|
||||
if (i > npages)
|
||||
size -= (i - npages);
|
||||
sg_mark_end(sgl);
|
||||
}
|
||||
sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
|
||||
}
|
||||
xe_assert(xe, i == npages);
|
||||
|
||||
ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
|
||||
xe_sg_segment_size(dev), GFP_KERNEL);
|
||||
if (ret)
|
||||
goto free_pages;
|
||||
|
||||
ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
|
||||
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
|
||||
if (ret) {
|
||||
sg_free_table(st);
|
||||
st = NULL;
|
||||
}
|
||||
|
||||
free_pages:
|
||||
kvfree(pages);
|
||||
return ret;
|
||||
return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
|
||||
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
|
||||
}
|
||||
|
||||
/*
|
||||
static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
|
||||
{
|
||||
struct xe_userptr *userptr = &uvma->userptr;
|
||||
struct xe_vm *vm = xe_vma_vm(&uvma->vma);
|
||||
|
||||
lockdep_assert_held_write(&vm->lock);
|
||||
lockdep_assert_held(&vm->userptr.notifier_lock);
|
||||
|
||||
mutex_lock(&userptr->unmap_mutex);
|
||||
xe_assert(vm->xe, !userptr->mapped);
|
||||
userptr->mapped = true;
|
||||
mutex_unlock(&userptr->unmap_mutex);
|
||||
}
|
||||
|
||||
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
|
||||
{
|
||||
struct xe_userptr *userptr = &uvma->userptr;
|
||||
struct xe_vma *vma = &uvma->vma;
|
||||
bool write = !xe_vma_read_only(vma);
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
struct xe_device *xe = vm->xe;
|
||||
|
||||
if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
|
||||
!lockdep_is_held_type(&vm->lock, 0) &&
|
||||
!(vma->gpuva.flags & XE_VMA_DESTROYED)) {
|
||||
/* Don't unmap in exec critical section. */
|
||||
xe_vm_assert_held(vm);
|
||||
/* Don't unmap while mapping the sg. */
|
||||
lockdep_assert_held(&vm->lock);
|
||||
}
|
||||
|
||||
mutex_lock(&userptr->unmap_mutex);
|
||||
if (userptr->sg && userptr->mapped)
|
||||
dma_unmap_sgtable(xe->drm.dev, userptr->sg,
|
||||
write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
|
||||
userptr->mapped = false;
|
||||
mutex_unlock(&userptr->unmap_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
|
||||
*
|
||||
* @uvma: the userptr vma which hold the scatter gather table
|
||||
*
|
||||
* With function xe_userptr_populate_range, we allocate storage of
|
||||
|
@ -124,16 +200,9 @@ free_pages:
|
|||
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
|
||||
{
|
||||
struct xe_userptr *userptr = &uvma->userptr;
|
||||
struct xe_vma *vma = &uvma->vma;
|
||||
bool write = !xe_vma_read_only(vma);
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
struct xe_device *xe = vm->xe;
|
||||
struct device *dev = xe->drm.dev;
|
||||
|
||||
xe_assert(xe, userptr->sg);
|
||||
dma_unmap_sgtable(dev, userptr->sg,
|
||||
write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
|
||||
|
||||
xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
|
||||
xe_hmm_userptr_unmap(uvma);
|
||||
sg_free_table(userptr->sg);
|
||||
userptr->sg = NULL;
|
||||
}
|
||||
|
@ -166,13 +235,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
|
|||
{
|
||||
unsigned long timeout =
|
||||
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
|
||||
unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
|
||||
unsigned long *pfns;
|
||||
struct xe_userptr *userptr;
|
||||
struct xe_vma *vma = &uvma->vma;
|
||||
u64 userptr_start = xe_vma_userptr(vma);
|
||||
u64 userptr_end = userptr_start + xe_vma_size(vma);
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
struct hmm_range hmm_range;
|
||||
struct hmm_range hmm_range = {
|
||||
.pfn_flags_mask = 0, /* ignore pfns */
|
||||
.default_flags = HMM_PFN_REQ_FAULT,
|
||||
.start = userptr_start,
|
||||
.end = userptr_end,
|
||||
.notifier = &uvma->userptr.notifier,
|
||||
.dev_private_owner = vm->xe,
|
||||
};
|
||||
bool write = !xe_vma_read_only(vma);
|
||||
unsigned long notifier_seq;
|
||||
u64 npages;
|
||||
|
@ -199,19 +275,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
|
|||
return -ENOMEM;
|
||||
|
||||
if (write)
|
||||
flags |= HMM_PFN_REQ_WRITE;
|
||||
hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
|
||||
|
||||
if (!mmget_not_zero(userptr->notifier.mm)) {
|
||||
ret = -EFAULT;
|
||||
goto free_pfns;
|
||||
}
|
||||
|
||||
hmm_range.default_flags = flags;
|
||||
hmm_range.hmm_pfns = pfns;
|
||||
hmm_range.notifier = &userptr->notifier;
|
||||
hmm_range.start = userptr_start;
|
||||
hmm_range.end = userptr_end;
|
||||
hmm_range.dev_private_owner = vm->xe;
|
||||
|
||||
while (true) {
|
||||
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
|
||||
|
@ -238,16 +309,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
|
|||
if (ret)
|
||||
goto free_pfns;
|
||||
|
||||
ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
|
||||
ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
|
||||
if (ret)
|
||||
goto free_pfns;
|
||||
|
||||
ret = down_read_interruptible(&vm->userptr.notifier_lock);
|
||||
if (ret)
|
||||
goto free_st;
|
||||
|
||||
if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
|
||||
ret = -EAGAIN;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
|
||||
&vm->userptr.notifier_lock, write);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
xe_mark_range_accessed(&hmm_range, write);
|
||||
userptr->sg = &userptr->sgt;
|
||||
xe_hmm_userptr_set_mapped(uvma);
|
||||
userptr->notifier_seq = hmm_range.notifier_seq;
|
||||
up_read(&vm->userptr.notifier_lock);
|
||||
kvfree(pfns);
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
up_read(&vm->userptr.notifier_lock);
|
||||
free_st:
|
||||
sg_free_table(&userptr->sgt);
|
||||
free_pfns:
|
||||
kvfree(pfns);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -3,9 +3,16 @@
|
|||
* Copyright © 2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_HMM_H_
|
||||
#define _XE_HMM_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct xe_userptr_vma;
|
||||
|
||||
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
|
||||
|
||||
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
|
||||
|
||||
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
|
||||
#endif
|
||||
|
|
|
@ -28,6 +28,8 @@ struct xe_pt_dir {
|
|||
struct xe_pt pt;
|
||||
/** @children: Array of page-table child nodes */
|
||||
struct xe_ptw *children[XE_PDES];
|
||||
/** @staging: Array of page-table staging nodes */
|
||||
struct xe_ptw *staging[XE_PDES];
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
|
||||
|
@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
|
|||
return container_of(pt, struct xe_pt_dir, pt);
|
||||
}
|
||||
|
||||
static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
|
||||
static struct xe_pt *
|
||||
xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
|
||||
{
|
||||
return container_of(pt_dir->children[index], struct xe_pt, base);
|
||||
return container_of(pt_dir->staging[index], struct xe_pt, base);
|
||||
}
|
||||
|
||||
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
|
||||
|
@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
|
|||
}
|
||||
pt->bo = bo;
|
||||
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
|
||||
pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
|
||||
|
||||
if (vm->xef)
|
||||
xe_drm_client_add_bo(vm->xef->client, pt->bo);
|
||||
|
@ -206,8 +210,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
|
|||
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
|
||||
|
||||
for (i = 0; i < XE_PDES; i++) {
|
||||
if (xe_pt_entry(pt_dir, i))
|
||||
xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
|
||||
if (xe_pt_entry_staging(pt_dir, i))
|
||||
xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
|
||||
deferred);
|
||||
}
|
||||
}
|
||||
|
@ -376,8 +380,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
|
|||
/* Continue building a non-connected subtree. */
|
||||
struct iosys_map *map = &parent->bo->vmap;
|
||||
|
||||
if (unlikely(xe_child))
|
||||
if (unlikely(xe_child)) {
|
||||
parent->base.children[offset] = &xe_child->base;
|
||||
parent->base.staging[offset] = &xe_child->base;
|
||||
}
|
||||
|
||||
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
|
||||
parent->num_live++;
|
||||
|
@ -614,6 +620,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
|
|||
.ops = &xe_pt_stage_bind_ops,
|
||||
.shifts = xe_normal_pt_shifts,
|
||||
.max_level = XE_PT_HIGHEST_LEVEL,
|
||||
.staging = true,
|
||||
},
|
||||
.vm = xe_vma_vm(vma),
|
||||
.tile = tile,
|
||||
|
@ -873,7 +880,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
|
|||
}
|
||||
}
|
||||
|
||||
static void xe_pt_commit_locks_assert(struct xe_vma *vma)
|
||||
static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
|
||||
{
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
|
||||
|
@ -885,6 +892,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
|
|||
xe_vm_assert_held(vm);
|
||||
}
|
||||
|
||||
static void xe_pt_commit_locks_assert(struct xe_vma *vma)
|
||||
{
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
|
||||
xe_pt_commit_prepare_locks_assert(vma);
|
||||
|
||||
if (xe_vma_is_userptr(vma))
|
||||
lockdep_assert_held_read(&vm->userptr.notifier_lock);
|
||||
}
|
||||
|
||||
static void xe_pt_commit(struct xe_vma *vma,
|
||||
struct xe_vm_pgtable_update *entries,
|
||||
u32 num_entries, struct llist_head *deferred)
|
||||
|
@ -895,13 +912,17 @@ static void xe_pt_commit(struct xe_vma *vma,
|
|||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
struct xe_pt *pt = entries[i].pt;
|
||||
struct xe_pt_dir *pt_dir;
|
||||
|
||||
if (!pt->level)
|
||||
continue;
|
||||
|
||||
pt_dir = as_xe_pt_dir(pt);
|
||||
for (j = 0; j < entries[i].qwords; j++) {
|
||||
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
|
||||
int j_ = j + entries[i].ofs;
|
||||
|
||||
pt_dir->children[j_] = pt_dir->staging[j_];
|
||||
xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
|
||||
}
|
||||
}
|
||||
|
@ -913,7 +934,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
|
|||
{
|
||||
int i, j;
|
||||
|
||||
xe_pt_commit_locks_assert(vma);
|
||||
xe_pt_commit_prepare_locks_assert(vma);
|
||||
|
||||
for (i = num_entries - 1; i >= 0; --i) {
|
||||
struct xe_pt *pt = entries[i].pt;
|
||||
|
@ -928,10 +949,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
|
|||
pt_dir = as_xe_pt_dir(pt);
|
||||
for (j = 0; j < entries[i].qwords; j++) {
|
||||
u32 j_ = j + entries[i].ofs;
|
||||
struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
|
||||
struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
|
||||
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
|
||||
|
||||
pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
|
||||
pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
|
||||
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
|
||||
}
|
||||
}
|
||||
|
@ -943,7 +964,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
|
|||
{
|
||||
u32 i, j;
|
||||
|
||||
xe_pt_commit_locks_assert(vma);
|
||||
xe_pt_commit_prepare_locks_assert(vma);
|
||||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
struct xe_pt *pt = entries[i].pt;
|
||||
|
@ -961,10 +982,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
|
|||
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
|
||||
struct xe_pt *oldpte = NULL;
|
||||
|
||||
if (xe_pt_entry(pt_dir, j_))
|
||||
oldpte = xe_pt_entry(pt_dir, j_);
|
||||
if (xe_pt_entry_staging(pt_dir, j_))
|
||||
oldpte = xe_pt_entry_staging(pt_dir, j_);
|
||||
|
||||
pt_dir->children[j_] = &newpte->base;
|
||||
pt_dir->staging[j_] = &newpte->base;
|
||||
entries[i].pt_entries[j].pt = oldpte;
|
||||
}
|
||||
}
|
||||
|
@ -1213,42 +1234,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
|
|||
return 0;
|
||||
|
||||
uvma = to_userptr_vma(vma);
|
||||
if (xe_pt_userptr_inject_eagain(uvma))
|
||||
xe_vma_userptr_force_invalidate(uvma);
|
||||
|
||||
notifier_seq = uvma->userptr.notifier_seq;
|
||||
|
||||
if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
|
||||
return 0;
|
||||
|
||||
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
|
||||
notifier_seq) &&
|
||||
!xe_pt_userptr_inject_eagain(uvma))
|
||||
notifier_seq))
|
||||
return 0;
|
||||
|
||||
if (xe_vm_in_fault_mode(vm)) {
|
||||
if (xe_vm_in_fault_mode(vm))
|
||||
return -EAGAIN;
|
||||
} else {
|
||||
spin_lock(&vm->userptr.invalidated_lock);
|
||||
list_move_tail(&uvma->userptr.invalidate_link,
|
||||
&vm->userptr.invalidated);
|
||||
spin_unlock(&vm->userptr.invalidated_lock);
|
||||
|
||||
if (xe_vm_in_preempt_fence_mode(vm)) {
|
||||
struct dma_resv_iter cursor;
|
||||
struct dma_fence *fence;
|
||||
long err;
|
||||
|
||||
dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
|
||||
DMA_RESV_USAGE_BOOKKEEP);
|
||||
dma_resv_for_each_fence_unlocked(&cursor, fence)
|
||||
dma_fence_enable_sw_signaling(fence);
|
||||
dma_resv_iter_end(&cursor);
|
||||
|
||||
err = dma_resv_wait_timeout(xe_vm_resv(vm),
|
||||
DMA_RESV_USAGE_BOOKKEEP,
|
||||
false, MAX_SCHEDULE_TIMEOUT);
|
||||
XE_WARN_ON(err <= 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Just continue the operation since exec or rebind worker
|
||||
* will take care of rebinding.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1514,6 +1515,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
|
|||
.ops = &xe_pt_stage_unbind_ops,
|
||||
.shifts = xe_normal_pt_shifts,
|
||||
.max_level = XE_PT_HIGHEST_LEVEL,
|
||||
.staging = true,
|
||||
},
|
||||
.tile = tile,
|
||||
.modified_start = xe_vma_start(vma),
|
||||
|
@ -1555,7 +1557,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
|
|||
{
|
||||
int i, j;
|
||||
|
||||
xe_pt_commit_locks_assert(vma);
|
||||
xe_pt_commit_prepare_locks_assert(vma);
|
||||
|
||||
for (i = num_entries - 1; i >= 0; --i) {
|
||||
struct xe_vm_pgtable_update *entry = &entries[i];
|
||||
|
@ -1568,7 +1570,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
|
|||
continue;
|
||||
|
||||
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
|
||||
pt_dir->children[j] =
|
||||
pt_dir->staging[j] =
|
||||
entries[i].pt_entries[j - entry->ofs].pt ?
|
||||
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
|
||||
}
|
||||
|
@ -1581,7 +1583,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
|
|||
{
|
||||
int i, j;
|
||||
|
||||
xe_pt_commit_locks_assert(vma);
|
||||
xe_pt_commit_prepare_locks_assert(vma);
|
||||
|
||||
for (i = 0; i < num_entries; ++i) {
|
||||
struct xe_vm_pgtable_update *entry = &entries[i];
|
||||
|
@ -1595,8 +1597,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
|
|||
pt_dir = as_xe_pt_dir(pt);
|
||||
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
|
||||
entry->pt_entries[j - entry->ofs].pt =
|
||||
xe_pt_entry(pt_dir, j);
|
||||
pt_dir->children[j] = NULL;
|
||||
xe_pt_entry_staging(pt_dir, j);
|
||||
pt_dir->staging[j] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
|
|||
u64 addr, u64 end, struct xe_pt_walk *walk)
|
||||
{
|
||||
pgoff_t offset = xe_pt_offset(addr, level, walk);
|
||||
struct xe_ptw **entries = parent->children ? parent->children : NULL;
|
||||
struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
|
||||
(parent->children ?: NULL);
|
||||
const struct xe_pt_walk_ops *ops = walk->ops;
|
||||
enum page_walk_action action;
|
||||
struct xe_ptw *child;
|
||||
|
|
|
@ -11,12 +11,14 @@
|
|||
/**
|
||||
* struct xe_ptw - base class for driver pagetable subclassing.
|
||||
* @children: Pointer to an array of children if any.
|
||||
* @staging: Pointer to an array of staging if any.
|
||||
*
|
||||
* Drivers could subclass this, and if it's a page-directory, typically
|
||||
* embed an array of xe_ptw pointers.
|
||||
*/
|
||||
struct xe_ptw {
|
||||
struct xe_ptw **children;
|
||||
struct xe_ptw **staging;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -41,6 +43,8 @@ struct xe_pt_walk {
|
|||
* as shared pagetables.
|
||||
*/
|
||||
bool shared_pt_mode;
|
||||
/** @staging: Walk staging PT structure */
|
||||
bool staging;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -579,51 +579,26 @@ out_unlock_outer:
|
|||
trace_xe_vm_rebind_worker_exit(vm);
|
||||
}
|
||||
|
||||
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
|
||||
const struct mmu_notifier_range *range,
|
||||
unsigned long cur_seq)
|
||||
static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
|
||||
{
|
||||
struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
|
||||
struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
|
||||
struct xe_userptr *userptr = &uvma->userptr;
|
||||
struct xe_vma *vma = &uvma->vma;
|
||||
struct xe_vm *vm = xe_vma_vm(vma);
|
||||
struct dma_resv_iter cursor;
|
||||
struct dma_fence *fence;
|
||||
long err;
|
||||
|
||||
xe_assert(vm->xe, xe_vma_is_userptr(vma));
|
||||
trace_xe_vma_userptr_invalidate(vma);
|
||||
|
||||
if (!mmu_notifier_range_blockable(range))
|
||||
return false;
|
||||
|
||||
vm_dbg(&xe_vma_vm(vma)->xe->drm,
|
||||
"NOTIFIER: addr=0x%016llx, range=0x%016llx",
|
||||
xe_vma_start(vma), xe_vma_size(vma));
|
||||
|
||||
down_write(&vm->userptr.notifier_lock);
|
||||
	mmu_interval_set_seq(mni, cur_seq);

	/* No need to stop gpu access if the userptr is not yet bound. */
	if (!userptr->initial_bind) {
		up_write(&vm->userptr.notifier_lock);
		return true;
	}

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	up_write(&vm->userptr.notifier_lock);

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and

@@ -641,11 +616,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
					    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm)) {
	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	xe_hmm_userptr_unmap(uvma);
}

static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
	       xe_vma_start(vma), xe_vma_size(vma));

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	__vma_userptr_invalidate(vm, uvma);
	up_write(&vm->userptr.notifier_lock);
	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;

@@ -655,6 +656,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/**
 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
 * @uvma: The userptr vma to invalidate
 *
 * Perform a forced userptr invalidation for testing purposes.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	/* Protect against concurrent userptr pinning */
	lockdep_assert_held(&vm->lock);
	/* Protect against concurrent notifiers */
	lockdep_assert_held(&vm->userptr.notifier_lock);
	/*
	 * Protect against concurrent instances of this function and
	 * the critical exec sections
	 */
	xe_vm_assert_held(vm);

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.notifier_seq))
		uvma->userptr.notifier_seq -= 2;
	__vma_userptr_invalidate(vm, uvma);
}
#endif

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;

@@ -1012,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
		INIT_LIST_HEAD(&userptr->invalidate_link);
		INIT_LIST_HEAD(&userptr->repin_link);
		vma->gpuva.gem.offset = bo_offset_or_userptr;
		mutex_init(&userptr->unmap_mutex);

		err = mmu_interval_notifier_insert(&userptr->notifier,
						   current->mm,

@@ -1053,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
		 * them anymore
		 */
		mmu_interval_notifier_remove(&userptr->notifier);
		mutex_destroy(&userptr->unmap_mutex);
		xe_vm_put(vm);
	} else if (xe_vma_is_null(vma)) {
		xe_vm_put(vm);

@@ -2286,8 +2317,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
			break;
		}
		case DRM_GPUVA_OP_UNMAP:
			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
			break;
		case DRM_GPUVA_OP_PREFETCH:
			/* FIXME: Need to skip some prefetch ops */
			vma = gpuva_to_vma(op->base.prefetch.va);

			if (xe_vma_is_userptr(vma)) {
				err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
				if (err)
					return err;
			}

			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
			break;
		default:

@@ -274,9 +274,17 @@ static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif

@@ -59,12 +59,16 @@ struct xe_userptr {
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/** @unmap_mutex: Mutex protecting dma-unmapping */
	struct mutex unmap_mutex;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
	/** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
	bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	u32 divisor;
#endif

@@ -227,8 +231,8 @@ struct xe_vm {
		 * up for revalidation. Protected from access with the
		 * @invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifer_lock in
		 * write mode.
		 * items to the list requires either the @userptr.notifer_lock in
		 * write mode, OR @lock in write mode.
		 */
		struct list_head invalidated;
	} userptr;

@@ -2262,12 +2262,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If all bytes of the MMIO range to be released are within the
	 * special case fb_mmio shadow region, skip releasing the shadow
	 * region since no corresponding __request_region() was done
	 * in vmbus_allocate_mmio().
	 */
	if (fb_mmio && start >= fb_mmio->start &&
	    (start + size - 1 <= fb_mmio->end))
		goto skip_shadow_release;

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}

skip_shadow_release:
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);

@@ -22,11 +22,13 @@
 */
#define AD7314_TEMP_MASK 0x7FE0
#define AD7314_TEMP_SHIFT 5
#define AD7314_LEADING_ZEROS_MASK BIT(15)

/*
 * ADT7301 and ADT7302 temperature masks
 */
#define ADT7301_TEMP_MASK 0x3FFF
#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14))

enum ad7314_variant {
	adt7301,

@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
		return ret;
	switch (spi_get_device_id(chip->spi_dev)->driver_data) {
	case ad7314:
		if (ret & AD7314_LEADING_ZEROS_MASK) {
			/* Invalid read-out, leading zero part is missing */
			return -EIO;
		}
		data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
		data = sign_extend32(data, 9);

		return sprintf(buf, "%d\n", 250 * data);
	case adt7301:
	case adt7302:
		if (ret & ADT7301_LEADING_ZEROS_MASK) {
			/* Invalid read-out, leading zero part is missing */
			return -EIO;
		}
		/*
		 * Documented as a 13 bit twos complement register
		 * with a sign bit - which is a 14 bit 2's complement

@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
};

static const struct ntc_compensation ncpXXxh103[] = {
	{ .temp_c = -40, .ohm = 247565 },
	{ .temp_c = -35, .ohm = 181742 },
	{ .temp_c = -30, .ohm = 135128 },
	{ .temp_c = -25, .ohm = 101678 },
	{ .temp_c = -20, .ohm = 77373 },
	{ .temp_c = -15, .ohm = 59504 },
	{ .temp_c = -10, .ohm = 46222 },
	{ .temp_c = -5, .ohm = 36244 },
	{ .temp_c = 0, .ohm = 28674 },
	{ .temp_c = 5, .ohm = 22878 },
	{ .temp_c = 10, .ohm = 18399 },
	{ .temp_c = 15, .ohm = 14910 },
	{ .temp_c = 20, .ohm = 12169 },
	{ .temp_c = -40, .ohm = 195652 },
	{ .temp_c = -35, .ohm = 148171 },
	{ .temp_c = -30, .ohm = 113347 },
	{ .temp_c = -25, .ohm = 87559 },
	{ .temp_c = -20, .ohm = 68237 },
	{ .temp_c = -15, .ohm = 53650 },
	{ .temp_c = -10, .ohm = 42506 },
	{ .temp_c = -5, .ohm = 33892 },
	{ .temp_c = 0, .ohm = 27219 },
	{ .temp_c = 5, .ohm = 22021 },
	{ .temp_c = 10, .ohm = 17926 },
	{ .temp_c = 15, .ohm = 14674 },
	{ .temp_c = 20, .ohm = 12081 },
	{ .temp_c = 25, .ohm = 10000 },
	{ .temp_c = 30, .ohm = 8271 },
	{ .temp_c = 35, .ohm = 6883 },
	{ .temp_c = 40, .ohm = 5762 },
	{ .temp_c = 45, .ohm = 4851 },
	{ .temp_c = 50, .ohm = 4105 },
	{ .temp_c = 55, .ohm = 3492 },
	{ .temp_c = 60, .ohm = 2985 },
	{ .temp_c = 65, .ohm = 2563 },
	{ .temp_c = 70, .ohm = 2211 },
	{ .temp_c = 75, .ohm = 1915 },
	{ .temp_c = 80, .ohm = 1666 },
	{ .temp_c = 85, .ohm = 1454 },
	{ .temp_c = 90, .ohm = 1275 },
	{ .temp_c = 95, .ohm = 1121 },
	{ .temp_c = 100, .ohm = 990 },
	{ .temp_c = 105, .ohm = 876 },
	{ .temp_c = 110, .ohm = 779 },
	{ .temp_c = 115, .ohm = 694 },
	{ .temp_c = 120, .ohm = 620 },
	{ .temp_c = 125, .ohm = 556 },
	{ .temp_c = 30, .ohm = 8315 },
	{ .temp_c = 35, .ohm = 6948 },
	{ .temp_c = 40, .ohm = 5834 },
	{ .temp_c = 45, .ohm = 4917 },
	{ .temp_c = 50, .ohm = 4161 },
	{ .temp_c = 55, .ohm = 3535 },
	{ .temp_c = 60, .ohm = 3014 },
	{ .temp_c = 65, .ohm = 2586 },
	{ .temp_c = 70, .ohm = 2228 },
	{ .temp_c = 75, .ohm = 1925 },
	{ .temp_c = 80, .ohm = 1669 },
	{ .temp_c = 85, .ohm = 1452 },
	{ .temp_c = 90, .ohm = 1268 },
	{ .temp_c = 95, .ohm = 1110 },
	{ .temp_c = 100, .ohm = 974 },
	{ .temp_c = 105, .ohm = 858 },
	{ .temp_c = 110, .ohm = 758 },
	{ .temp_c = 115, .ohm = 672 },
	{ .temp_c = 120, .ohm = 596 },
	{ .temp_c = 125, .ohm = 531 },
};

/*

@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
		return 0;

	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
	if (ret == -ENODATA) /* Use default or previous value */
		return 0;
	if (ret)
		return ret;

@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */
		return -ENODATA;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */
		return -ENODATA;

	/*
	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0

@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */
		return -ENODATA;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */
		return -ENODATA;

	/*
	 * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0

@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
	if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
		int page;

		info->pages = PMBUS_PAGES;

		for (page = 1; page < PMBUS_PAGES; page++) {
			if (pmbus_set_page(client, page, 0xff) < 0)
				break;

@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
			goto out;
		}

		if (!ctx->pcc_comm_addr) {
		if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
			dev_err(&pdev->dev,
				"Failed to ioremap PCC comm region\n");
			rc = -ENOMEM;

@@ -105,23 +105,32 @@ struct msc_iter {

/**
 * struct msc - MSC device representation
 * @reg_base: register window base address
 * @reg_base: register window base address for the entire MSU
 * @msu_base: register window base address for this MSC
 * @thdev: intel_th_device pointer
 * @mbuf: MSU buffer, if assigned
 * @mbuf_priv MSU buffer's private data, if @mbuf
 * @mbuf_priv: MSU buffer's private data, if @mbuf
 * @work: a work to stop the trace when the buffer is full
 * @win_list: list of windows in multiblock mode
 * @single_sgt: single mode buffer
 * @cur_win: current window
 * @switch_on_unlock: window to switch to when it becomes available
 * @nr_pages: total number of pages allocated for this buffer
 * @single_sz: amount of data in single mode
 * @single_wrap: single mode wrap occurred
 * @base: buffer's base pointer
 * @base_addr: buffer's base address
 * @orig_addr: MSC0 buffer's base address
 * @orig_sz: MSC0 buffer's size
 * @user_count: number of users of the buffer
 * @mmap_count: number of mappings
 * @buf_mutex: mutex to serialize access to buffer-related bits
 * @iter_list: list of open file descriptor iterators
 * @stop_on_full: stop the trace if the current window is full
 * @enabled: MSC is enabled
 * @wrap: wrapping is enabled
 * @do_irq: IRQ resource is available, handle interrupts
 * @multi_is_broken: multiblock mode enabled (not disabled by PCI drvdata)
 * @mode: MSC operating mode
 * @burst_len: write burst length
 * @index: number of this MSC in the MSU

@@ -334,6 +334,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
		.driver_data = (kernel_ulong_t)&intel_th_2x,
	},
	{
		/* Arrow Lake */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
		.driver_data = (kernel_ulong_t)&intel_th_2x,
	},
	{
		/* Panther Lake-H */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
		.driver_data = (kernel_ulong_t)&intel_th_2x,
	},
	{
		/* Panther Lake-P/U */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
		.driver_data = (kernel_ulong_t)&intel_th_2x,
	},
	{
		/* Alder Lake CPU */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),

@@ -1084,7 +1084,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon

	conf &= ~AD7192_CONF_CHAN_MASK;
	for_each_set_bit(i, scan_mask, 8)
		conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
		conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));

	ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
	if (ret < 0)

@@ -1047,7 +1047,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,

	cs = &st->chan_scales[ch];
	*vals = (int *)cs->scale_avail;
	*length = cs->num_scales;
	*length = cs->num_scales * 2;
	*type = IIO_VAL_INT_PLUS_MICRO;

	return IIO_AVAIL_LIST;

@@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128

#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \
	{ \
		.type = IIO_VOLTAGE, \
		.channel = num, \

@@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
		.scan_index = index, \
		.scan_type = { \
			.sign = 'u', \
			.realbits = 14, \
			.realbits = rbits, \
			.storagebits = 16, \
		}, \
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \

@@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
		.indexed = 1, \
	}

#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
	AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)

#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \
	AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)

#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \
	{ \
		.type = IIO_VOLTAGE, \
		.differential = 1, \

@@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
		.scan_index = index, \
		.scan_type = { \
			.sign = 's', \
			.realbits = 14, \
			.realbits = rbits, \
			.storagebits = 16, \
		}, \
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \

@@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
		.indexed = 1, \
	}

#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
	AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)

#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \
	AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)

#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
	{ \
		.type = IIO_POSITIONRELATIVE, \

@@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
};

static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
	AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
	AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
	AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
	AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
	AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
	AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
	AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
	AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
	AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
	AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
	AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
	AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
	AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
	AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
	AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
	AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
	AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
	AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
	AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
	AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
	AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
	AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
	AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
	AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
	AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
	AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
	AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
	AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
	AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
	AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
	AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
	AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
	AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
	AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
	AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
	AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
	AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
	AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
	AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
	AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
	AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
	AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
	AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
	AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
	AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
	AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
	AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
	AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
	IIO_CHAN_SOFT_TIMESTAMP(24),
	AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};

@@ -1198,11 +1198,11 @@ static int pac1921_match_acpi_device(struct iio_dev *indio_dev)

	label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
			     GFP_KERNEL);
	ACPI_FREE(status);
	if (!label)
		return -ENOMEM;

	indio_dev->label = label;
	ACPI_FREE(status);

	return 0;
}