Driver core fixes for 6.16-rc3


Merge tag 'driver-core-6.16-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core

Pull driver core fixes from Danilo Krummrich:

 - Fix a race condition in Devres::drop(). This depends on two other
   patches:
     - (Minimal) Rust abstractions for struct completion
     - Let Revocable indicate whether its data is already being revoked

 - Fix Devres to avoid exposing the internal Revocable

 - Add .mailmap entry for Danilo Krummrich

 - Add Madhavan Srinivasan to embargoed-hardware-issues.rst

* tag 'driver-core-6.16-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core:
  Documentation: embargoed-hardware-issues.rst: Add myself for Power
  mailmap: add entry for Danilo Krummrich
  rust: devres: do not dereference to the internal Revocable
  rust: devres: fix race in Devres::drop()
  rust: revocable: indicate whether `data` has been revoked already
  rust: completion: implement initial abstraction
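
For illustration: the "do not dereference to the internal Revocable" change means drivers now reach the wrapped data through Devres' own accessors rather than via Deref. A minimal sketch of the resulting API use, assuming a hypothetical `Registers` resource type and `read_status()` helpers (not part of this series):

    use kernel::devres::Devres;

    /// Hypothetical MMIO-style resource; stands in for whatever a driver
    /// wraps in `Devres`.
    struct Registers {
        status: u32,
    }

    /// Reads the status field, or returns `None` once the device has been
    /// unbound and the resource revoked.
    fn read_status(res: &Devres<Registers>) -> Option<u32> {
        // `try_access()` hands out a guard only while the data is still
        // available.
        let regs = res.try_access()?;
        Some(regs.status)
    }

    /// The same read via the closure-based accessor, which avoids holding
    /// the guard at the call site.
    fn read_status_with(res: &Devres<Registers>) -> Option<u32> {
        res.try_access_with(|regs| regs.status)
    }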
Merged by Linus Torvalds on 2025-06-18 14:31:16 -07:00 (commit 229f135e06)
9 changed files with 183 additions and 21 deletions

.mailmap

@@ -197,6 +197,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
David Heidelberg <david@ixit.cz> <d.okias@gmail.com>

Documentation/process/embargoed-hardware-issues.rst

@@ -290,6 +290,7 @@ an involved disclosed party. The current ambassadors list:
AMD Tom Lendacky <thomas.lendacky@amd.com>
Ampere Darren Hart <darren@os.amperecomputing.com>
ARM Catalin Marinas <catalin.marinas@arm.com>
IBM Power Madhavan Srinivasan <maddy@linux.ibm.com>
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <quic_tsoni@quicinc.com>

rust/bindings/bindings_helper.h

@@ -39,6 +39,7 @@
#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>

rust/helpers/completion.c

@@ -0,0 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/completion.h>
void rust_helper_init_completion(struct completion *x)
{
init_completion(x);
}

rust/helpers/helpers.c

@@ -13,6 +13,7 @@
#include "build_assert.c"
#include "build_bug.c"
#include "clk.c"
#include "completion.c"
#include "cpu.c"
#include "cpufreq.c"
#include "cpumask.c"

rust/kernel/devres.rs

@@ -12,26 +12,28 @@ use crate::{
error::{Error, Result},
ffi::c_void,
prelude::*,
revocable::Revocable,
sync::Arc,
revocable::{Revocable, RevocableGuard},
sync::{rcu, Arc, Completion},
types::ARef,
};
use core::ops::Deref;
#[pin_data]
struct DevresInner<T> {
dev: ARef<Device>,
callback: unsafe extern "C" fn(*mut c_void),
#[pin]
data: Revocable<T>,
#[pin]
revoke: Completion,
}
/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
/// manage their lifetime.
///
/// [`Device`] bound resources should be freed when either the resource goes out of scope or the
/// [`Device`] is unbound respectively, depending on what happens first.
/// [`Device`] is unbound respectively, depending on what happens first. In any case, it is always
/// guaranteed that revoking the device resource is completed before the corresponding [`Device`]
/// is unbound.
///
/// To achieve that [`Devres`] registers a devres callback on creation, which is called once the
/// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]).
@@ -102,6 +104,7 @@ impl<T> DevresInner<T> {
dev: dev.into(),
callback: Self::devres_callback,
data <- Revocable::new(data),
revoke <- Completion::new(),
}),
flags,
)?;
@@ -130,26 +133,28 @@ impl<T> DevresInner<T> {
self as _
}
fn remove_action(this: &Arc<Self>) {
fn remove_action(this: &Arc<Self>) -> bool {
// SAFETY:
// - `self.inner.dev` is a valid `Device`,
// - the `action` and `data` pointers are the exact same ones as given to devm_add_action()
// previously,
// - `self` is always valid, even if the action has been released already.
let ret = unsafe {
let success = unsafe {
bindings::devm_remove_action_nowarn(
this.dev.as_raw(),
Some(this.callback),
this.as_ptr() as _,
)
};
} == 0;
if ret == 0 {
if success {
// SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
// devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
// of this reference.
let _ = unsafe { Arc::from_raw(this.as_ptr()) };
}
success
}
#[allow(clippy::missing_safety_doc)]
@@ -161,7 +166,12 @@ impl<T> DevresInner<T> {
// `DevresInner::new`.
let inner = unsafe { Arc::from_raw(ptr) };
inner.data.revoke();
if !inner.data.revoke() {
// If `revoke()` returns false, it means that `Devres::drop` already started revoking
// `inner.data` for us. Hence we have to wait until `Devres::drop()` signals that it
// completed revoking `inner.data`.
inner.revoke.wait_for_completion();
}
}
}
@@ -218,20 +228,36 @@ impl<T> Devres<T> {
// SAFETY: `dev` being the same device as the device this `Devres` has been created for
// proves that `self.0.data` hasn't been revoked and is guaranteed to not be revoked as
// long as `dev` lives; `dev` lives at least as long as `self`.
Ok(unsafe { self.deref().access() })
Ok(unsafe { self.0.data.access() })
}
}
impl<T> Deref for Devres<T> {
type Target = Revocable<T>;
/// [`Devres`] accessor for [`Revocable::try_access`].
pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
self.0.data.try_access()
}
fn deref(&self) -> &Self::Target {
&self.0.data
/// [`Devres`] accessor for [`Revocable::try_access_with`].
pub fn try_access_with<R, F: FnOnce(&T) -> R>(&self, f: F) -> Option<R> {
self.0.data.try_access_with(f)
}
/// [`Devres`] accessor for [`Revocable::try_access_with_guard`].
pub fn try_access_with_guard<'a>(&'a self, guard: &'a rcu::Guard) -> Option<&'a T> {
self.0.data.try_access_with_guard(guard)
}
}
impl<T> Drop for Devres<T> {
fn drop(&mut self) {
DevresInner::remove_action(&self.0);
// SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
// anymore, hence it is safe not to wait for the grace period to finish.
if unsafe { self.0.data.revoke_nosync() } {
// We revoked `self.0.data` before the devres action did, hence try to remove it.
if !DevresInner::remove_action(&self.0) {
// We could not remove the devres action, which means that it now runs concurrently,
// hence signal that `self.0.data` has been revoked successfully.
self.0.revoke.complete_all();
}
}
}
}
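
For illustration, the race fix in `Devres::drop()` above boils down to a completion handshake between two teardown paths that may run concurrently: whichever path revokes the data first owns the teardown, and if the devres callback loses that race it waits until the drop side signals that revocation has finished. A schematic sketch, assuming hypothetical `Teardown`, `drop_side()` and `callback_side()` names and an `action_removed` flag standing in for the result of `devm_remove_action_nowarn()` (none of these are kernel API):

    use core::sync::atomic::{AtomicBool, Ordering};
    use kernel::prelude::*;
    use kernel::sync::Completion;

    /// Schematic stand-in for `DevresInner`: `revoked` plays the role of the
    /// `Revocable`'s availability flag, `done` the role of the new `revoke`
    /// completion.
    #[pin_data]
    struct Teardown {
        revoked: AtomicBool,
        #[pin]
        done: Completion,
    }

    /// Corresponds to `Devres::drop()`: revoke unless somebody else already
    /// did, and if the devres action could not be removed (the callback is
    /// already running) signal that revocation has finished.
    fn drop_side(t: &Teardown, action_removed: bool) {
        if !t.revoked.swap(true, Ordering::Relaxed) {
            // ...drop the wrapped data here...
            if !action_removed {
                t.done.complete_all();
            }
        }
    }

    /// Corresponds to `devres_callback()`: if the drop side won the race, do
    /// not return (and thus do not let the device finish unbinding) before
    /// the drop side has signalled that the data is gone.
    fn callback_side(t: &Teardown) {
        if t.revoked.swap(true, Ordering::Relaxed) {
            t.done.wait_for_completion();
        } else {
            // ...drop the wrapped data and wait for the RCU grace period...
        }
    }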

rust/kernel/revocable.rs

@@ -154,8 +154,10 @@ impl<T> Revocable<T> {
/// # Safety
///
/// Callers must ensure that there are no more concurrent users of the revocable object.
unsafe fn revoke_internal<const SYNC: bool>(&self) {
if self.is_available.swap(false, Ordering::Relaxed) {
unsafe fn revoke_internal<const SYNC: bool>(&self) -> bool {
let revoke = self.is_available.swap(false, Ordering::Relaxed);
if revoke {
if SYNC {
// SAFETY: Just an FFI call, there are no further requirements.
unsafe { bindings::synchronize_rcu() };
@@ -165,6 +167,8 @@ impl<T> Revocable<T> {
// `compare_exchange` above that takes `is_available` from `true` to `false`.
unsafe { drop_in_place(self.data.get()) };
}
revoke
}
/// Revokes access to and drops the wrapped object.
@@ -172,10 +176,13 @@ impl<T> Revocable<T> {
/// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
/// expecting that there are no concurrent users of the object.
///
/// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
/// already.
///
/// # Safety
///
/// Callers must ensure that there are no more concurrent users of the revocable object.
pub unsafe fn revoke_nosync(&self) {
pub unsafe fn revoke_nosync(&self) -> bool {
// SAFETY: By the safety requirement of this function, the caller ensures that nobody is
// accessing the data anymore and hence we don't have to wait for the grace period to
// finish.
@@ -189,7 +196,10 @@ impl<T> Revocable<T> {
/// If there are concurrent users of the object (i.e., ones that called
/// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
/// function waits for the concurrent access to complete before dropping the wrapped object.
pub fn revoke(&self) {
///
/// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
/// already.
pub fn revoke(&self) -> bool {
// SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
// finish.
unsafe { self.revoke_internal::<true>() }
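
The new boolean return value of `revoke()` / `revoke_nosync()` is what lets callers such as `Devres` tell which of several concurrent teardown paths actually performed the revocation. A minimal sketch with a hypothetical `teardown()` helper (not from this series):

    use kernel::prelude::*;
    use kernel::revocable::Revocable;

    /// Several teardown paths may call this on the same object, but only the
    /// caller that actually performed the revocation sees `true` and should
    /// go on to release anything tied to the data.
    fn teardown<T>(data: &Revocable<T>) {
        if data.revoke() {
            // This call flipped the data from available to revoked; unlike
            // the unsafe `revoke_nosync()`, `revoke()` also waits for the
            // RCU grace period, so no concurrent reader still holds a guard.
            pr_info!("revoked by this caller\n");
        } else {
            pr_info!("already revoked by another caller\n");
        }
    }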

rust/kernel/sync.rs

@@ -10,6 +10,7 @@ use crate::types::Opaque;
use pin_init;
mod arc;
pub mod completion;
mod condvar;
pub mod lock;
mod locked_by;
@@ -17,6 +18,7 @@ pub mod poll;
pub mod rcu;
pub use arc::{Arc, ArcBorrow, UniqueArc};
pub use completion::Completion;
pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
pub use lock::mutex::{new_mutex, Mutex, MutexGuard};

rust/kernel/sync/completion.rs

@@ -0,0 +1,112 @@
// SPDX-License-Identifier: GPL-2.0
//! Completion support.
//!
//! Reference: <https://docs.kernel.org/scheduler/completion.html>
//!
//! C header: [`include/linux/completion.h`](srctree/include/linux/completion.h)
use crate::{bindings, prelude::*, types::Opaque};
/// Synchronization primitive to signal when a certain task has been completed.
///
/// The [`Completion`] synchronization primitive signals when a certain task has been completed by
/// waking up other tasks that have been queued up to wait for the [`Completion`] to be completed.
///
/// # Examples
///
/// ```
/// use kernel::sync::{Arc, Completion};
/// use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
///
/// #[pin_data]
/// struct MyTask {
/// #[pin]
/// work: Work<MyTask>,
/// #[pin]
/// done: Completion,
/// }
///
/// impl_has_work! {
/// impl HasWork<Self> for MyTask { self.work }
/// }
///
/// impl MyTask {
/// fn new() -> Result<Arc<Self>> {
/// let this = Arc::pin_init(pin_init!(MyTask {
/// work <- new_work!("MyTask::work"),
/// done <- Completion::new(),
/// }), GFP_KERNEL)?;
///
/// let _ = workqueue::system().enqueue(this.clone());
///
/// Ok(this)
/// }
///
/// fn wait_for_completion(&self) {
/// self.done.wait_for_completion();
///
/// pr_info!("Completion: task complete\n");
/// }
/// }
///
/// impl WorkItem for MyTask {
/// type Pointer = Arc<MyTask>;
///
/// fn run(this: Arc<MyTask>) {
/// // process this task
/// this.done.complete_all();
/// }
/// }
///
/// let task = MyTask::new()?;
/// task.wait_for_completion();
/// # Ok::<(), Error>(())
/// ```
#[pin_data]
pub struct Completion {
#[pin]
inner: Opaque<bindings::completion>,
}
// SAFETY: `Completion` is safe to be send to any task.
unsafe impl Send for Completion {}
// SAFETY: `Completion` is safe to be accessed concurrently.
unsafe impl Sync for Completion {}
impl Completion {
/// Create an initializer for a new [`Completion`].
pub fn new() -> impl PinInit<Self> {
pin_init!(Self {
inner <- Opaque::ffi_init(|slot: *mut bindings::completion| {
// SAFETY: `slot` is a valid pointer to an uninitialized `struct completion`.
unsafe { bindings::init_completion(slot) };
}),
})
}
fn as_raw(&self) -> *mut bindings::completion {
self.inner.get()
}
/// Signal all tasks waiting on this completion.
///
/// This method wakes up all tasks waiting on this completion; after this operation the
/// completion is permanently done, i.e. signals all current and future waiters.
pub fn complete_all(&self) {
// SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
unsafe { bindings::complete_all(self.as_raw()) };
}
/// Wait for completion of a task.
///
/// This method waits for the completion of a task; it is not interruptible and there is no
/// timeout.
///
/// See also [`Completion::complete_all`].
pub fn wait_for_completion(&self) {
// SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
unsafe { bindings::wait_for_completion(self.as_raw()) };
}
}