mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
Merge branch 'rework/misc-cleanups' into for-linus
commit f0f6923953
5 changed files with 156 additions and 73 deletions
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -154,6 +154,8 @@ static inline int con_debug_leave(void)
  *			receiving the printk spam for obvious reasons.
  * @CON_EXTENDED:	The console supports the extended output format of
  *			/dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED:	Indicates if a console is suspended. If true, the
+ *			printing callbacks must not be called.
  */
 enum cons_flags {
 	CON_PRINTBUFFER		= BIT(0),
@@ -163,6 +165,7 @@ enum cons_flags {
 	CON_ANYTIME		= BIT(4),
 	CON_BRL			= BIT(5),
 	CON_EXTENDED		= BIT(6),
+	CON_SUSPENDED		= BIT(7),
 };
 
 /**
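CON_SUSPENDED joins the existing cons_flags bitmask and is meant to be read through the SRCU flag accessors rather than directly. A minimal sketch of the check, modeled on the console_is_usable() hunk further down in this diff (@con stands for any registered console):

	short flags = console_srcu_read_flags(con);	/* must run under console_srcu_read_lock() */

	if (flags & CON_SUSPENDED)
		return false;	/* printing callbacks must not be called */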
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -576,6 +576,8 @@ static void kdb_msg_write(const char *msg, int msg_len)
 			continue;
 		if (c == dbg_io_ops->cons)
 			continue;
+		if (!c->write)
+			continue;
 		/*
 		 * Set oops_in_progress to encourage the console drivers to
 		 * disregard their internal spin locks: in the current calling
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -103,3 +103,5 @@ struct printk_message {
 	u64			seq;
 	unsigned long		dropped;
 };
+
+bool other_cpu_in_panic(void);
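The new declaration makes other_cpu_in_panic() available to the rest of the printk internals; its intended call pattern mirrors the console_trylock() change later in this diff. A minimal sketch (the surrounding acquire path is illustrative, not a specific function):

	/* Back off: the console lock must be left to the panic CPU. */
	if (other_cpu_in_panic())
		return 0;

	if (down_trylock_console_sem())
		return 0;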
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(oops_in_progress);
 static DEFINE_MUTEX(console_mutex);
 
 /*
- * console_sem protects updates to console->seq and console_suspended,
+ * console_sem protects updates to console->seq
  * and also provides serialization for console printing.
  */
 static DEFINE_SEMAPHORE(console_sem);
@@ -359,7 +359,7 @@ static bool panic_in_progress(void)
  * paths in the console code where we end up in places I want
  * locked without the console semaphore held).
  */
-static int console_locked, console_suspended;
+static int console_locked;
 
 /*
  * Array of consoles built from command line options (console=)
@@ -2308,7 +2308,11 @@ asmlinkage int vprintk_emit(int facility, int level,
 		preempt_enable();
 	}
 
-	wake_up_klogd();
+	if (in_sched)
+		defer_console_output();
+	else
+		wake_up_klogd();
+
 	return printed_len;
 }
 EXPORT_SYMBOL(vprintk_emit);
@@ -2547,22 +2551,46 @@ MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to hig
  */
 void suspend_console(void)
 {
+	struct console *con;
+
 	if (!console_suspend_enabled)
 		return;
 	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
 	pr_flush(1000, true);
-	console_lock();
-	console_suspended = 1;
-	up_console_sem();
+
+	console_list_lock();
+	for_each_console(con)
+		console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
+	console_list_unlock();
+
+	/*
+	 * Ensure that all SRCU list walks have completed. All printing
+	 * contexts must be able to see that they are suspended so that it
+	 * is guaranteed that all printing has stopped when this function
+	 * completes.
+	 */
+	synchronize_srcu(&console_srcu);
 }
 
 void resume_console(void)
 {
+	struct console *con;
+
 	if (!console_suspend_enabled)
 		return;
-	down_console_sem();
-	console_suspended = 0;
-	console_unlock();
+
+	console_list_lock();
+	for_each_console(con)
+		console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
+	console_list_unlock();
+
+	/*
+	 * Ensure that all SRCU list walks have completed. All printing
+	 * contexts must be able to see they are no longer suspended so
+	 * that they are guaranteed to wake up and resume printing.
+	 */
+	synchronize_srcu(&console_srcu);
 
 	pr_flush(1000, true);
 }
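The synchronize_srcu(&console_srcu) calls above give the stated guarantee because every printing path walks the console list inside an SRCU read-side critical section and tests the per-console flags there. A simplified sketch of that reader side, using the accessors that appear elsewhere in this diff:

	/* Illustrative flush loop observing CON_SUSPENDED under SRCU. */
	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		if (console_srcu_read_flags(con) & CON_SUSPENDED)
			continue;	/* marked by suspend_console(); do not call con->write() */
		/* ... emit pending records via con->write() ... */
	}
	console_srcu_read_unlock(cookie);

A reader that entered its critical section before the flags were updated may still print; synchronize_srcu() waits for exactly those sections to finish, so suspend_console() returns only once all printing has stopped.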
@@ -2585,6 +2613,26 @@ static int console_cpu_notify(unsigned int cpu)
 	return 0;
 }
 
+/*
+ * Return true if a panic is in progress on a remote CPU.
+ *
+ * On true, the local CPU should immediately release any printing resources
+ * that may be needed by the panic CPU.
+ */
+bool other_cpu_in_panic(void)
+{
+	if (!panic_in_progress())
+		return false;
+
+	/*
+	 * We can use raw_smp_processor_id() here because it is impossible for
+	 * the task to be migrated to the panic_cpu, or away from it. If
+	 * panic_cpu has already been set, and we're not currently executing on
+	 * that CPU, then we never will be.
+	 */
+	return atomic_read(&panic_cpu) != raw_smp_processor_id();
+}
+
 /**
  * console_lock - block the console subsystem from printing
  *
@@ -2597,9 +2645,11 @@ void console_lock(void)
 {
 	might_sleep();
 
+	/* On panic, the console_lock must be left to the panic cpu. */
+	while (other_cpu_in_panic())
+		msleep(1000);
+
 	down_console_sem();
-	if (console_suspended)
-		return;
 	console_locked = 1;
 	console_may_schedule = 1;
 }
@@ -2615,12 +2665,11 @@ EXPORT_SYMBOL(console_lock);
  */
 int console_trylock(void)
 {
+	/* On panic, the console_lock must be left to the panic cpu. */
+	if (other_cpu_in_panic())
+		return 0;
 	if (down_trylock_console_sem())
 		return 0;
-	if (console_suspended) {
-		up_console_sem();
-		return 0;
-	}
 	console_locked = 1;
 	console_may_schedule = 0;
 	return 1;
@@ -2633,25 +2682,6 @@ int is_console_locked(void)
 }
 EXPORT_SYMBOL(is_console_locked);
 
-/*
- * Return true when this CPU should unlock console_sem without pushing all
- * messages to the console. This reduces the chance that the console is
- * locked when the panic CPU tries to use it.
- */
-static bool abandon_console_lock_in_panic(void)
-{
-	if (!panic_in_progress())
-		return false;
-
-	/*
-	 * We can use raw_smp_processor_id() here because it is impossible for
-	 * the task to be migrated to the panic_cpu, or away from it. If
-	 * panic_cpu has already been set, and we're not currently executing on
-	 * that CPU, then we never will be.
-	 */
-	return atomic_read(&panic_cpu) != raw_smp_processor_id();
-}
-
 /*
  * Check if the given console is currently capable and allowed to print
  * records.
@@ -2665,6 +2695,9 @@ static inline bool console_is_usable(struct console *con)
 	if (!(flags & CON_ENABLED))
 		return false;
 
+	if ((flags & CON_SUSPENDED))
+		return false;
+
 	if (!con->write)
 		return false;
 
@@ -2948,7 +2981,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 			any_progress = true;
 
 			/* Allow panic_cpu to take over the consoles safely. */
-			if (abandon_console_lock_in_panic())
+			if (other_cpu_in_panic())
 				goto abandon;
 
 			if (do_cond_resched)
@@ -2983,11 +3016,6 @@ void console_unlock(void)
 	bool flushed;
 	u64 next_seq;
 
-	if (console_suspended) {
-		up_console_sem();
-		return;
-	}
-
 	/*
 	 * Console drivers are called with interrupts disabled, so
 	 * @console_may_schedule should be cleared before; however, we may
@@ -3045,9 +3073,27 @@ EXPORT_SYMBOL(console_conditional_schedule);
 
 void console_unblank(void)
 {
+	bool found_unblank = false;
 	struct console *c;
 	int cookie;
 
+	/*
+	 * First check if there are any consoles implementing the unblank()
+	 * callback. If not, there is no reason to continue and take the
+	 * console lock, which in particular can be dangerous if
+	 * @oops_in_progress is set.
+	 */
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(c) {
+		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) {
+			found_unblank = true;
+			break;
+		}
+	}
+	console_srcu_read_unlock(cookie);
+	if (!found_unblank)
+		return;
+
 	/*
 	 * Stop console printing because the unblank() callback may
 	 * assume the console is not within its write() callback.
@@ -3056,6 +3102,16 @@ void console_unblank(void)
 	 * In that case, attempt a trylock as best-effort.
 	 */
 	if (oops_in_progress) {
+		/* Semaphores are not NMI-safe. */
+		if (in_nmi())
+			return;
+
+		/*
+		 * Attempting to trylock the console lock can deadlock
+		 * if another CPU was stopped while modifying the
+		 * semaphore. "Hope and pray" that this is not the
+		 * current situation.
+		 */
 		if (down_trylock_console_sem() != 0)
 			return;
 	} else
@@ -3085,14 +3141,24 @@ void console_unblank(void)
  */
 void console_flush_on_panic(enum con_flush_mode mode)
 {
+	bool handover;
+	u64 next_seq;
+
 	/*
-	 * If someone else is holding the console lock, trylock will fail
-	 * and may_schedule may be set. Ignore and proceed to unlock so
-	 * that messages are flushed out. As this can be called from any
-	 * context and we don't want to get preempted while flushing,
-	 * ensure may_schedule is cleared.
+	 * Ignore the console lock and flush out the messages. Attempting a
+	 * trylock would not be useful because:
+	 *
+	 *   - if it is contended, it must be ignored anyway
+	 *   - console_lock() and console_trylock() block and fail
+	 *     respectively in panic for non-panic CPUs
+	 *   - semaphores are not NMI-safe
+	 */
+
+	/*
+	 * If another context is holding the console lock,
+	 * @console_may_schedule might be set. Clear it so that
+	 * this context does not call cond_resched() while flushing.
 	 */
-	console_trylock();
 	console_may_schedule = 0;
 
 	if (mode == CONSOLE_REPLAY_ALL) {
@@ -3105,15 +3171,15 @@ void console_flush_on_panic(enum con_flush_mode mode)
 		cookie = console_srcu_read_lock();
 		for_each_console_srcu(c) {
 			/*
-			 * If the above console_trylock() failed, this is an
-			 * unsynchronized assignment. But in that case, the
+			 * This is an unsynchronized assignment, but the
 			 * kernel is in "hope and pray" mode anyway.
 			 */
 			c->seq = seq;
 		}
 		console_srcu_read_unlock(cookie);
 	}
-	console_unlock();
+
+	console_flush_all(false, &next_seq, &handover);
 }
 
 /*
@@ -3679,8 +3745,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 
 		/*
 		 * Hold the console_lock to guarantee safe access to
-		 * console->seq and to prevent changes to @console_suspended
-		 * until all consoles have been processed.
+		 * console->seq.
 		 */
 		console_lock();
 
@@ -3688,6 +3753,11 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 		for_each_console_srcu(c) {
 			if (con && con != c)
 				continue;
+			/*
+			 * If consoles are not usable, it cannot be expected
+			 * that they make forward progress, so only increment
+			 * @diff for usable consoles.
+			 */
 			if (!console_is_usable(c))
 				continue;
 			printk_seq = c->seq;
@@ -3696,18 +3766,12 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 		}
 		console_srcu_read_unlock(cookie);
 
-		/*
-		 * If consoles are suspended, it cannot be expected that they
-		 * make forward progress, so timeout immediately. @diff is
-		 * still used to return a valid flush status.
-		 */
-		if (console_suspended)
-			remaining = 0;
-		else if (diff != last_diff && reset_on_progress)
+		if (diff != last_diff && reset_on_progress)
 			remaining = timeout_ms;
 
 		console_unlock();
 
+		/* Note: @diff is 0 if there are no usable consoles. */
 		if (diff == 0 || remaining == 0)
 			break;
 
@@ -3741,7 +3805,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
  * printer has been seen to make some forward progress.
  *
  * Context: Process context. May sleep while acquiring console lock.
- * Return: true if all enabled printers are caught up.
+ * Return: true if all usable printers are caught up.
  */
 static bool pr_flush(int timeout_ms, bool reset_on_progress)
 {
@@ -3798,11 +3862,33 @@ static void __wake_up_klogd(int val)
 	preempt_enable();
 }
 
+/**
+ * wake_up_klogd - Wake kernel logging daemon
+ *
+ * Use this function when new records have been added to the ringbuffer
+ * and the console printing of those records has already occurred or is
+ * known to be handled by some other context. This function will only
+ * wake the logging daemon.
+ *
+ * Context: Any context.
+ */
 void wake_up_klogd(void)
 {
 	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
 }
 
+/**
+ * defer_console_output - Wake kernel logging daemon and trigger
+ *	console printing in a deferred context
+ *
+ * Use this function when new records have been added to the ringbuffer,
+ * this context is responsible for console printing those records, but
+ * the current context is not allowed to perform the console printing.
+ * Trigger an irq_work context to perform the console printing. This
+ * function also wakes the logging daemon.
+ *
+ * Context: Any context.
+ */
 void defer_console_output(void)
 {
 	/*
@@ -3819,12 +3905,7 @@ void printk_trigger_flush(void)
 
 int vprintk_deferred(const char *fmt, va_list args)
 {
-	int r;
-
-	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
-	defer_console_output();
-
-	return r;
+	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
 }
 
 int _printk_deferred(const char *fmt, ...)
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -38,13 +38,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * Use the main logbuf even in NMI. But avoid calling console
 	 * drivers that might have their own locks.
 	 */
-	if (this_cpu_read(printk_context) || in_nmi()) {
-		int len;
-
-		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
-		defer_console_output();
-		return len;
-	}
+	if (this_cpu_read(printk_context) || in_nmi())
+		return vprintk_deferred(fmt, args);
 
 	/* No obstacles. */
 	return vprintk_default(fmt, args);
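Taken together, the printk.c and printk_safe.c hunks leave a single place that decides how console printing is triggered for a newly stored record. A condensed sketch of the resulting flow, stitched from the functions shown above (fragments for illustration, not one compilable unit):

	/* printk_safe.c: recursion or NMI context must not touch console drivers. */
	if (this_cpu_read(printk_context) || in_nmi())
		return vprintk_deferred(fmt, args);

	/* printk.c: vprintk_deferred() is now a plain forwarder ... */
	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);

	/* ... and vprintk_emit() picks the wakeup at the end: */
	if (in_sched)
		defer_console_output();	/* irq_work prints the consoles and wakes klogd */
	else
		wake_up_klogd();	/* records were already printed here; only wake klogd */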