linux/fs/xfs/xfs_message.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_error.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"

/*
 * XFS logging functions
 */
static void
__xfs_printk(
        const char              *level,
        const struct xfs_mount  *mp,
        struct va_format        *vaf)
{
        if (mp && mp->m_super) {
                printk("%sXFS (%s): %pV\n", level, mp->m_super->s_id, vaf);
                return;
        }
        printk("%sXFS: %pV\n", level, vaf);
}
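
/*
 * Print a message at the given printk level, then decide whether to dump a
 * stack trace: if the level string parses to LOGLEVEL_ERR or a more severe
 * level and the xfs_error_level sysctl is at least XFS_ERRLEVEL_HIGH,
 * xfs_stack_trace() is called.  The per-level helpers (xfs_warn(),
 * xfs_err(), xfs_alert(), ...) are assumed to be macros in xfs_message.h
 * that pass the matching KERN_* constant to this function.
 */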
void
xfs_printk_level(
        const char              *kern_level,
        const struct xfs_mount  *mp,
        const char              *fmt, ...)
{
        struct va_format        vaf;
        va_list                 args;
        int                     level;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        __xfs_printk(kern_level, mp, &vaf);

        va_end(args);

        if (!kstrtoint(kern_level, 0, &level) &&
            level <= LOGLEVEL_ERR &&
            xfs_error_level >= XFS_ERRLEVEL_HIGH)
                xfs_stack_trace();
}
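
/*
 * Alert with a panic tag: if the tag is also set in the xfs_panic_mask
 * sysctl, the message is followed by a BUG() so the failure is turned into
 * a crash instead of being logged and ignored.
 */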
void
_xfs_alert_tag(
        const struct xfs_mount  *mp,
        uint32_t                panic_tag,
        const char              *fmt, ...)
{
        struct va_format        vaf;
        va_list                 args;
        int                     do_panic = 0;

        if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
                xfs_alert(mp, "Transforming an alert into a BUG.");
                do_panic = 1;
        }

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        __xfs_printk(KERN_ALERT, mp, &vaf);
        va_end(args);

        BUG_ON(do_panic);
}
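
/* Report a failed assertion non-fatally: log it and emit a WARN_ON(). */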
void
asswarn(
        struct xfs_mount        *mp,
        char                    *expr,
        char                    *file,
        int                     line)
{
        xfs_warn(mp, "Assertion failed: %s, file: %s, line: %d",
                expr, file, line);
        WARN_ON(1);
}
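
/*
 * Report a failed assertion.  Depending on xfs_globals.bug_on_assert this
 * either crashes the kernel with BUG() or continues after a WARN_ON().
 */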
void
assfail(
        struct xfs_mount        *mp,
        char                    *expr,
        char                    *file,
        int                     line)
{
        xfs_emerg(mp, "Assertion failed: %s, file: %s, line: %d",
                expr, file, line);
        if (xfs_globals.bug_on_assert)
                BUG();
        else
                WARN_ON(1);
}
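
/* Dump a buffer at KERN_ALERT: 16 bytes per row, offset prefix, with ASCII. */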
void
xfs_hex_dump(const void *p, int length)
{
        print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
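
/*
 * Rate-limited alert for buffer I/O problems.  The ratelimit state is kept
 * per buffer target (bp->b_target->bt_ioerror_rl), so an error storm on one
 * device does not suppress messages about other devices.
 */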
void
xfs_buf_alert_ratelimited(
        struct xfs_buf          *bp,
        const char              *rlmsg,
        const char              *fmt,
        ...)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct va_format        vaf;
        va_list                 args;

        /* use the more aggressive per-target rate limit for buffers */
        if (!___ratelimit(&bp->b_target->bt_ioerror_rl, rlmsg))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        __xfs_printk(KERN_ALERT, mp, &vaf);
        va_end(args);
}
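
/*
 * Warn that an experimental feature is enabled on this mount.  The opstate
 * flag is assumed to be consumed by xfs_should_warn() (defined outside this
 * file) so that each warning is printed only once per mount.
 */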
void
xfs_warn_experimental(
        struct xfs_mount        *mp,
        enum xfs_experimental_feat feat)
{
        static const struct {
                const char      *name;
                long            opstate;
        } features[] = {
                [XFS_EXPERIMENTAL_SHRINK] = {
                        .opstate        = XFS_OPSTATE_WARNED_SHRINK,
                        .name           = "online shrink",
                },
                [XFS_EXPERIMENTAL_LARP] = {
                        .opstate        = XFS_OPSTATE_WARNED_LARP,
                        .name           = "logged extended attributes",
                },
                [XFS_EXPERIMENTAL_LBS] = {
                        .opstate        = XFS_OPSTATE_WARNED_LBS,
                        .name           = "large block size",
                },
                [XFS_EXPERIMENTAL_METADIR] = {
                        .opstate        = XFS_OPSTATE_WARNED_METADIR,
                        .name           = "metadata directory tree",
                },
                [XFS_EXPERIMENTAL_ZONED] = {
                        .opstate        = XFS_OPSTATE_WARNED_ZONED,
                        .name           = "zoned RT device",
                },
        };

        ASSERT(feat >= 0 && feat < XFS_EXPERIMENTAL_MAX);
        BUILD_BUG_ON(ARRAY_SIZE(features) != XFS_EXPERIMENTAL_MAX);

        if (xfs_should_warn(mp, features[feat].opstate))
                xfs_warn(mp,
        "EXPERIMENTAL %s feature enabled. Use at your own risk!",
                        features[feat].name);
}
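
/*
 * Illustrative only, not part of the upstream file: a hypothetical caller
 * might combine the helpers defined above roughly as follows, assuming the
 * usual wrapper macros from xfs_message.h and the XFS_PTAG_* panic tags
 * from xfs_error.h:
 *
 *      xfs_warn_experimental(mp, XFS_EXPERIMENTAL_ZONED);
 *      _xfs_alert_tag(mp, XFS_PTAG_LOGRES, "log reservation overrun");
 *      xfs_hex_dump(buffer, 64);
 */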