
In addition to keeping the kernel's copy of zstd up to date, this update
was requested by Intel to expose upstream's APIs that allow QAT to
accelerate the LZ match finding stage of Zstd.

This patch is imported from the upstream tag v1.5.7-kernel [0], which is
signed with upstream's signing key EF8FE99528B52FFD [1]. It was imported
from upstream using this command:

    export ZSTD=/path/to/repo/zstd/
    export LINUX=/path/to/repo/linux/
    cd "$ZSTD/contrib/linux-kernel"
    git checkout v1.5.7-kernel
    make import LINUX="$LINUX"

This patch has been tested on x86-64, and has been boot tested with a
zstd compressed kernel & initramfs on i386 and aarch64. I benchmarked
the patch on x86-64 with gcc-14.2.1 on an Intel i9-9900K by measuring
the performance of compressed filesystem reads and writes.

    Component, Level, Size delta, C. time delta, D. time delta
    Btrfs    ,     1,     +0.00%,         -6.1%,          +1.4%
    Btrfs    ,     3,     +0.00%,         -9.8%,          +3.0%
    Btrfs    ,     5,     +0.00%,         +1.7%,          +1.4%
    Btrfs    ,     7,     +0.00%,         -1.9%,          +2.7%
    Btrfs    ,     9,     +0.00%,         -3.4%,          +3.7%
    Btrfs    ,    15,     +0.00%,         -0.3%,          +3.6%
    SquashFS ,     1,     +0.00%,           N/A,          +1.9%

The major changes that impact the kernel use cases for each version are:

v1.5.7: https://github.com/facebook/zstd/releases/tag/v1.5.7
* Add zstd_compress_sequences_and_literals() for use by Intel's QAT
  driver to implement Zstd compression acceleration in the kernel.
* Fix an underflow bug in 32-bit builds that can cause data corruption
  when processing more than 4GB of data with a single `ZSTD_CCtx`
  object, when an input crosses the 4GB boundary. I don't believe this
  impacts any current kernel use cases, because the `ZSTD_CCtx` is
  typically reconstructed between compressions.
* Levels 1-4 see 5-10% compression speed improvements for inputs
  smaller than 128KB.

v1.5.6: https://github.com/facebook/zstd/releases/tag/v1.5.6
* Improved compression ratio for the highest compression levels. I
  don't expect these to see much use, however, due to their slow speeds.

v1.5.5: https://github.com/facebook/zstd/releases/tag/v1.5.5
* Fix a rare corruption bug that can trigger on levels 13 and above.
* Improve compression speed of levels 5-11 on incompressible data.

v1.5.4: https://github.com/facebook/zstd/releases/tag/v1.5.4
* Improve compression speed of levels 5-11 on ARM.
* Improve dictionary compression speed.

Signed-off-by: Nick Terrell <terrelln@fb.com>
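For orientation (not part of the patch): a minimal sketch of how an
in-kernel user typically drives the one-shot wrappers from
include/linux/zstd.h that this library backs. The helper name
example_compress() and its 0-on-failure convention are hypothetical,
and the QAT-accelerated sequence-producer path is not shown:

    #include <linux/zstd.h>
    #include <linux/vmalloc.h>

    /* Illustrative only: compress src into dst, returning the
     * compressed size, or 0 on failure. */
    static size_t example_compress(void *dst, size_t dst_capacity,
                                   const void *src, size_t src_size,
                                   int level)
    {
        zstd_parameters params = zstd_get_params(level, src_size);
        size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
        void *wksp = vmalloc(wksp_size);
        zstd_cctx *cctx;
        size_t ret = 0;

        if (!wksp)
            return 0;
        cctx = zstd_init_cctx(wksp, wksp_size);
        if (cctx)
            ret = zstd_compress_cctx(cctx, dst, dst_capacity,
                                     src, src_size, &params);
        vfree(wksp);
        return zstd_is_error(ret) ? 0 : ret;
    }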
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
void HIST_add(unsigned* count, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;

    while (ip<end) {
        count[*ip++]++;
    }
}
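
/*
 * Note: unlike the HIST_count*() functions below, HIST_add() does not zero
 * `count` first, so one histogram can be accumulated across several inputs.
 * Example (illustrative only; `chunk1`/`chunk2` and their sizes are
 * hypothetical):
 *
 *   unsigned count[256] = {0};
 *   HIST_add(count, chunk1, chunk1Size);
 *   HIST_add(count, chunk2, chunk2Size);
 */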

unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
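
/*
 * Example (illustrative only; `buf`/`bufSize` are hypothetical): count byte
 * frequencies of a buffer. On input, maxSymbolValue is an upper bound on the
 * symbol values present; on return it is lowered to the largest symbol
 * actually seen, and the return value is the highest single frequency.
 *
 *   unsigned count[256];
 *   unsigned maxSymbolValue = 255;
 *   unsigned largest = HIST_count_simple(count, &maxSymbolValue, buf, bufSize);
 */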

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* by stripes of 16 bytes */
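    /* The unrolled loop below keeps the next 4-byte load in flight in
     * `cached` while the previous word is counted, and spreads increments
     * across four tables so heavily repeated symbols do not serialize on a
     * single counter; the tables are summed back together after the loop. */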
    {   U32 cached = MEM_read32(ip); ip += 4;
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
        }
        ip-=4;
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);  /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* source, size_t sourceSize,
                           void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
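
/*
 * Example (illustrative only; `buf`/`bufSize` are hypothetical): trusted-input
 * counting with an on-stack workspace. HIST_WKSP_SIZE_U32 and HIST_WKSP_SIZE
 * are defined in hist.h.
 *
 *   U32 wksp[HIST_WKSP_SIZE_U32];
 *   unsigned count[256];
 *   unsigned maxSymbolValue = 255;
 *   size_t largest = HIST_countFast_wksp(count, &maxSymbolValue,
 *                                        buf, bufSize, wksp, sizeof(wksp));
 *   if (HIST_isError(largest)) { ... }
 */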

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of >= HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
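
/*
 * Note: HIST_count_wksp() is the checked variant. When *maxSymbolValuePtr is
 * below 255 it verifies that no input symbol exceeds that bound and returns
 * ERROR(maxSymbolValue_tooSmall) if one does; with the bound already at 255
 * every byte value is legal, so it defers to the faster unchecked path above.
 */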