2019-04-30 14:42:39 -04:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2014-09-26 19:20:07 -04:00
|
|
|
/*
|
|
|
|
* t10_pi.c - Functions for generating and verifying T10 Protection
|
|
|
|
* Information.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/t10-pi.h>
|
2021-09-20 14:33:27 +02:00
|
|
|
#include <linux/blk-integrity.h>
|
2014-09-26 19:20:07 -04:00
|
|
|
#include <linux/crc-t10dif.h>
|
2022-03-03 12:13:11 -08:00
|
|
|
#include <linux/crc64.h>
|
2014-09-26 19:20:07 -04:00
|
|
|
#include <net/checksum.h>
|
2024-10-01 15:35:57 -04:00
|
|
|
#include <linux/unaligned.h>
|
2024-06-13 10:48:15 +02:00
|
|
|
#include "blk.h"
|
2014-09-26 19:20:07 -04:00
|
|
|
|
2024-06-26 06:59:38 +02:00
|
|
|
/*
 * Cursor state for walking a run of data against its protection information
 * (PI) buffer.  The generate/verify helpers advance data_buf, prot_buf and
 * seed in place as they consume one protection interval at a time.
 */
struct blk_integrity_iter {
	void *prot_buf;		/* current position in the PI metadata buffer */
	void *data_buf;		/* current position in the data buffer */
	sector_t seed;		/* reference-tag seed for the current interval */
	unsigned int data_size;	/* total data bytes to process */
	unsigned short interval;	/* data bytes covered by one PI tuple */
	const char *disk_name;	/* disk name, used only for error messages */
};
|
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
/*
 * Compute the 16-bit guard checksum over @len bytes of @data, folding it
 * into @csum.  The IP checksum variant ignores the running @csum; the
 * CRC variant continues it.
 */
static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
		unsigned char csum_type)
{
	switch (csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		return (__force __be16)ip_compute_csum(data, len);
	default:
		return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum),
						     data, len));
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Type 1 and Type 2 protection use the same format: 16 bit guard tag,
|
|
|
|
* 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
|
|
|
|
* tag.
|
|
|
|
*/
|
2024-06-13 10:48:15 +02:00
|
|
|
static void t10_pi_generate(struct blk_integrity_iter *iter,
|
|
|
|
struct blk_integrity *bi)
|
2014-09-26 19:20:07 -04:00
|
|
|
{
|
2024-06-13 10:48:15 +02:00
|
|
|
u8 offset = bi->pi_offset;
|
2014-09-26 19:20:07 -04:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
|
2024-02-01 18:31:25 +05:30
|
|
|
struct t10_pi_tuple *pi = iter->prot_buf + offset;
|
2014-09-26 19:20:07 -04:00
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
|
|
|
|
bi->csum_type);
|
2024-02-01 18:31:25 +05:30
|
|
|
if (offset)
|
2024-06-13 10:48:15 +02:00
|
|
|
pi->guard_tag = t10_pi_csum(pi->guard_tag,
|
|
|
|
iter->prot_buf, offset, bi->csum_type);
|
2014-09-26 19:20:07 -04:00
|
|
|
pi->app_tag = 0;
|
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
if (bi->flags & BLK_INTEGRITY_REF_TAG)
|
2014-09-26 19:20:07 -04:00
|
|
|
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
|
|
|
|
else
|
|
|
|
pi->ref_tag = 0;
|
|
|
|
|
|
|
|
iter->data_buf += iter->interval;
|
2025-06-30 14:35:45 +05:30
|
|
|
iter->prot_buf += bi->metadata_size;
|
2014-09-26 19:20:07 -04:00
|
|
|
iter->seed++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-03 09:38:06 +02:00
|
|
|
/*
 * Verify the guard and reference tags of every protection interval in the
 * iterator's data buffer.
 *
 * Returns BLK_STS_PROTECTION on the first mismatch, BLK_STS_OK otherwise.
 */
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			/*
			 * Type 1/2: the app tag escape value disables
			 * checking for this tuple.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu " \
				       "(rcvd %u)\n", iter->disk_name,
				       (unsigned long long)
				       iter->seed, be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else {
			/*
			 * Type 3: checking is disabled only when both the
			 * app tag and the ref tag carry their escape values.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		/* Recompute the guard over the data (and any leading metadata). */
		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
				   bi->csum_type);
		if (offset)
			csum = t10_pi_csum(csum, iter->prot_buf, offset,
					   bi->csum_type);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %04x, want %04x)\n", iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->metadata_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
|
|
|
|
|
2018-07-30 00:15:32 +03:00
|
|
|
/**
|
2019-09-16 18:44:29 +03:00
|
|
|
* t10_pi_type1_prepare - prepare PI prior submitting request to device
|
2018-07-30 00:15:32 +03:00
|
|
|
* @rq: request with PI that should be prepared
|
|
|
|
*
|
|
|
|
* For Type 1/Type 2, the virtual start sector is the one that was
|
|
|
|
* originally submitted by the block layer for the ref_tag usage. Due to
|
|
|
|
* partitioning, MD/DM cloning, etc. the actual physical start sector is
|
|
|
|
* likely to be different. Remap protection information to match the
|
|
|
|
* physical LBA.
|
|
|
|
*/
|
2019-09-16 18:44:29 +03:00
|
|
|
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->metadata_size;
	u32 ref_tag = t10_pi_ref_tag(rq);	/* physical start ref tag */
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* virtual (pre-remap) ref tag as seen by the submitter */
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				/*
				 * Only rewrite tuples that still carry the
				 * expected virtual tag; others (e.g. escape
				 * values) are left untouched.
				 */
				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		/* Mark so a requeue does not remap a second time. */
		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
|
|
|
|
|
|
|
|
/**
|
2019-09-16 18:44:29 +03:00
|
|
|
* t10_pi_type1_complete - prepare PI prior returning request to the blk layer
|
2018-07-30 00:15:32 +03:00
|
|
|
* @rq: request with PI that should be prepared
|
2019-09-16 18:44:29 +03:00
|
|
|
* @nr_bytes: total bytes to prepare
|
2018-07-30 00:15:32 +03:00
|
|
|
*
|
|
|
|
* For Type 1/Type 2, the virtual start sector is the one that was
|
|
|
|
* originally submitted by the block layer for the ref_tag usage. Due to
|
|
|
|
* partitioning, MD/DM cloning, etc. the actual physical start sector is
|
|
|
|
* likely to be different. Since the physical start sector was submitted
|
|
|
|
* to the device, we should remap it back to virtual values expected by the
|
|
|
|
* block layer.
|
|
|
|
*/
|
2019-09-16 18:44:29 +03:00
|
|
|
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	/* only remap the intervals actually completed by the device */
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->metadata_size;
	u32 ref_tag = t10_pi_ref_tag(rq);	/* physical start ref tag */
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* virtual (pre-remap) ref tag expected by the block layer */
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				/* Undo the physical remap done at prepare time. */
				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
|
2019-09-16 18:44:29 +03:00
|
|
|
|
2024-02-01 18:31:24 +05:30
|
|
|
/*
 * Fold @len bytes of @data into the running CRC64-NVME checksum @crc and
 * return it in big-endian wire format.
 */
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	u64 csum = crc64_nvme(crc, data, len);

	return cpu_to_be64(csum);
}
|
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
|
|
|
|
struct blk_integrity *bi)
|
2022-03-03 12:13:11 -08:00
|
|
|
{
|
2024-06-13 10:48:15 +02:00
|
|
|
u8 offset = bi->pi_offset;
|
2022-03-03 12:13:11 -08:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
|
2024-02-01 18:31:25 +05:30
|
|
|
struct crc64_pi_tuple *pi = iter->prot_buf + offset;
|
2022-03-03 12:13:11 -08:00
|
|
|
|
2024-02-01 18:31:24 +05:30
|
|
|
pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
|
2024-02-01 18:31:25 +05:30
|
|
|
if (offset)
|
|
|
|
pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
|
|
|
|
iter->prot_buf, offset);
|
2022-03-03 12:13:11 -08:00
|
|
|
pi->app_tag = 0;
|
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
if (bi->flags & BLK_INTEGRITY_REF_TAG)
|
2022-03-03 12:13:11 -08:00
|
|
|
put_unaligned_be48(iter->seed, pi->ref_tag);
|
|
|
|
else
|
|
|
|
put_unaligned_be48(0ULL, pi->ref_tag);
|
|
|
|
|
|
|
|
iter->data_buf += iter->interval;
|
2025-06-30 14:35:45 +05:30
|
|
|
iter->prot_buf += bi->metadata_size;
|
2022-03-03 12:13:11 -08:00
|
|
|
iter->seed++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-08-12 21:12:10 +03:00
|
|
|
static bool ext_pi_ref_escape(const u8 ref_tag[6])
|
2022-03-03 12:13:11 -08:00
|
|
|
{
|
2024-08-12 21:12:10 +03:00
|
|
|
static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
|
2022-03-03 12:13:11 -08:00
|
|
|
|
|
|
|
return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Verify the CRC64 guard and 48-bit reference tags of every protection
 * interval in the iterator's data buffer.
 *
 * Returns BLK_STS_PROTECTION on the first mismatch, BLK_STS_OK otherwise.
 */
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			/* App tag escape value disables checking this tuple. */
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else {
			/*
			 * Without ref tag checking, both the app tag and the
			 * ref tag must carry escape values to skip the tuple.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		/* Recompute the guard over the data (and any leading metadata). */
		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %016llx, want %016llx)\n",
				iter->disk_name, (unsigned long long)iter->seed,
				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->metadata_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
|
|
|
|
|
|
|
|
/*
 * Remap the 48-bit reference tags from the virtual start sector submitted
 * by the block layer to the physical start sector actually sent to the
 * device.  Extended-PI counterpart of t10_pi_type1_prepare().
 */
static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->metadata_size;
	u64 ref_tag = ext_pi_ref_tag(rq);	/* physical start ref tag */
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* virtual (pre-remap) ref tag as seen by the submitter */
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				/* Leave tuples without the expected tag alone. */
				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		/* Mark so a requeue does not remap a second time. */
		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
|
|
|
|
|
|
|
|
/*
 * Remap the 48-bit reference tags of the completed intervals back from
 * physical to virtual values before the request is returned to the block
 * layer.  Extended-PI counterpart of t10_pi_type1_complete().
 */
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	/* only remap the intervals actually completed by the device */
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->metadata_size;
	u64 ref_tag = ext_pi_ref_tag(rq);	/* physical start ref tag */
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* virtual (pre-remap) ref tag expected by the block layer */
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				/* Undo the physical remap done at prepare time. */
				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
|
|
|
|
|
2024-06-26 06:59:38 +02:00
|
|
|
void blk_integrity_generate(struct bio *bio)
|
2022-03-03 12:13:11 -08:00
|
|
|
{
|
2024-06-26 06:59:38 +02:00
|
|
|
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
|
|
|
|
struct bio_integrity_payload *bip = bio_integrity(bio);
|
|
|
|
struct blk_integrity_iter iter;
|
|
|
|
struct bvec_iter bviter;
|
|
|
|
struct bio_vec bv;
|
|
|
|
|
|
|
|
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
|
|
|
|
iter.interval = 1 << bi->interval_exp;
|
|
|
|
iter.seed = bio->bi_iter.bi_sector;
|
|
|
|
iter.prot_buf = bvec_virt(bip->bip_vec);
|
|
|
|
bio_for_each_segment(bv, bio, bviter) {
|
|
|
|
void *kaddr = bvec_kmap_local(&bv);
|
|
|
|
|
|
|
|
iter.data_buf = kaddr;
|
|
|
|
iter.data_size = bv.bv_len;
|
|
|
|
switch (bi->csum_type) {
|
|
|
|
case BLK_INTEGRITY_CSUM_CRC64:
|
|
|
|
ext_pi_crc64_generate(&iter, bi);
|
|
|
|
break;
|
|
|
|
case BLK_INTEGRITY_CSUM_CRC:
|
|
|
|
case BLK_INTEGRITY_CSUM_IP:
|
|
|
|
t10_pi_generate(&iter, bi);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
kunmap_local(kaddr);
|
2024-06-13 10:48:15 +02:00
|
|
|
}
|
2022-03-03 12:13:11 -08:00
|
|
|
}
|
|
|
|
|
2025-02-25 07:44:33 -08:00
|
|
|
/*
 * Verify the protection information of every data segment of @bio,
 * dispatching per the device's checksum type.  On the first failure the
 * status is stored in bio->bi_status and verification stops.
 */
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	/*
	 * At the moment verify is called bi_iter has been advanced during split
	 * and completion, so use the copy created during submission here.
	 */
	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = saved_iter->bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	__bio_for_each_segment(bv, bio, bviter, *saved_iter) {
		void *kaddr = bvec_kmap_local(&bv);
		blk_status_t ret = BLK_STS_OK;

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ret = ext_pi_crc64_verify(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			ret = t10_pi_verify(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);

		if (ret) {
			bio->bi_status = ret;
			return;
		}
	}
}
|
|
|
|
|
2024-06-13 10:48:15 +02:00
|
|
|
void blk_integrity_prepare(struct request *rq)
|
|
|
|
{
|
2024-06-13 10:48:22 +02:00
|
|
|
struct blk_integrity *bi = &rq->q->limits.integrity;
|
2024-06-13 10:48:15 +02:00
|
|
|
|
|
|
|
if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
|
|
|
|
ext_pi_type1_prepare(rq);
|
|
|
|
else
|
|
|
|
t10_pi_type1_prepare(rq);
|
|
|
|
}
|
|
|
|
|
|
|
|
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
|
|
|
|
{
|
2024-06-13 10:48:22 +02:00
|
|
|
struct blk_integrity *bi = &rq->q->limits.integrity;
|
2024-06-13 10:48:15 +02:00
|
|
|
|
|
|
|
if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
|
|
|
|
ext_pi_type1_complete(rq, nr_bytes);
|
|
|
|
else
|
|
|
|
t10_pi_type1_complete(rq, nr_bytes);
|
|
|
|
}
|