mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
sunrpc: implement rfc2203 rpcsec_gss seqnum cache
This implements a sequence number cache of the last three (right now hardcoded) sent sequence numbers for a given XID, as suggested by the RFC. From RFC2203 5.3.3.1: "Note that the sequence number algorithm requires that the client increment the sequence number even if it is retrying a request with the same RPC transaction identifier. It is not infrequent for clients to get into a situation where they send two or more attempts and a slow server sends the reply for the first attempt. With RPCSEC_GSS, each request and reply will have a unique sequence number. If the client wishes to improve turn around time on the RPC call, it can cache the RPCSEC_GSS sequence number of each request it sends. Then when it receives a response with a matching RPC transaction identifier, it can compute the checksum of each sequence number in the cache to try to match the checksum in the reply's verifier." Signed-off-by: Nikhil Jha <njha@janestreet.com> Acked-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
This commit is contained in:
parent
a5806cd506
commit
08d6ee6d8a
5 changed files with 57 additions and 28 deletions
|
@ -30,6 +30,8 @@
|
|||
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
|
||||
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
|
||||
|
||||
#define RPC_GSS_SEQNO_ARRAY_SIZE 3U
|
||||
|
||||
enum rpc_display_format_t {
|
||||
RPC_DISPLAY_ADDR = 0,
|
||||
RPC_DISPLAY_PORT,
|
||||
|
@ -66,7 +68,8 @@ struct rpc_rqst {
|
|||
struct rpc_cred * rq_cred; /* Bound cred */
|
||||
__be32 rq_xid; /* request XID */
|
||||
int rq_cong; /* has incremented xprt->cong */
|
||||
u32 rq_seqno; /* gss seq no. used on req. */
|
||||
u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */
|
||||
unsigned int rq_seqno_count; /* number of entries in rq_seqnos */
|
||||
int rq_enc_pages_num;
|
||||
struct page **rq_enc_pages; /* scratch pages for use by
|
||||
gss privacy code */
|
||||
|
@ -119,6 +122,18 @@ struct rpc_rqst {
|
|||
#define rq_svec rq_snd_buf.head
|
||||
#define rq_slen rq_snd_buf.len
|
||||
|
||||
/*
 * xprt_rqst_add_seqno - record a GSS sequence number for @req
 * @req: the RPC request the sequence number was sent with
 * @seqno: the newly assigned RPCSEC_GSS sequence number
 *
 * Keeps the last RPC_GSS_SEQNO_ARRAY_SIZE sequence numbers used on
 * this request, newest first, so a reply verifier can be matched
 * against any of them (RFC 2203 Section 5.3.3.1).  Always returns 0.
 */
static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno)
{
	unsigned int slot;

	/* Count entries until the cache is full, then stay saturated. */
	if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE))
		req->rq_seqno_count++;

	/* Age every cached entry by one slot; the oldest falls off. */
	for (slot = RPC_GSS_SEQNO_ARRAY_SIZE - 1; slot > 0; slot--)
		req->rq_seqnos[slot] = req->rq_seqnos[slot - 1];
	req->rq_seqnos[0] = seqno;
	return 0;
}
|
||||
|
||||
/* RPC transport layer security policies */
|
||||
enum xprtsec_policies {
|
||||
RPC_XPRTSEC_NONE = 0,
|
||||
|
|
|
@ -409,7 +409,7 @@ TRACE_EVENT(rpcgss_seqno,
|
|||
__entry->task_id = task->tk_pid;
|
||||
__entry->client_id = task->tk_client->cl_clid;
|
||||
__entry->xid = be32_to_cpu(rqst->rq_xid);
|
||||
__entry->seqno = rqst->rq_seqno;
|
||||
__entry->seqno = *rqst->rq_seqnos;
|
||||
),
|
||||
|
||||
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
|
||||
|
@ -440,7 +440,7 @@ TRACE_EVENT(rpcgss_need_reencode,
|
|||
__entry->client_id = task->tk_client->cl_clid;
|
||||
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
|
||||
__entry->seq_xmit = seq_xmit;
|
||||
__entry->seqno = task->tk_rqstp->rq_seqno;
|
||||
__entry->seqno = *task->tk_rqstp->rq_seqnos;
|
||||
__entry->ret = ret;
|
||||
),
|
||||
|
||||
|
|
|
@ -1100,7 +1100,7 @@ TRACE_EVENT(xprt_transmit,
|
|||
__entry->client_id = rqst->rq_task->tk_client ?
|
||||
rqst->rq_task->tk_client->cl_clid : -1;
|
||||
__entry->xid = be32_to_cpu(rqst->rq_xid);
|
||||
__entry->seqno = rqst->rq_seqno;
|
||||
__entry->seqno = *rqst->rq_seqnos;
|
||||
__entry->status = status;
|
||||
),
|
||||
|
||||
|
|
|
@ -1545,6 +1545,7 @@ static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
|
|||
struct kvec iov;
|
||||
struct xdr_buf verf_buf;
|
||||
int status;
|
||||
u32 seqno;
|
||||
|
||||
/* Credential */
|
||||
|
||||
|
@ -1556,15 +1557,16 @@ static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
|
|||
cred_len = p++;
|
||||
|
||||
spin_lock(&ctx->gc_seq_lock);
|
||||
req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
|
||||
seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
|
||||
xprt_rqst_add_seqno(req, seqno);
|
||||
spin_unlock(&ctx->gc_seq_lock);
|
||||
if (req->rq_seqno == MAXSEQ)
|
||||
if (*req->rq_seqnos == MAXSEQ)
|
||||
goto expired;
|
||||
trace_rpcgss_seqno(task);
|
||||
|
||||
*p++ = cpu_to_be32(RPC_GSS_VERSION);
|
||||
*p++ = cpu_to_be32(ctx->gc_proc);
|
||||
*p++ = cpu_to_be32(req->rq_seqno);
|
||||
*p++ = cpu_to_be32(*req->rq_seqnos);
|
||||
*p++ = cpu_to_be32(gss_cred->gc_service);
|
||||
p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
|
||||
*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
|
||||
|
@ -1678,17 +1680,31 @@ gss_refresh_null(struct rpc_task *task)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static u32
|
||||
gss_validate_seqno_mic(struct gss_cl_ctx *ctx, u32 seqno, __be32 *seq, __be32 *p, u32 len)
|
||||
{
|
||||
struct kvec iov;
|
||||
struct xdr_buf verf_buf;
|
||||
struct xdr_netobj mic;
|
||||
|
||||
*seq = cpu_to_be32(seqno);
|
||||
iov.iov_base = seq;
|
||||
iov.iov_len = 4;
|
||||
xdr_buf_from_iov(&iov, &verf_buf);
|
||||
mic.data = (u8 *)p;
|
||||
mic.len = len;
|
||||
return gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
|
||||
}
|
||||
|
||||
static int
|
||||
gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
|
||||
{
|
||||
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||
__be32 *p, *seq = NULL;
|
||||
struct kvec iov;
|
||||
struct xdr_buf verf_buf;
|
||||
struct xdr_netobj mic;
|
||||
u32 len, maj_stat;
|
||||
int status;
|
||||
int i = 1; /* don't recheck the first item */
|
||||
|
||||
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
|
||||
if (!p)
|
||||
|
@ -1705,13 +1721,10 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
|
|||
seq = kmalloc(4, GFP_KERNEL);
|
||||
if (!seq)
|
||||
goto validate_failed;
|
||||
*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
|
||||
iov.iov_base = seq;
|
||||
iov.iov_len = 4;
|
||||
xdr_buf_from_iov(&iov, &verf_buf);
|
||||
mic.data = (u8 *)p;
|
||||
mic.len = len;
|
||||
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
|
||||
maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len);
|
||||
/* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */
|
||||
while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count))
|
||||
maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i], seq, p, len);
|
||||
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
||||
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
||||
if (maj_stat)
|
||||
|
@ -1750,7 +1763,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|||
if (!p)
|
||||
goto wrap_failed;
|
||||
integ_len = p++;
|
||||
*p = cpu_to_be32(rqstp->rq_seqno);
|
||||
*p = cpu_to_be32(*rqstp->rq_seqnos);
|
||||
|
||||
if (rpcauth_wrap_req_encode(task, xdr))
|
||||
goto wrap_failed;
|
||||
|
@ -1847,7 +1860,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|||
if (!p)
|
||||
goto wrap_failed;
|
||||
opaque_len = p++;
|
||||
*p = cpu_to_be32(rqstp->rq_seqno);
|
||||
*p = cpu_to_be32(*rqstp->rq_seqnos);
|
||||
|
||||
if (rpcauth_wrap_req_encode(task, xdr))
|
||||
goto wrap_failed;
|
||||
|
@ -2001,7 +2014,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
|
|||
offset = rcv_buf->len - xdr_stream_remaining(xdr);
|
||||
if (xdr_stream_decode_u32(xdr, &seqno))
|
||||
goto unwrap_failed;
|
||||
if (seqno != rqstp->rq_seqno)
|
||||
if (seqno != *rqstp->rq_seqnos)
|
||||
goto bad_seqno;
|
||||
if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
|
||||
goto unwrap_failed;
|
||||
|
@ -2045,7 +2058,7 @@ unwrap_failed:
|
|||
trace_rpcgss_unwrap_failed(task);
|
||||
goto out;
|
||||
bad_seqno:
|
||||
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
|
||||
trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno);
|
||||
goto out;
|
||||
bad_mic:
|
||||
trace_rpcgss_verify_mic(task, maj_stat);
|
||||
|
@ -2077,7 +2090,7 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
|
|||
if (maj_stat != GSS_S_COMPLETE)
|
||||
goto bad_unwrap;
|
||||
/* gss_unwrap decrypted the sequence number */
|
||||
if (be32_to_cpup(p++) != rqstp->rq_seqno)
|
||||
if (be32_to_cpup(p++) != *rqstp->rq_seqnos)
|
||||
goto bad_seqno;
|
||||
|
||||
/* gss_unwrap redacts the opaque blob from the head iovec.
|
||||
|
@ -2093,7 +2106,7 @@ unwrap_failed:
|
|||
trace_rpcgss_unwrap_failed(task);
|
||||
return -EIO;
|
||||
bad_seqno:
|
||||
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
|
||||
trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p));
|
||||
return -EIO;
|
||||
bad_unwrap:
|
||||
trace_rpcgss_unwrap(task, maj_stat);
|
||||
|
@ -2118,14 +2131,14 @@ gss_xmit_need_reencode(struct rpc_task *task)
|
|||
if (!ctx)
|
||||
goto out;
|
||||
|
||||
if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
|
||||
if (gss_seq_is_newer(*req->rq_seqnos, READ_ONCE(ctx->gc_seq)))
|
||||
goto out_ctx;
|
||||
|
||||
seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
|
||||
while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
|
||||
while (gss_seq_is_newer(*req->rq_seqnos, seq_xmit)) {
|
||||
u32 tmp = seq_xmit;
|
||||
|
||||
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
|
||||
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, *req->rq_seqnos);
|
||||
if (seq_xmit == tmp) {
|
||||
ret = false;
|
||||
goto out_ctx;
|
||||
|
@ -2134,7 +2147,7 @@ gss_xmit_need_reencode(struct rpc_task *task)
|
|||
|
||||
win = ctx->gc_win;
|
||||
if (win > 0)
|
||||
ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
|
||||
ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win);
|
||||
|
||||
out_ctx:
|
||||
gss_put_ctx(ctx);
|
||||
|
|
|
@ -1365,7 +1365,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
|
|||
INIT_LIST_HEAD(&req->rq_xmit2);
|
||||
goto out;
|
||||
}
|
||||
} else if (!req->rq_seqno) {
|
||||
} else if (req->rq_seqno_count == 0) {
|
||||
list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
|
||||
if (pos->rq_task->tk_owner != task->tk_owner)
|
||||
continue;
|
||||
|
@ -1898,6 +1898,7 @@ xprt_request_init(struct rpc_task *task)
|
|||
req->rq_snd_buf.bvec = NULL;
|
||||
req->rq_rcv_buf.bvec = NULL;
|
||||
req->rq_release_snd_buf = NULL;
|
||||
req->rq_seqno_count = 0;
|
||||
xprt_init_majortimeo(task, req, task->tk_client->cl_timeout);
|
||||
|
||||
trace_xprt_reserve(req);
|
||||
|
|
Loading…
Add table
Reference in a new issue