Merge tag 'nfsd-5.7-rc-2' of git://git.linux-nfs.org/projects/cel/cel-2.6
Pull nfsd fixes from Chuck Lever:
"Resolve a data integrity problem with NFSD that I inadvertently
introduced last year.
The change I made makes the NFS server's duplicate reply cache
ineffective when krb5i or krb5p are in use, thus allowing the replay
of non-idempotent NFS requests such as RENAME, SETATTR, or even
WRITEs"
* tag 'nfsd-5.7-rc-2' of git://git.linux-nfs.org/projects/cel/cel-2.6:
SUNRPC: Revert 241b1f419f ("SUNRPC: Remove xdr_buf_trim()")
SUNRPC: Fix GSS privacy computation of auth->au_ralign
SUNRPC: Add "@len" parameter to gss_unwrap()
commit 152036d137
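
The shape of the fix is easiest to see at a call site. gss_unwrap() used to trust buf->len to delimit the wrapped token, so callers had to clamp buf->len before the call and repair it afterwards; the new "len" parameter carries the wrapped length explicitly. A condensed before/after, lifted from the unwrap_priv_data() hunk in svcauth_gss.c below:

	/* Before: clamp buf->len around the call, then undo the damage */
	saved_len = buf->len;
	buf->len = priv_len;
	maj_stat = gss_unwrap(ctx, 0, buf);
	pad = priv_len - buf->len;	/* bytes removed by unwrapping */
	buf->len = saved_len;
	buf->len -= pad;

	/* After: the wrapped length is an explicit argument */
	maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
	pad = priv_len - buf->len;

Passing the length explicitly also lets the mech report how much the message shrank: the new slack and align fields in struct gss_ctx are filled in by gss_unwrap_kerberos_v1/v2() and consumed by gss_unwrap_resp_priv() when it sets au_rslack and au_ralign.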
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -21,6 +21,7 @@
 struct gss_ctx {
 	struct gss_api_mech *mech_type;
 	void *internal_ctx_id;
+	unsigned int slack, align;
 };
 
 #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
@@ -66,6 +67,7 @@ u32 gss_wrap(
 u32 gss_unwrap(
 	struct gss_ctx *ctx_id,
 	int offset,
+	int len,
 	struct xdr_buf *inbuf);
 u32 gss_delete_sec_context(
 	struct gss_ctx **ctx_id);
@@ -126,6 +128,7 @@ struct gss_api_ops {
 	u32 (*gss_unwrap)(
 		struct gss_ctx *ctx_id,
 		int offset,
+		int len,
 		struct xdr_buf *buf);
 	void (*gss_delete_sec_context)(
 		void *internal_ctx_id);
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
 	u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
 			   struct xdr_buf *buf,
 			   struct page **pages); /* v2 encryption function */
-	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
 			   struct xdr_buf *buf, u32 *headskip,
 			   u32 *tailskip); /* v2 decryption function */
 };
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
 		  struct xdr_buf *outbuf, struct page **pages);
 
 u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
 		    struct xdr_buf *buf);
 
 
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 		     struct page **pages);
 
 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
 		     struct xdr_buf *buf, u32 *plainoffset,
 		     u32 *plainlen);
 
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2032,7 +2032,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
 	struct kvec *head = rqstp->rq_rcv_buf.head;
 	struct rpc_auth *auth = cred->cr_auth;
-	unsigned int savedlen = rcv_buf->len;
 	u32 offset, opaque_len, maj_stat;
 	__be32 *p;
 
@@ -2043,9 +2042,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	offset = (u8 *)(p) - (u8 *)head->iov_base;
 	if (offset + opaque_len > rcv_buf->len)
 		goto unwrap_failed;
-	rcv_buf->len = offset + opaque_len;
 
-	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
+			      offset + opaque_len, rcv_buf);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	if (maj_stat != GSS_S_COMPLETE)
@@ -2059,10 +2058,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	 */
 	xdr_init_decode(xdr, rcv_buf, p, rqstp);
 
-	auth->au_rslack = auth->au_verfsize + 2 +
-			  XDR_QUADLEN(savedlen - rcv_buf->len);
-	auth->au_ralign = auth->au_verfsize + 2 +
-			  XDR_QUADLEN(savedlen - rcv_buf->len);
+	auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
+	auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
+
 	return 0;
 unwrap_failed:
 	trace_rpcgss_unwrap_failed(task);
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -851,8 +851,8 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 }
 
 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
-		     u32 *headskip, u32 *tailskip)
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
+		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
 {
 	struct xdr_buf subbuf;
 	u32 ret = 0;
@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 
 	/* create a segment skipping the header and leaving out the checksum */
 	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
-			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
 			    kctx->gk5e->cksumlength));
 
 	nblocks = (subbuf.len + blocksize - 1) / blocksize;
@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 		goto out_err;
 
 	/* Get the packet's hmac value */
-	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
 				      pkt_hmac, kctx->gk5e->cksumlength);
 	if (ret)
 		goto out_err;
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 }
 
 static u32
-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+		       struct xdr_buf *buf, unsigned int *slack,
+		       unsigned int *align)
 {
 	int signalg;
 	int sealalg;
@@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	u32 conflen = kctx->gk5e->conflen;
 	int crypt_offset;
 	u8 *cksumkey;
+	unsigned int saved_len = buf->len;
 
 	dprintk("RPC: gss_unwrap_kerberos\n");
 
 	ptr = (u8 *)buf->head[0].iov_base + offset;
 	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
-					buf->len - offset))
+					len - offset))
 		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
@@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	    (!kctx->initiate && direction != 0))
 		return GSS_S_BAD_SIG;
 
+	buf->len = len;
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
 		struct crypto_sync_skcipher *cipher;
 		int err;
@@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
 	memmove(orig_start, data_start, data_len);
 	buf->head[0].iov_len -= (data_start - orig_start);
-	buf->len -= (data_start - orig_start);
+	buf->len = len - (data_start - orig_start);
 
 	if (gss_krb5_remove_padding(buf, blocksize))
 		return GSS_S_DEFECTIVE_TOKEN;
 
+	/* slack must include room for krb5 padding */
+	*slack = XDR_QUADLEN(saved_len - buf->len);
+	/* The GSS blob always precedes the RPC message payload */
+	*align = *slack;
 	return GSS_S_COMPLETE;
 }
 
@@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 }
 
 static u32
-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+		       struct xdr_buf *buf, unsigned int *slack,
+		       unsigned int *align)
 {
 	time64_t now;
 	u8 *ptr;
@@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	if (rrc != 0)
 		rotate_left(offset + 16, buf, rrc);
 
-	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
+	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
 					&headskip, &tailskip);
 	if (err)
 		return GSS_S_FAILURE;
@@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	 * it against the original
 	 */
 	err = read_bytes_from_xdr_buf(buf,
-				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
 				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
 	if (err) {
 		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
@@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	 * Note that buf->head[0].iov_len may indicate the available
 	 * head buffer space rather than that actually occupied.
 	 */
-	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
+	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
 	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
-	if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
-	    buf->head[0].iov_len)
-		return GSS_S_FAILURE;
+	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+						buf->head[0].iov_len);
 	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
 	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
-	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+	buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
 
 	/* Trim off the trailing "extra count" and checksum blob */
-	buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
+
+	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
+	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
 	return GSS_S_COMPLETE;
 }
 
@@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
 }
 
 u32
-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
+		    int len, struct xdr_buf *buf)
 {
 	struct krb5_ctx *kctx = gctx->internal_ctx_id;
 
@@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
 	case ENCTYPE_DES_CBC_RAW:
 	case ENCTYPE_DES3_CBC_RAW:
 	case ENCTYPE_ARCFOUR_HMAC:
-		return gss_unwrap_kerberos_v1(kctx, offset, buf);
+		return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
+					      &gctx->slack, &gctx->align);
 	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
 	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
-		return gss_unwrap_kerberos_v2(kctx, offset, buf);
+		return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
+					      &gctx->slack, &gctx->align);
 	}
 }
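
As a concrete check on the v2 slack/align arithmetic just above (the skip values here are hypothetical, chosen only for illustration; GSS_KRB5_TOK_HDR_LEN is 16, and XDR_QUADLEN(n) rounds n bytes up to a count of 4-byte XDR words):

	/* Suppose decrypt_v2 reported headskip = 16, tailskip = 12, and
	 * the token carried no extra count (ec = 0). Then:
	 *
	 *   *align = XDR_QUADLEN(16 + 16)          = 8 quads
	 *   *slack = 8 + XDR_QUADLEN(0 + 16 + 12)  = 8 + 7 = 15 quads
	 */

gss_unwrap_resp_priv() in auth_gss.c then adds au_verfsize + 2 to each of these, since the GSS blob always precedes the RPC message payload.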
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -411,10 +411,11 @@ gss_wrap(struct gss_ctx *ctx_id,
 u32
 gss_unwrap(struct gss_ctx *ctx_id,
 	   int offset,
+	   int len,
 	   struct xdr_buf *buf)
 {
 	return ctx_id->mech_type->gm_ops
-		->gss_unwrap(ctx_id, offset, buf);
+		->gss_unwrap(ctx_id, offset, len, buf);
 }
 
 
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -906,7 +906,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
 	if (svc_getnl(&buf->head[0]) != seq)
 		goto out;
 	/* trim off the mic and padding at the end before returning */
-	buf->len -= 4 + round_up_to_quad(mic.len);
+	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
 	stat = 0;
 out:
 	kfree(mic.data);
@@ -934,7 +934,7 @@ static int
 unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
 	u32 priv_len, maj_stat;
-	int pad, saved_len, remaining_len, offset;
+	int pad, remaining_len, offset;
 
 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
@@ -954,12 +954,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
 	buf->len -= pad;
 	fix_priv_head(buf, pad);
 
-	/* Maybe it would be better to give gss_unwrap a length parameter: */
-	saved_len = buf->len;
-	buf->len = priv_len;
-	maj_stat = gss_unwrap(ctx, 0, buf);
+	maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
 	pad = priv_len - buf->len;
-	buf->len = saved_len;
-	buf->len -= pad;
+
 	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
 	 * In the krb5p case, at least, the data ends up offset, so we need to
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 }
 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
 
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+	size_t cur;
+	unsigned int trim = len;
+
+	if (buf->tail[0].iov_len) {
+		cur = min_t(size_t, buf->tail[0].iov_len, trim);
+		buf->tail[0].iov_len -= cur;
+		trim -= cur;
+		if (!trim)
+			goto fix_len;
+	}
+
+	if (buf->page_len) {
+		cur = min_t(unsigned int, buf->page_len, trim);
+		buf->page_len -= cur;
+		trim -= cur;
+		if (!trim)
+			goto fix_len;
+	}
+
+	if (buf->head[0].iov_len) {
+		cur = min_t(size_t, buf->head[0].iov_len, trim);
+		buf->head[0].iov_len -= cur;
+		trim -= cur;
+	}
+fix_len:
+	buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
 	unsigned int this_len;
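
Since xdr_buf_trim() is the piece being restored, a standalone userspace sketch may help show its tail-then-pages-then-head behavior. This is a simplified re-implementation for illustration only (mock_xdr_buf, MIN, and mock_xdr_buf_trim are stand-ins invented here; the real struct xdr_buf carries kvecs and a page vector rather than bare lengths):

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified stand-in for the kernel's struct xdr_buf: a head
	 * kvec, a run of page data, and a tail kvec, each tracked only
	 * by its length. */
	struct mock_xdr_buf {
		size_t head_len;
		size_t page_len;
		size_t tail_len;
		size_t len;	/* total byte count */
	};

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/* Mirrors xdr_buf_trim(): shave bytes off the end, consuming
	 * the tail first, then the page data, then the head. */
	static void mock_xdr_buf_trim(struct mock_xdr_buf *buf, size_t len)
	{
		size_t trim = len;
		size_t cur;

		cur = MIN(buf->tail_len, trim);
		buf->tail_len -= cur;
		trim -= cur;

		cur = MIN(buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;

		cur = MIN(buf->head_len, trim);
		buf->head_len -= cur;
		trim -= cur;

		/* Like the kernel version, reduce the total only by what
		 * was actually trimmed if the buffer was too short. */
		buf->len -= (len - trim);
	}

	int main(void)
	{
		/* 100 bytes of head, 4096 of pages, 8 of tail. */
		struct mock_xdr_buf buf = { 100, 4096, 8, 4204 };

		/* Trimming 28 bytes (say, a MIC plus padding) eats all
		 * 8 tail bytes and then 20 page bytes. */
		mock_xdr_buf_trim(&buf, 28);
		printf("head=%zu pages=%zu tail=%zu len=%zu\n",
		       buf.head_len, buf.page_len, buf.tail_len, buf.len);
		/* prints: head=100 pages=4076 tail=0 len=4176 */
		return 0;
	}

The design point the revert preserves: callers such as unwrap_integ_data() never adjust head/page/tail lengths themselves, they just say how many trailing bytes to discard and let the helper keep all four length fields consistent.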