From 8920e8f94c44e31a73bdf923b04721e26e88cadd Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Wed, 7 Sep 2005 18:28:51 -0700
Subject: [PATCH] Fix 32bit sendmsg() flaw

When we copy 32bit ->msg_control contents to kernel, we walk the same
userland data twice without sanity checks on the second pass.

Second version of this patch: the original broke with 64-bit arches
running 32-bit-compat-mode executables doing sendmsg() syscalls with
unaligned CMSG data areas

Another thing is that we use kmalloc() to allocate and sock_kfree_s()
to free afterwards; less serious, but also needs fixing.

Signed-off-by: Al Viro
Signed-off-by: David Woodhouse
Signed-off-by: Chris Wright
Signed-off-by: Linus Torvalds
---
 net/compat.c | 44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)

(limited to 'net/compat.c')

diff --git a/net/compat.c b/net/compat.c
index d99ab96..e593dac 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -135,13 +135,14 @@ static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *ms
  * thus placement) of cmsg headers and length are different for
  * 32-bit apps. -DaveM
  */
-int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg,
+int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
 			       unsigned char *stackbuf, int stackbuf_size)
 {
 	struct compat_cmsghdr __user *ucmsg;
 	struct cmsghdr *kcmsg, *kcmsg_base;
 	compat_size_t ucmlen;
 	__kernel_size_t kcmlen, tmp;
+	int err = -EFAULT;
 
 	kcmlen = 0;
 	kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
@@ -156,6 +157,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg,
 
 		tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
 		       CMSG_ALIGN(sizeof(struct cmsghdr)));
+		tmp = CMSG_ALIGN(tmp);
 		kcmlen += tmp;
 		ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
 	}
@@ -167,30 +169,34 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg,
 	 * until we have successfully copied over all of the data
 	 * from the user.
 	 */
-	if(kcmlen > stackbuf_size)
-		kcmsg_base = kcmsg = kmalloc(kcmlen, GFP_KERNEL);
-	if(kcmsg == NULL)
+	if (kcmlen > stackbuf_size)
+		kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
+	if (kcmsg == NULL)
 		return -ENOBUFS;
 
 	/* Now copy them over neatly. */
 	memset(kcmsg, 0, kcmlen);
 	ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
 	while(ucmsg != NULL) {
-		__get_user(ucmlen, &ucmsg->cmsg_len);
+		if (__get_user(ucmlen, &ucmsg->cmsg_len))
+			goto Efault;
+		if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
+			goto Einval;
 		tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
 		       CMSG_ALIGN(sizeof(struct cmsghdr)));
+		if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp))
+			goto Einval;
 		kcmsg->cmsg_len = tmp;
-		__get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level);
-		__get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type);
-
-		/* Copy over the data. */
-		if(copy_from_user(CMSG_DATA(kcmsg),
-				  CMSG_COMPAT_DATA(ucmsg),
-				  (ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg)))))
-			goto out_free_efault;
+		tmp = CMSG_ALIGN(tmp);
+		if (__get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level) ||
+		    __get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type) ||
+		    copy_from_user(CMSG_DATA(kcmsg),
+				   CMSG_COMPAT_DATA(ucmsg),
+				   (ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg)))))
+			goto Efault;
 
 		/* Advance. */
-		kcmsg = (struct cmsghdr *)((char *)kcmsg + CMSG_ALIGN(tmp));
+		kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
 		ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
 	}
 
@@ -199,10 +205,12 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg,
 	kmsg->msg_controllen = kcmlen;
 	return 0;
 
-out_free_efault:
-	if(kcmsg_base != (struct cmsghdr *)stackbuf)
-		kfree(kcmsg_base);
-	return -EFAULT;
+Einval:
+	err = -EINVAL;
+Efault:
+	if (kcmsg_base != (struct cmsghdr *)stackbuf)
+		sock_kfree_s(sk, kcmsg_base, kcmlen);
+	return err;
 }
 
 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
--
cgit v1.1
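
A minimal standalone sketch of the double-fetch hazard the commit message
describes, under simplified assumptions: it is not kernel code, and the names
copy_cmsg_twice(), user_len and kbuf are invented stand-ins for
ucmsg->cmsg_len and the stack/sock_kmalloc() buffer. It shows why a length
fetched from userland during the sizing pass cannot be trusted when it is
fetched again during the copy pass; a racing thread can grow it in between,
so the second pass must be re-checked against what the first pass reserved,
which is what the added CMSG_COMPAT_OK() and remaining-space tests do.

/*
 * Hedged illustration only; invented names, not the kernel implementation.
 * user_len stands in for ucmsg->cmsg_len, which another userspace thread
 * can rewrite between the kernel's two walks over the cmsg area.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define KBUF_SIZE 64

static volatile size_t user_len = 16;      /* attacker-controlled length */
static unsigned char user_data[256];       /* attacker-controlled payload */

static int copy_cmsg_twice(void)
{
	unsigned char kbuf[KBUF_SIZE];

	/* Pass 1: size the kernel buffer from the user-supplied length. */
	size_t reserved = user_len;                    /* first fetch */
	if (reserved > sizeof(kbuf))
		return -1;

	/* ...window in which a racing thread may grow user_len... */

	/* Pass 2: the pre-patch code trusted this second fetch blindly. */
	size_t len2 = user_len;                        /* second fetch */

	/* The fix: re-validate against what pass 1 reserved (-EINVAL path). */
	if (len2 > reserved)
		return -1;

	memcpy(kbuf, user_data, len2);                 /* now provably in bounds */
	return 0;
}

int main(void)
{
	printf("copy result: %d\n", copy_cmsg_twice());
	return 0;
}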