diff --git a/kernel.spec b/kernel.spec
index a204945..564f135 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -686,6 +686,9 @@ Patch633: net_43.mbox
 #CVE-2016-0728 rhbz 1296623 1297475
 Patch634: KEYS-Fix-keyring-ref-leak-in-join_session_keyring.patch
 
+#CVE-2013-4312 rhbz 1297813 1300216
+Patch636: unix-properly-account-for-FDs-passed-over-unix-socke.patch
+
 # END OF PATCH DEFINITIONS
 
 %endif
@@ -2129,6 +2132,9 @@ fi
 #
 #
 %changelog
+* Wed Jan 20 2016 Josh Boyer
+- CVE-2013-4312 file descriptors passed over unix sockets not properly accounted (rhbz 1297813 1300216)
+
 * Tue Jan 19 2016 Josh Boyer - 4.3.3-303
 - Backport nouveau stable fixes (rhbz 1299349)
 - CVE-2016-0728 Keys: reference leak in join_session_keyring (rhbz 1296623 1297475)
diff --git a/unix-properly-account-for-FDs-passed-over-unix-socke.patch b/unix-properly-account-for-FDs-passed-over-unix-socke.patch
new file mode 100644
index 0000000..c263abf
--- /dev/null
+++ b/unix-properly-account-for-FDs-passed-over-unix-socke.patch
@@ -0,0 +1,140 @@
+From 0cd038d23b86853d68993c94f3c713e4375fd61f Mon Sep 17 00:00:00 2001
+From: willy tarreau
+Date: Sun, 10 Jan 2016 07:54:56 +0100
+Subject: [PATCH] unix: properly account for FDs passed over unix sockets
+
+It is possible for a process to allocate and accumulate far more FDs than
+the process' limit by sending them over a unix socket then closing them
+to keep the process' fd count low.
+
+This change addresses this problem by keeping track of the number of FDs
+in flight per user and preventing non-privileged processes from having
+more FDs in flight than their configured FD limit.
+
+Reported-by: socketpair@gmail.com
+Reported-by: Tetsuo Handa
+Mitigates: CVE-2013-4312 (Linux 2.0+)
+Suggested-by: Linus Torvalds
+Acked-by: Hannes Frederic Sowa
+Signed-off-by: Willy Tarreau
+Signed-off-by: David S. Miller
+---
+ include/linux/sched.h |  1 +
+ net/unix/af_unix.c    | 24 ++++++++++++++++++++----
+ net/unix/garbage.c    | 13 ++++++++-----
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index b7b9501b41af..f477e87ca46f 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -830,6 +830,7 @@ struct user_struct {
+ 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+ #endif
+ 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
++	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
+ 
+ #ifdef CONFIG_KEYS
+ 	struct key *uid_keyring;	/* UID specific keyring */
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 128b0982c96b..9085de63bb81 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1498,6 +1498,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
++/*
++ * The "user->unix_inflight" variable is protected by the garbage
++ * collection lock, and we just read it locklessly here. If you go
++ * over the limit, there might be a tiny race in actually noticing
++ * it across threads. Tough.
++ */
++static inline bool too_many_unix_fds(struct task_struct *p)
++{
++	struct user_struct *user = current_user();
++
++	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
++	return false;
++}
++
+ #define MAX_RECURSION_LEVEL 4
+ 
+ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+@@ -1506,6 +1521,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	unsigned char max_level = 0;
+ 	int unix_sock_count = 0;
+ 
++	if (too_many_unix_fds(current))
++		return -ETOOMANYREFS;
++
+ 	for (i = scm->fp->count - 1; i >= 0; i--) {
+ 		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+ 
+@@ -1527,10 +1545,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	if (!UNIXCB(skb).fp)
+ 		return -ENOMEM;
+ 
+-	if (unix_sock_count) {
+-		for (i = scm->fp->count - 1; i >= 0; i--)
+-			unix_inflight(scm->fp->fp[i]);
+-	}
++	for (i = scm->fp->count - 1; i >= 0; i--)
++		unix_inflight(scm->fp->fp[i]);
+ 	return max_level;
+ }
+ 
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index a73a226f2d33..8fcdc2283af5 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		spin_lock(&unix_gc_lock);
+-
+ 		if (atomic_long_inc_return(&u->inflight) == 1) {
+ 			BUG_ON(!list_empty(&u->link));
+ 			list_add_tail(&u->link, &gc_inflight_list);
+@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
+ 			BUG_ON(list_empty(&u->link));
+ 		}
+ 		unix_tot_inflight++;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	fp->f_cred->user->unix_inflight++;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+ void unix_notinflight(struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		spin_lock(&unix_gc_lock);
+ 		BUG_ON(list_empty(&u->link));
+ 
+ 		if (atomic_long_dec_and_test(&u->inflight))
+ 			list_del_init(&u->link);
+ 		unix_tot_inflight--;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	fp->f_cred->user->unix_inflight--;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+-- 
+2.5.0
+
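For context only (not part of the commit or the spec change): the patch charges every file descriptor queued on an AF_UNIX socket to the sending user via user->unix_inflight. Below is a minimal userspace sketch of the SCM_RIGHTS fd-passing that unix_attach_fds() handles; the send_fd() helper name is illustrative, not from the kernel or the patch. With the patch applied, an unprivileged process whose in-flight descriptors already exceed RLIMIT_NOFILE should see sendmsg() fail with errno ETOOMANYREFS rather than being able to park more descriptors in flight.

/* Illustrative sketch (not from the patch): pass one fd over an AF_UNIX
 * socketpair with SCM_RIGHTS.  Each queued fd is counted by unix_inflight();
 * the sender may close its own copy afterwards, which is the accounting gap
 * the patch closes. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd)        /* helper name is illustrative */
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        struct cmsghdr hdr;                 /* forces correct alignment */
        char buf[CMSG_SPACE(sizeof(int))];
    } u;
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = u.buf,
        .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    memset(&u, 0, sizeof(u));
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;           /* fd-passing control message */
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

    return sendmsg(sock, &msg, 0);
}

int main(void)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return 1;

    /* The queued descriptor stays "in flight" until it is received or the
     * socket is garbage-collected; with the patch, going over RLIMIT_NOFILE
     * makes this fail with ETOOMANYREFS for unprivileged senders. */
    if (send_fd(sv[0], STDIN_FILENO) < 0)
        perror("sendmsg");

    close(sv[0]);
    close(sv[1]);
    return 0;
}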