0cc31ee
From 6a4ca79652219cf22da800d990e5b46feaea1ad9 Mon Sep 17 00:00:00 2001
0cc31ee
From: Jason Baron <jbaron@redhat.com>
0cc31ee
Date: Mon, 24 Oct 2011 14:59:02 +1100
0cc31ee
Subject: [PATCH] epoll: limit paths
0cc31ee
0cc31ee
epoll: limit paths
0cc31ee
0cc31ee
The current epoll code can be tickled to run basically indefinitely in
0cc31ee
both loop detection path check (on ep_insert()), and in the wakeup paths.
0cc31ee
The programs that tickle this behavior set up deeply linked networks of
0cc31ee
epoll file descriptors that cause the epoll algorithms to traverse them
0cc31ee
indefinitely.  A couple of these sample programs have been previously
0cc31ee
posted in this thread: https://lkml.org/lkml/2011/2/25/297.
0cc31ee
0cc31ee
To fix the loop detection path check algorithms, I simply keep track of
0cc31ee
the epoll nodes that have been already visited.  Thus, the loop detection
0cc31ee
becomes proportional to the number of epoll file descriptor and links.
0cc31ee
This dramatically decreases the run-time of the loop check algorithm.  In
0cc31ee
one diabolical case I tried it reduced the run-time from 15 minutes (all
0cc31ee
in kernel time) to .3 seconds.
0cc31ee
0cc31ee
Fixing the wakeup paths could be done at wakeup time in a similar manner
0cc31ee
by keeping track of nodes that have already been visited, but the
0cc31ee
complexity is harder, since there can be multiple wakeups on different
0cc31ee
cpus...Thus, I've opted to limit the number of possible wakeup paths when
0cc31ee
the paths are created.
0cc31ee
0cc31ee
This is accomplished, by noting that the end file descriptor points that
0cc31ee
are found during the loop detection pass (from the newly added link), are
0cc31ee
actually the sources for wakeup events.  I keep a list of these file
0cc31ee
descriptors and limit the number and length of these paths that emanate
0cc31ee
from these 'source file descriptors'.  In the current implementation I
0cc31ee
allow 1000 paths of length 1, 500 of length 2, 100 of length 3, 50 of
0cc31ee
length 4 and 10 of length 5.  Note that it is sufficient to check the
0cc31ee
'source file descriptors' reachable from the newly added link, since no
0cc31ee
other 'source file descriptors' will have newly added links.  This allows
0cc31ee
us to check only the wakeup paths that may have gotten too long, and not
0cc31ee
re-check all possible wakeup paths on the system.
0cc31ee
0cc31ee
In terms of the path limit selection, I think its first worth noting that
0cc31ee
the most common case for epoll, is probably the model where you have 1
0cc31ee
epoll file descriptor that is monitoring n number of 'source file
0cc31ee
descriptors'.  In this case, each 'source file descriptor' has 1 path of
0cc31ee
length 1.  Thus, I believe that the limits I'm proposing are quite
0cc31ee
reasonable and in fact may be too generous.  Thus, I'm hoping that the
0cc31ee
proposed limits will not prevent any workloads that currently work to
0cc31ee
fail.
0cc31ee
0cc31ee
In terms of locking, I have extended the use of the 'epmutex' to all
0cc31ee
epoll_ctl add and remove operations.  Currently its only used in a subset
0cc31ee
of the add paths.  I need to hold the epmutex, so that we can correctly
0cc31ee
traverse a coherent graph, to check the number of paths.  I believe that
0cc31ee
this additional locking is probably ok, since its in the setup/teardown
0cc31ee
paths, and doesn't affect the running paths, but it certainly is going to
0cc31ee
add some extra overhead.  Also, worth noting is that the epmutex was
0cc31ee
recently added to the ep_ctl add operations in the initial path loop
0cc31ee
detection code using the argument that it was not on a critical path.
0cc31ee
0cc31ee
Another thing to note here, is the length of epoll chains that is allowed.
0cc31ee
Currently, eventpoll.c defines:
0cc31ee
0cc31ee
/* Maximum number of nesting allowed inside epoll sets */
0cc31ee
#define EP_MAX_NESTS 4
0cc31ee
0cc31ee
This basically means that I am limited to a graph depth of 5 (EP_MAX_NESTS
0cc31ee
+ 1).  However, this limit is currently only enforced during the loop
0cc31ee
check detection code, and only when the epoll file descriptors are added
0cc31ee
in a certain order.  Thus, this limit is currently easily bypassed.  The
0cc31ee
newly added check for wakeup paths, strictly limits the wakeup paths to a
0cc31ee
length of 5, regardless of the order in which ep's are linked together.
0cc31ee
Thus, a side-effect of the new code is a more consistent enforcement of
0cc31ee
the graph depth.
0cc31ee
0cc31ee
Thus far, I've tested this, using the sample programs previously
0cc31ee
mentioned, which now either return quickly or return -EINVAL.  I've also
0cc31ee
tested using the piptest.c epoll tester, which showed no difference in
0cc31ee
performance.  I've also created a number of different epoll networks and
0cc31ee
tested that they behave as expected.
0cc31ee
0cc31ee
I believe this solves the original diabolical test cases, while still
0cc31ee
preserving the sane epoll nesting.
0cc31ee
0cc31ee
Signed-off-by: Jason Baron <jbaron@redhat.com>
0cc31ee
Cc: Nelson Elhage <nelhage@ksplice.com>
0cc31ee
Cc: Davide Libenzi <davidel@xmailserver.org>
0cc31ee
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
0cc31ee
---
0cc31ee
 fs/eventpoll.c            |  226 ++++++++++++++++++++++++++++++++++++++++-----
0cc31ee
 include/linux/eventpoll.h |    1 +
0cc31ee
 include/linux/fs.h        |    1 +
0cc31ee
 3 files changed, 203 insertions(+), 25 deletions(-)
0cc31ee
0cc31ee
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
0cc31ee
index 4a53743..414ac74 100644
0cc31ee
--- a/fs/eventpoll.c
0cc31ee
+++ b/fs/eventpoll.c
0cc31ee
@@ -197,6 +197,12 @@ struct eventpoll {
0cc31ee
 
0cc31ee
 	/* The user that created the eventpoll descriptor */
0cc31ee
 	struct user_struct *user;
0cc31ee
+
0cc31ee
+	struct file *file;
0cc31ee
+
0cc31ee
+	/* used to optimize loop detection check */
0cc31ee
+	int visited;
0cc31ee
+	struct list_head visitedllink;
0cc31ee
 };
0cc31ee
 
0cc31ee
 /* Wait structure used by the poll hooks */
0cc31ee
@@ -255,6 +261,12 @@ static struct kmem_cache *epi_cache __read_mostly;
0cc31ee
 /* Slab cache used to allocate "struct eppoll_entry" */
0cc31ee
 static struct kmem_cache *pwq_cache __read_mostly;
0cc31ee
 
0cc31ee
+/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
0cc31ee
+LIST_HEAD(visited_list);
0cc31ee
+
0cc31ee
+/* Files with newly added links, which need a limit on emanating paths */
0cc31ee
+LIST_HEAD(tfile_check_list);
0cc31ee
+
0cc31ee
 #ifdef CONFIG_SYSCTL
0cc31ee
 
0cc31ee
 #include <linux/sysctl.h>
0cc31ee
@@ -276,6 +288,12 @@ ctl_table epoll_table[] = {
0cc31ee
 };
0cc31ee
 #endif /* CONFIG_SYSCTL */
0cc31ee
 
0cc31ee
+static const struct file_operations eventpoll_fops;
0cc31ee
+
0cc31ee
+static inline int is_file_epoll(struct file *f)
0cc31ee
+{
0cc31ee
+	return f->f_op == &eventpoll_fops;
0cc31ee
+}
0cc31ee
 
0cc31ee
 /* Setup the structure that is used as key for the RB tree */
0cc31ee
 static inline void ep_set_ffd(struct epoll_filefd *ffd,
0cc31ee
@@ -711,12 +729,6 @@ static const struct file_operations eventpoll_fops = {
0cc31ee
 	.llseek		= noop_llseek,
0cc31ee
 };
0cc31ee
 
0cc31ee
-/* Fast test to see if the file is an evenpoll file */
0cc31ee
-static inline int is_file_epoll(struct file *f)
0cc31ee
-{
0cc31ee
-	return f->f_op == &eventpoll_fops;
0cc31ee
-}
0cc31ee
-
0cc31ee
 /*
0cc31ee
  * This is called from eventpoll_release() to unlink files from the eventpoll
0cc31ee
  * interface. We need to have this facility to cleanup correctly files that are
0cc31ee
@@ -926,6 +938,96 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
0cc31ee
 	rb_insert_color(&epi->rbn, &ep->rbr);
0cc31ee
 }
0cc31ee
 
0cc31ee
+
0cc31ee
+
0cc31ee
+#define PATH_ARR_SIZE 5
0cc31ee
+/* These are the number paths of length 1 to 5, that we are allowing to emanate
0cc31ee
+ * from a single file of interest. For example, we allow 1000 paths of length
0cc31ee
+ * 1, to emanate from each file of interest. This essentially represents the
0cc31ee
+ * potential wakeup paths, which need to be limited in order to avoid massive
0cc31ee
+ * uncontrolled wakeup storms. The common use case should be a single ep which
0cc31ee
+ * is connected to n file sources. In this case each file source has 1 path
0cc31ee
+ * of length 1. Thus, the numbers below should be more than sufficient.
0cc31ee
+ */
0cc31ee
+int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
0cc31ee
+int path_count[PATH_ARR_SIZE];
0cc31ee
+
0cc31ee
+static int path_count_inc(int nests)
0cc31ee
+{
0cc31ee
+	if (++path_count[nests] > path_limits[nests])
0cc31ee
+		return -1;
0cc31ee
+	return 0;
0cc31ee
+}
0cc31ee
+
0cc31ee
+static void path_count_init(void)
0cc31ee
+{
0cc31ee
+	int i;
0cc31ee
+
0cc31ee
+	for (i = 0; i < PATH_ARR_SIZE; i++)
0cc31ee
+		path_count[i] = 0;
0cc31ee
+}
0cc31ee
+
0cc31ee
+static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
0cc31ee
+{
0cc31ee
+	int error = 0;
0cc31ee
+	struct file *file = priv;
0cc31ee
+	struct file *child_file;
0cc31ee
+	struct epitem *epi;
0cc31ee
+
0cc31ee
+	list_for_each_entry(epi, &file->f_ep_links, fllink) {
0cc31ee
+		child_file = epi->ep->file;
0cc31ee
+		if (is_file_epoll(child_file)) {
0cc31ee
+			if (list_empty(&child_file->f_ep_links)) {
0cc31ee
+				if (path_count_inc(call_nests)) {
0cc31ee
+					error = -1;
0cc31ee
+					break;
0cc31ee
+				}
0cc31ee
+			} else {
0cc31ee
+				error = ep_call_nested(&poll_loop_ncalls,
0cc31ee
+							EP_MAX_NESTS,
0cc31ee
+							reverse_path_check_proc,
0cc31ee
+							child_file, child_file,
0cc31ee
+							current);
0cc31ee
+			}
0cc31ee
+			if (error != 0)
0cc31ee
+				break;
0cc31ee
+		} else {
0cc31ee
+			printk(KERN_ERR "reverse_path_check_proc: "
0cc31ee
+				"file is not an ep!\n");
0cc31ee
+		}
0cc31ee
+	}
0cc31ee
+	return error;
0cc31ee
+}
0cc31ee
+
0cc31ee
+/**
0cc31ee
+ * reverse_path_check - The tfile_check_list is list of file *, which have
0cc31ee
+ *                      links that are proposed to be newly added. We need to
0cc31ee
+ *                      make sure that those added links don't add too many
0cc31ee
+ *                      paths such that we will spend all our time waking up
0cc31ee
+ *                      eventpoll objects.
0cc31ee
+ *
0cc31ee
+ * Returns: Returns zero if the proposed links don't create too many paths,
0cc31ee
+ *	    -1 otherwise.
0cc31ee
+ */
0cc31ee
+static int reverse_path_check(void)
0cc31ee
+{
0cc31ee
+	int length = 0;
0cc31ee
+	int error = 0;
0cc31ee
+	struct file *current_file;
0cc31ee
+
0cc31ee
+	/* let's call this for all tfiles */
0cc31ee
+	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
0cc31ee
+		length++;
0cc31ee
+		path_count_init();
0cc31ee
+		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
0cc31ee
+					reverse_path_check_proc, current_file,
0cc31ee
+					current_file, current);
0cc31ee
+		if (error)
0cc31ee
+			break;
0cc31ee
+	}
0cc31ee
+	return error;
0cc31ee
+}
0cc31ee
+
0cc31ee
 /*
0cc31ee
  * Must be called with "mtx" held.
0cc31ee
  */
0cc31ee
@@ -987,6 +1089,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
0cc31ee
 	 */
0cc31ee
 	ep_rbtree_insert(ep, epi);
0cc31ee
 
0cc31ee
+	/* now check if we've created too many backpaths */
0cc31ee
+	error = -EINVAL;
0cc31ee
+	if (reverse_path_check())
0cc31ee
+		goto error_remove_epi;
0cc31ee
+
0cc31ee
 	/* We have to drop the new item inside our item list to keep track of it */
0cc31ee
 	spin_lock_irqsave(&ep->lock, flags);
0cc31ee
 
0cc31ee
@@ -1011,6 +1118,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
0cc31ee
 
0cc31ee
 	return 0;
0cc31ee
 
0cc31ee
+error_remove_epi:
0cc31ee
+	spin_lock(&tfile->f_lock);
0cc31ee
+	if (ep_is_linked(&epi->fllink))
0cc31ee
+		list_del_init(&epi->fllink);
0cc31ee
+	spin_unlock(&tfile->f_lock);
0cc31ee
+
0cc31ee
+	rb_erase(&epi->rbn, &ep->rbr);
0cc31ee
+
0cc31ee
 error_unregister:
0cc31ee
 	ep_unregister_pollwait(ep, epi);
0cc31ee
 
0cc31ee
@@ -1275,18 +1390,35 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
0cc31ee
 	int error = 0;
0cc31ee
 	struct file *file = priv;
0cc31ee
 	struct eventpoll *ep = file->private_data;
0cc31ee
+	struct eventpoll *ep_tovisit;
0cc31ee
 	struct rb_node *rbp;
0cc31ee
 	struct epitem *epi;
0cc31ee
 
0cc31ee
 	mutex_lock_nested(&ep->mtx, call_nests + 1);
0cc31ee
+	ep->visited = 1;
0cc31ee
+	list_add(&ep->visitedllink, &visited_list);
0cc31ee
 	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
0cc31ee
 		epi = rb_entry(rbp, struct epitem, rbn);
0cc31ee
 		if (unlikely(is_file_epoll(epi->ffd.file))) {
0cc31ee
+			ep_tovisit = epi->ffd.file->private_data;
0cc31ee
+			if (ep_tovisit->visited)
0cc31ee
+				continue;
0cc31ee
 			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
0cc31ee
-					       ep_loop_check_proc, epi->ffd.file,
0cc31ee
-					       epi->ffd.file->private_data, current);
0cc31ee
+					ep_loop_check_proc, epi->ffd.file,
0cc31ee
+					ep_tovisit, current);
0cc31ee
 			if (error != 0)
0cc31ee
 				break;
0cc31ee
+		} else {
0cc31ee
+			/* if we've reached a file that is not associated with
0cc31ee
+			 * an ep, then then we need to check if the newly added
0cc31ee
+			 * links are going to add too many wakeup paths. We do
0cc31ee
+			 * this by adding it to the tfile_check_list, if it's
0cc31ee
+			 * not already there, and calling reverse_path_check()
0cc31ee
+			 * during ep_insert()
0cc31ee
+			 */
0cc31ee
+			if (list_empty(&epi->ffd.file->f_tfile_llink))
0cc31ee
+				list_add(&epi->ffd.file->f_tfile_llink,
0cc31ee
+					 &tfile_check_list);
0cc31ee
 		}
0cc31ee
 	}
0cc31ee
 	mutex_unlock(&ep->mtx);
0cc31ee
@@ -1307,8 +1439,30 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
0cc31ee
  */
0cc31ee
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
0cc31ee
 {
0cc31ee
-	return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
0cc31ee
+	int ret;
0cc31ee
+	struct eventpoll *ep_cur, *ep_next;
0cc31ee
+
0cc31ee
+	ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
0cc31ee
 			      ep_loop_check_proc, file, ep, current);
0cc31ee
+	/* clear visited list */
0cc31ee
+	list_for_each_entry_safe(ep_cur, ep_next, &visited_list, visitedllink) {
0cc31ee
+		ep_cur->visited = 0;
0cc31ee
+		list_del(&ep_cur->visitedllink);
0cc31ee
+	}
0cc31ee
+	return ret;
0cc31ee
+}
0cc31ee
+
0cc31ee
+static void clear_tfile_check_list(void)
0cc31ee
+{
0cc31ee
+	struct file *file;
0cc31ee
+
0cc31ee
+	/* first clear the tfile_check_list */
0cc31ee
+	while (!list_empty(&tfile_check_list)) {
0cc31ee
+		file = list_first_entry(&tfile_check_list, struct file,
0cc31ee
+					f_tfile_llink);
0cc31ee
+		list_del_init(&file->f_tfile_llink);
0cc31ee
+	}
0cc31ee
+	INIT_LIST_HEAD(&tfile_check_list);
0cc31ee
 }
0cc31ee
 
0cc31ee
 /*
0cc31ee
@@ -1316,8 +1470,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
0cc31ee
  */
0cc31ee
 SYSCALL_DEFINE1(epoll_create1, int, flags)
0cc31ee
 {
0cc31ee
-	int error;
0cc31ee
+	int error, fd;
0cc31ee
 	struct eventpoll *ep = NULL;
0cc31ee
+	struct file *file;
0cc31ee
 
0cc31ee
 	/* Check the EPOLL_* constant for consistency.  */
0cc31ee
 	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
0cc31ee
@@ -1334,11 +1489,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
0cc31ee
 	 * Creates all the items needed to setup an eventpoll file. That is,
0cc31ee
 	 * a file structure and a free file descriptor.
0cc31ee
 	 */
0cc31ee
-	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
0cc31ee
+	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
0cc31ee
+	if (fd < 0) {
0cc31ee
+		error = fd;
0cc31ee
+		goto out_free_ep;
0cc31ee
+	}
0cc31ee
+	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
0cc31ee
 				 O_RDWR | (flags & O_CLOEXEC));
0cc31ee
-	if (error < 0)
0cc31ee
-		ep_free(ep);
0cc31ee
-
0cc31ee
+	if (IS_ERR(file)) {
0cc31ee
+		error = PTR_ERR(file);
0cc31ee
+		goto out_free_fd;
0cc31ee
+	}
0cc31ee
+	fd_install(fd, file);
0cc31ee
+	ep->file = file;
0cc31ee
+	return fd;
0cc31ee
+
0cc31ee
+out_free_fd:
0cc31ee
+	put_unused_fd(fd);
0cc31ee
+out_free_ep:
0cc31ee
+	ep_free(ep);
0cc31ee
 	return error;
0cc31ee
 }
0cc31ee
 
0cc31ee
@@ -1404,21 +1573,27 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
0cc31ee
 	/*
0cc31ee
 	 * When we insert an epoll file descriptor, inside another epoll file
0cc31ee
 	 * descriptor, there is the change of creating closed loops, which are
0cc31ee
-	 * better be handled here, than in more critical paths.
0cc31ee
+	 * better be handled here, than in more critical paths. While we are
0cc31ee
+	 * checking for loops we also determine the list of files reachable
0cc31ee
+	 * and hang them on the tfile_check_list, so we can check that we
0cc31ee
+	 * haven't created too many possible wakeup paths.
0cc31ee
 	 *
0cc31ee
-	 * We hold epmutex across the loop check and the insert in this case, in
0cc31ee
-	 * order to prevent two separate inserts from racing and each doing the
0cc31ee
-	 * insert "at the same time" such that ep_loop_check passes on both
0cc31ee
-	 * before either one does the insert, thereby creating a cycle.
0cc31ee
+	 * We need to hold the epmutex across both ep_insert and ep_remove
0cc31ee
+	 * b/c we want to make sure we are looking at a coherent view of
0cc31ee
+	 * epoll network.
0cc31ee
 	 */
0cc31ee
-	if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
0cc31ee
+	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
0cc31ee
 		mutex_lock(&epmutex);
0cc31ee
 		did_lock_epmutex = 1;
0cc31ee
-		error = -ELOOP;
0cc31ee
-		if (ep_loop_check(ep, tfile) != 0)
0cc31ee
-			goto error_tgt_fput;
0cc31ee
 	}
0cc31ee
-
0cc31ee
+	if (op == EPOLL_CTL_ADD) {
0cc31ee
+		if (is_file_epoll(tfile)) {
0cc31ee
+			error = -ELOOP;
0cc31ee
+			if (ep_loop_check(ep, tfile) != 0)
0cc31ee
+				goto error_tgt_fput;
0cc31ee
+		} else
0cc31ee
+			list_add(&tfile->f_tfile_llink, &tfile_check_list);
0cc31ee
+	}
0cc31ee
 
0cc31ee
 	mutex_lock_nested(&ep->mtx, 0);
0cc31ee
 
0cc31ee
@@ -1437,6 +1612,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
0cc31ee
 			error = ep_insert(ep, &epds, tfile, fd);
0cc31ee
 		} else
0cc31ee
 			error = -EEXIST;
0cc31ee
+		clear_tfile_check_list();
0cc31ee
 		break;
0cc31ee
 	case EPOLL_CTL_DEL:
0cc31ee
 		if (epi)
0cc31ee
@@ -1455,7 +1631,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
0cc31ee
 	mutex_unlock(&ep->mtx);
0cc31ee
 
0cc31ee
 error_tgt_fput:
0cc31ee
-	if (unlikely(did_lock_epmutex))
0cc31ee
+	if (did_lock_epmutex)
0cc31ee
 		mutex_unlock(&epmutex);
0cc31ee
 
0cc31ee
 	fput(tfile);
0cc31ee
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
0cc31ee
index f362733..657ab55 100644
0cc31ee
--- a/include/linux/eventpoll.h
0cc31ee
+++ b/include/linux/eventpoll.h
0cc31ee
@@ -61,6 +61,7 @@ struct file;
0cc31ee
 static inline void eventpoll_init_file(struct file *file)
0cc31ee
 {
0cc31ee
 	INIT_LIST_HEAD(&file->f_ep_links);
0cc31ee
+	INIT_LIST_HEAD(&file->f_tfile_llink);
0cc31ee
 }
0cc31ee
 
0cc31ee
 
0cc31ee
diff --git a/include/linux/fs.h b/include/linux/fs.h
0cc31ee
index 277f497..93778e0 100644
0cc31ee
--- a/include/linux/fs.h
0cc31ee
+++ b/include/linux/fs.h
0cc31ee
@@ -985,6 +985,7 @@ struct file {
0cc31ee
 #ifdef CONFIG_EPOLL
0cc31ee
 	/* Used by fs/eventpoll.c to link all the hooks to this file */
0cc31ee
 	struct list_head	f_ep_links;
0cc31ee
+	struct list_head	f_tfile_llink;
0cc31ee
 #endif /* #ifdef CONFIG_EPOLL */
0cc31ee
 	struct address_space	*f_mapping;
0cc31ee
 #ifdef CONFIG_DEBUG_WRITECOUNT
0cc31ee
-- 
0cc31ee
1.7.6.4
0cc31ee