|
Kyle McMartin |
a7e4f1c |
From owner-linux-mm@kvack.org Wed May 11 11:29:53 2011
|
|
Kyle McMartin |
a7e4f1c |
From: Mel Gorman <mgorman@suse.de>
|
|
Kyle McMartin |
a7e4f1c |
To: Andrew Morton <akpm@linux-foundation.org>
|
|
Kyle McMartin |
a7e4f1c |
Subject: [PATCH 2/3] mm: slub: Do not take expensive steps for SLUBs speculative high-order allocations
|
|
Kyle McMartin |
a7e4f1c |
Date: Wed, 11 May 2011 16:29:32 +0100
|
|
Kyle McMartin |
a7e4f1c |
Message-Id: <1305127773-10570-3-git-send-email-mgorman@suse.de>
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
To avoid locking and per-cpu overhead, SLUB optimistically uses
|
|
Kyle McMartin |
a7e4f1c |
high-order allocations and falls back to lower allocations if they
|
|
Kyle McMartin |
a7e4f1c |
fail. However, by simply trying to allocate, the caller can enter
|
|
Kyle McMartin |
a7e4f1c |
compaction or reclaim - both of which are likely to cost more than the
|
|
Kyle McMartin |
a7e4f1c |
benefit of using high-order pages in SLUB. On a desktop system, two
|
|
Kyle McMartin |
a7e4f1c |
users report that the system is getting stalled with kswapd using large
|
|
Kyle McMartin |
a7e4f1c |
amounts of CPU.
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
This patch prevents SLUB taking any expensive steps when trying to
|
|
Kyle McMartin |
a7e4f1c |
use high-order allocations. Instead, it is expected to fall back to
|
|
Kyle McMartin |
a7e4f1c |
smaller orders more aggressively. Testing from users was somewhat
|
|
Kyle McMartin |
a7e4f1c |
inconclusive on how much this helped but local tests showed it made
|
|
Kyle McMartin |
a7e4f1c |
a positive difference. It makes sense that falling back to order-0
|
|
Kyle McMartin |
a7e4f1c |
allocations is faster than entering compaction or direct reclaim.
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
Signed-off-by: Mel Gorman <mgorman@suse.de>
|
|
Kyle McMartin |
a7e4f1c |
---
|
|
Kyle McMartin |
a7e4f1c |
mm/page_alloc.c | 3 ++-
|
|
Kyle McMartin |
a7e4f1c |
mm/slub.c | 3 ++-
|
|
Kyle McMartin |
a7e4f1c |
2 files changed, 4 insertions(+), 2 deletions(-)
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
Kyle McMartin |
a7e4f1c |
index 9f8a97b..057f1e2 100644
|
|
Kyle McMartin |
a7e4f1c |
--- a/mm/page_alloc.c
|
|
Kyle McMartin |
a7e4f1c |
+++ b/mm/page_alloc.c
|
|
Kyle McMartin |
a7e4f1c |
@@ -1972,6 +1972,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
|
|
Kyle McMartin |
a7e4f1c |
{
|
|
Kyle McMartin |
a7e4f1c |
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
|
|
Kyle McMartin |
a7e4f1c |
const gfp_t wait = gfp_mask & __GFP_WAIT;
|
|
Kyle McMartin |
a7e4f1c |
+ const gfp_t can_wake_kswapd = !(gfp_mask & __GFP_NO_KSWAPD);
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
|
|
Kyle McMartin |
a7e4f1c |
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
|
|
Kyle McMartin |
a7e4f1c |
@@ -1984,7 +1985,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
|
|
Kyle McMartin |
a7e4f1c |
*/
|
|
Kyle McMartin |
a7e4f1c |
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
- if (!wait) {
|
|
Kyle McMartin |
a7e4f1c |
+ if (!wait && can_wake_kswapd) {
|
|
Kyle McMartin |
a7e4f1c |
/*
|
|
Kyle McMartin |
a7e4f1c |
* Not worth trying to allocate harder for
|
|
Kyle McMartin |
a7e4f1c |
* __GFP_NOMEMALLOC even if it can't schedule.
|
|
Kyle McMartin |
a7e4f1c |
diff --git a/mm/slub.c b/mm/slub.c
|
|
Kyle McMartin |
a7e4f1c |
index 98c358d..1071723 100644
|
|
Kyle McMartin |
a7e4f1c |
--- a/mm/slub.c
|
|
Kyle McMartin |
a7e4f1c |
+++ b/mm/slub.c
|
|
Kyle McMartin |
a7e4f1c |
@@ -1170,7 +1170,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
|
|
Kyle McMartin |
a7e4f1c |
* Let the initial higher-order allocation fail under memory pressure
|
|
Kyle McMartin |
a7e4f1c |
* so we fall-back to the minimum order allocation.
|
|
Kyle McMartin |
a7e4f1c |
*/
|
|
Kyle McMartin |
a7e4f1c |
- alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY | __GFP_NO_KSWAPD) & ~__GFP_NOFAIL;
|
|
Kyle McMartin |
a7e4f1c |
+ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY | __GFP_NO_KSWAPD) &
|
|
Kyle McMartin |
a7e4f1c |
+ ~(__GFP_NOFAIL | __GFP_WAIT);
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
page = alloc_slab_page(alloc_gfp, node, oo);
|
|
Kyle McMartin |
a7e4f1c |
if (unlikely(!page)) {
|
|
Kyle McMartin |
a7e4f1c |
--
|
|
Kyle McMartin |
a7e4f1c |
1.7.3.4
|
|
Kyle McMartin |
a7e4f1c |
|
|
Kyle McMartin |
a7e4f1c |
--
|
|
Kyle McMartin |
a7e4f1c |
To unsubscribe, send a message with 'unsubscribe linux-mm' in
|
|
Kyle McMartin |
a7e4f1c |
the body to majordomo@kvack.org. For more info on Linux MM,
|
|
Kyle McMartin |
a7e4f1c |
see: http://www.linux-mm.org/ .
|
|
Kyle McMartin |
a7e4f1c |
Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
|
|
Kyle McMartin |
a7e4f1c |
Don't email: email@kvack.org
|
|
Kyle McMartin |
a7e4f1c |
|