From bf973073d98660edf35e01e6984029e46eb85368 Mon Sep 17 00:00:00 2001
From: "dslomov@chromium.org"
 <dslomov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date: Mon, 13 Jan 2014 13:00:09 +0000
Subject: [PATCH] Use unsigned integer arithmetic in Zone::NewExpand.

    BUG=328202
    R=jkummerow@chromium.org
    LOG=N

    Review URL: https://codereview.chromium.org/108783005

    git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@18564 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/zone.cc | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/src/zone.cc b/src/zone.cc
index 51b8113..c12978f 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -175,25 +175,31 @@ Address Zone::NewExpand(int size) {
   // except that we employ a maximum segment size when we delete. This
   // is to avoid excessive malloc() and free() overhead.
   Segment* head = segment_head_;
-  int old_size = (head == NULL) ? 0 : head->size();
-  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
-  int new_size_no_overhead = size + (old_size << 1);
-  int new_size = kSegmentOverhead + new_size_no_overhead;
+  const size_t old_size = (head == NULL) ? 0 : head->size();
+  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
+  const size_t new_size_no_overhead = size + (old_size << 1);
+  size_t new_size = kSegmentOverhead + new_size_no_overhead;
+  const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
   // Guard against integer overflow.
-  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
+  if (new_size_no_overhead < static_cast<size_t>(size) ||
+      new_size < static_cast<size_t>(kSegmentOverhead)) {
     V8::FatalProcessOutOfMemory("Zone");
     return NULL;
   }
-  if (new_size < kMinimumSegmentSize) {
+  if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
     new_size = kMinimumSegmentSize;
-  } else if (new_size > kMaximumSegmentSize) {
+  } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
     // Limit the size of new segments to avoid growing the segment size
     // exponentially, thus putting pressure on contiguous virtual address space.
     // All the while making sure to allocate a segment large enough to hold the
     // requested size.
-    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
+    new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
   }
-  Segment* segment = NewSegment(new_size);
+  if (new_size > INT_MAX) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return NULL;
+  }
+  Segment* segment = NewSegment(static_cast<int>(new_size));
   if (segment == NULL) {
     V8::FatalProcessOutOfMemory("Zone");
     return NULL;
@@ -203,7 +209,10 @@ Address Zone::NewExpand(int size) {
   Address result = RoundUp(segment->start(), kAlignment);
   position_ = result + size;
   // Check for address overflow.
-  if (position_ < result) {
+  // (Should not happen since the segment is guaranteed to accomodate
+  // size bytes + header and alignment padding)
+  if (reinterpret_cast<uintptr_t>(position_)
+      < reinterpret_cast<uintptr_t>(result)) {
     V8::FatalProcessOutOfMemory("Zone");
     return NULL;
   }
-- 
1.8.5.3