diff -up chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h.orig chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h
--- chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h.orig	2024-04-12 21:56:54.000000000 +0200
+++ chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h	2024-04-14 20:53:16.043867871 +0200
@@ -147,7 +147,13 @@ struct PartitionBucket {
   // Returns a slot number starting from the beginning of the slot span.
   PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
     // See the static assertion for `kReciprocalShift` above.
-    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
+    // TODO(casey.smalley@arm.com): the original check triggers
+    // on AArch64/Linux systems with 64k system pages. Constants
+    // need to be adjusted to prevent different parts of the
+    // allocator from overlapping. For now this allows 64k pages
+    // to function on AArch64/Linux systems, albeit not
+    // very efficiently.
+    PA_DCHECK(internal::SystemPageSize() == (size_t{1} << 16) || offset_in_slot_span <= kMaxBucketed);
     PA_DCHECK(slot_size <= kMaxBucketed);
 
     const size_t offset_in_slot =
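For context, not part of the patch itself: a minimal standalone sketch of what the relaxed assertion above amounts to at runtime. SystemPageSize() is emulated with sysconf(), and kMaxBucketed is a placeholder value, not the real PartitionAlloc constant.

    #include <unistd.h>

    #include <cassert>
    #include <cstddef>

    // Placeholder for illustration only; the real constant lives in
    // partition_alloc_constants.h.
    constexpr std::size_t kMaxBucketed = 960 * 1024;

    std::size_t SystemPageSize() {
      return static_cast<std::size_t>(sysconf(_SC_PAGESIZE));
    }

    void CheckOffset(std::size_t offset_in_slot_span) {
      // Mirrors the patched PA_DCHECK: on 64 KiB-page systems the upper bound
      // is skipped entirely; everywhere else the original bound still applies.
      assert(SystemPageSize() == (std::size_t{1} << 16) ||
             offset_in_slot_span <= kMaxBucketed);
    }

    int main() {
      CheckOffset(4096);  // fine on any supported page size
      return 0;
    }
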
diff -up chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h.orig chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h
--- chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h.orig	2024-04-14 20:53:16.043867871 +0200
+++ chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h	2024-04-14 21:39:18.147052845 +0200
@@ -17,10 +17,8 @@ namespace partition_alloc::internal {
 // PartitionPageSize() is 4 times the OS page size.
 static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
 #elif defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
-// System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
-// currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code,
-// so we use the 16 kiB maximum (64 kiB will crash).
-static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
+// System page size can be 4, 16, or 64 kiB on Linux on AArch64.
+static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 16) / kSmallestBucket;
 #else
 // A slot span can "span" multiple PartitionPages, but then its slot size is
 // larger, so it doesn't have as many slots.
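For context, not part of the patch: the arithmetic behind the new constant, assuming kSmallestBucket is 16 bytes (its usual value on 64-bit builds). The same numbers explain why kMaxSlotsPerSlotSpanBits is raised from 13 to 15 in partition_page.h further down.

    #include <cstddef>

    constexpr std::size_t kSmallestBucket = 16;  // assumption for this sketch

    // Old bound: PartitionPageSize() = 4 * 16 KiB system pages.
    constexpr std::size_t kOldMaxSlots = 4 * (1 << 14) / kSmallestBucket;  // 4096
    // New bound: PartitionPageSize() = 4 * 64 KiB system pages.
    constexpr std::size_t kNewMaxSlots = 4 * (1 << 16) / kSmallestBucket;  // 16384

    static_assert(kOldMaxSlots < (1 << 13), "old count fits in 13 bits");
    static_assert(kNewMaxSlots >= (1 << 13), "new count overflows 13 bits");
    static_assert(kNewMaxSlots < (1 << 15), "new count fits in 15 bits");
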
diff -up chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc.orig chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc
--- chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc.orig	2024-04-14 20:53:16.044867889 +0200
+++ chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc	2024-04-14 21:50:57.926716718 +0200
@@ -952,12 +952,11 @@ void PartitionRoot::Init(PartitionOption
              (internal::SystemPageSize() == (size_t{1} << 14)));
 #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
     // Check runtime pagesize. Though the code is currently the same, it is
-    // not merged with the IS_APPLE case above as a 1 << 16 case needs to be
-    // added here in the future, to allow 64 kiB pagesize. That is only
-    // supported on Linux on arm64, not on IS_APPLE, but not yet present here
-    // as the rest of the PartitionAlloc does not currently support it.
+    // not merged with the IS_APPLE case above, because the 1 << 16 case is
+    // only supported on Linux on AArch64, not on IS_APPLE.
     PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
-             (internal::SystemPageSize() == (size_t{1} << 14)));
+             (internal::SystemPageSize() == (size_t{1} << 14)) ||
+             (internal::SystemPageSize() == (size_t{1} << 16)));
 #endif
 
     ::partition_alloc::internal::ScopedGuard guard{lock_};
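For context, not part of the patch: a standalone check of the page sizes the widened runtime CHECK above now accepts on Linux/AArch64, queried the same way the C library reports them.

    #include <unistd.h>

    #include <cstdio>

    int main() {
      const long page = sysconf(_SC_PAGESIZE);
      const bool supported =
          page == (1L << 12) || page == (1L << 14) || page == (1L << 16);
      std::printf("system page size: %ld bytes -> %s\n", page,
                  supported ? "accepted" : "rejected");
      return supported ? 0 : 1;
    }
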
diff -up chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page.h.me chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page.h
--- chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page.h.me	2024-04-14 22:08:40.655011536 +0200
+++ chromium-124.0.6367.49/base/allocator/partition_allocator/src/partition_alloc/partition_page.h	2024-04-14 22:12:46.665731577 +0200
@@ -86,27 +86,25 @@ struct SlotSpanMetadata {
 
   // CHECK()ed in AllocNewSlotSpan().
   // The maximum number of bits needed to cover all currently supported OSes.
-  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
+  static constexpr size_t kMaxSlotsPerSlotSpanBits = 15;
   static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
 
-  // |marked_full| isn't equivalent to being full. Slot span is marked as full
-  // iff it isn't on the active slot span list (or any other list).
-  uint32_t marked_full : 1;
   // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
   // be further differentiated by checking existence of the freelist.
-  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
-  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
+  uint16_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
+  uint16_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
+
+  // |marked_full| isn't equivalent to being full. Slot span is marked as full
+  // iff it isn't on the active slot span list (or any other list).
+  bool marked_full : 1;
 
  private:
-  const uint32_t can_store_raw_size_ : 1;
-  uint32_t freelist_is_sorted_ : 1;
-  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
+  const uint8_t can_store_raw_size_ : 1;
+  uint8_t freelist_is_sorted_ : 1;
   // If |in_empty_cache_|==1, |empty_cache_index| is undefined and mustn't be
   // used.
-  uint16_t in_empty_cache_ : 1;
-  uint16_t empty_cache_index_
-      : kMaxEmptyCacheIndexBits;  // < kMaxFreeableSpans.
-  uint16_t unused2_ : (16 - 1 - kMaxEmptyCacheIndexBits);
+  bool in_empty_cache_ : 1;
+  uint8_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
   // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
   // in PartitionPage which has 2B worth of fields and must fit in 32B.
 
@@ -246,18 +244,13 @@ static_assert(sizeof(SlotSpanMetadata) <
               "SlotSpanMetadata must fit into a Page Metadata slot.");
 
 inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
-    : marked_full(0),
-      num_allocated_slots(0),
+    : num_allocated_slots(0),
       num_unprovisioned_slots(0),
+      marked_full(0),
       can_store_raw_size_(false),
       freelist_is_sorted_(true),
-      unused1_(0),
       in_empty_cache_(0),
-      empty_cache_index_(0),
-      unused2_(0) {
-  (void)unused1_;
-  (void)unused2_;
-}
+      empty_cache_index_(0) {}
 
 inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;
 
@@ -752,7 +745,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata::
 
   size_t num_slots_per_span = bucket->get_slots_per_span();
   PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
-  num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
+  num_unprovisioned_slots = static_cast<uint16_t>(num_slots_per_span);
   PA_DCHECK(num_unprovisioned_slots);
 
   ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
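
For context, not part of the patch: a sketch of the repacked bitfield from partition_page.h above, with kEmptyCacheIndexBits assumed to be 7 for illustration. Under the GCC/Clang bitfield layout used on Linux it still fits the 48-bit (6-byte) budget mentioned in the surrounding comment, even with the slot counters widened to 15 bits.

    #include <cstdint>

    constexpr int kMaxSlotsPerSlotSpanBits = 15;
    constexpr int kEmptyCacheIndexBits = 7;  // assumption for this sketch

    // Bitfield portion of SlotSpanMetadata only; the real struct also carries
    // a freelist head and a bucket pointer ahead of these fields.
    struct RepackedBits {
      std::uint16_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
      std::uint16_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
      bool marked_full : 1;
      std::uint8_t can_store_raw_size : 1;
      std::uint8_t freelist_is_sorted : 1;
      bool in_empty_cache : 1;
      std::uint8_t empty_cache_index : kEmptyCacheIndexBits;
    };

    // 15 + 15 + 1 + 1 + 1 + 1 + 7 = 41 bits of payload; GCC/Clang pack this
    // into 6 bytes, within the budget stated in partition_page.h.
    static_assert(sizeof(RepackedBits) <= 6, "must stay within 6 bytes");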