scudo: Fix MTE error reporting for zero-sized allocations. am: c727119a49 am: 3cd2ce3aa7 am: a2ccaa8b5f

Original change: https://android-review.googlesource.com/c/platform/external/scudo/+/1707708

Change-Id: If92d16992eea5b16c41b3717ee07e58d77c76968
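
Before this change, a linear overflow past a zero-sized allocation could
trap under MTE without the report being attributable to any chunk:
setRandomTag() tags the region [Begin, End), which is empty when Size == 0,
so no address tag for the chunk ever reaches the memory tag space that
getInlineErrorInfo() relies on to match address tags against chunks. A
hypothetical reproducer (not part of this change; assumes an MTE-enabled
AArch64 device with scudo as the system allocator):

  #include <cstdlib>

  int main() {
    // A zero-sized allocation still returns a valid, tagged pointer.
    char *P = static_cast<char *>(malloc(0));
    // Linear overflow: expected to trap under MTE. Before this change the
    // resulting report could not be matched back to this chunk.
    P[0] = 1;
    free(P);
  }
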
diff --git a/standalone/combined.h b/standalone/combined.h
index 95fc8d9..8080d67 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -437,7 +437,7 @@
           if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
             PrevEnd = NextPage;
           TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
-          resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd);
+          resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
           if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
             // If an allocation needs to be zeroed (i.e. calloc) we can normally
             // avoid zeroing the memory now since we can rely on memory having
@@ -643,7 +643,7 @@
           if (ClassId) {
             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
-                              BlockEnd);
+                              NewSize, BlockEnd);
             storePrimaryAllocationStackMaybe(Options, OldPtr);
           } else {
             storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
@@ -1143,6 +1143,27 @@
     return Offset + Chunk::getHeaderSize();
   }
 
+  // Set the tag of the granule past the end of the allocation to 0, to catch
+  // linear overflows even if a previous larger allocation used the same block
+  // and tag. Only do this if the granule past the end is in our block, because
+  // this would otherwise lead to a SEGV if the allocation covers the entire
+  // block and our block is at the end of a mapping. The tag of the next block's
+  // header granule will be set to 0, so it will serve the purpose of catching
+  // linear overflows in this case.
+  //
+  // For allocations of size 0 we do not end up storing the address tag to the
+  // memory tag space, which getInlineErrorInfo() normally relies on to match
+  // address tags against chunks. To allow matching in this case we store the
+  // address tag in the first byte of the chunk.
+  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
+    uptr UntaggedEnd = untagPointer(End);
+    if (UntaggedEnd != BlockEnd) {
+      storeTag(UntaggedEnd);
+      if (Size == 0)
+        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
+    }
+  }
+
   void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                            uptr BlockEnd) {
     // Prepare the granule before the chunk to store the chunk header by setting
@@ -1155,25 +1176,17 @@
     uptr TaggedBegin, TaggedEnd;
     setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
 
-    // Finally, set the tag of the granule past the end of the allocation to 0,
-    // to catch linear overflows even if a previous larger allocation used the
-    // same block and tag. Only do this if the granule past the end is in our
-    // block, because this would otherwise lead to a SEGV if the allocation
-    // covers the entire block and our block is at the end of a mapping. The tag
-    // of the next block's header granule will be set to 0, so it will serve the
-    // purpose of catching linear overflows in this case.
-    uptr UntaggedEnd = untagPointer(TaggedEnd);
-    if (UntaggedEnd != BlockEnd)
-      storeTag(UntaggedEnd);
+    storeEndMarker(TaggedEnd, Size, BlockEnd);
     return reinterpret_cast<void *>(TaggedBegin);
   }
 
-  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
+  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
+                         uptr BlockEnd) {
     uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
     uptr RoundNewPtr;
     if (RoundOldPtr >= NewPtr) {
       // If the allocation is shrinking we just need to set the tag past the end
-      // of the allocation to 0. See explanation in prepareTaggedChunk above.
+      // of the allocation to 0. See explanation in storeEndMarker() above.
       RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
     } else {
       // Set the memory tag of the region
@@ -1181,10 +1194,7 @@
       // to the pointer tag stored in OldPtr.
       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
     }
-
-    uptr UntaggedNewPtr = untagPointer(RoundNewPtr);
-    if (UntaggedNewPtr != BlockEnd)
-      storeTag(UntaggedNewPtr);
+    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
   void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
@@ -1293,6 +1303,12 @@
       *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
           ChunkBegin - Chunk::getHeaderSize());
       *Data = reinterpret_cast<const u32 *>(ChunkBegin);
+
+      // Allocations of size 0 will have stashed the tag in the first byte of
+      // the chunk, see storeEndMarker().
+      if (Header->SizeOrUnusedBytes == 0)
+        *Tag = static_cast<u8>(*ChunkBegin);
+
       return true;
     };
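
The round trip between the new storeEndMarker() and the error-info path can
be illustrated with a minimal standalone sketch. This is a simplified model,
not scudo code: extractTag() and untagPointer() are reimplemented here as
plain bit operations on the AArch64 top-byte tag field, and the "chunk" is
an ordinary buffer rather than MTE-tagged memory.

  #include <cassert>
  #include <cstdint>

  using uptr = uintptr_t; // Assumes a 64-bit target.
  using u8 = uint8_t;

  // AArch64 MTE keeps the logical tag in pointer bits 56-59.
  constexpr int kTagShift = 56;

  u8 extractTag(uptr Ptr) { return static_cast<u8>((Ptr >> kTagShift) & 0xf); }
  uptr untagPointer(uptr Ptr) { return Ptr & ~(uptr{0xf} << kTagShift); }

  int main() {
    // Stand-in for a chunk whose user allocation has Size == 0, reached
    // through a pointer carrying tag 0xa. A plain buffer here; real chunks
    // live in tagged memory.
    alignas(16) u8 Chunk[16] = {};
    uptr End = reinterpret_cast<uptr>(Chunk) | (uptr{0xa} << kTagShift);

    // storeEndMarker(), Size == 0 case: setRandomTag() tagged the empty
    // region [Begin, End), so nothing reached the memory tag space; stash
    // the address tag in the first byte of the chunk instead.
    *reinterpret_cast<u8 *>(untagPointer(End)) = extractTag(End);

    // getInlineErrorInfo() side: when Header->SizeOrUnusedBytes == 0, the
    // tag is recovered from the first byte rather than from tag memory.
    u8 Tag = Chunk[0];
    assert(Tag == 0xa);
    return 0;
  }

With a non-zero Size the stash is unnecessary and skipped, since
setRandomTag()/storeTags() already write the address tag over [Begin, End)
and the reporter can read it back from the memory tag space as before.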