Reflow paragraphs in comments.

This is intended as a cleanup after the big clang-format commit
(r280751), which unfortunately left many of the comment paragraphs in
LLDB very hard to read.

FYI, the script I used was:

import textwrap
import os
import sys
import re

tmp = "%s.tmp" % sys.argv[1]
out = open(tmp, "w+")

def flush_paragraph(header, text):
    # Re-wrap the accumulated paragraph so that header + text fits in 78
    # columns, without breaking long words (identifiers, URLs, etc.).
    filled = textwrap.wrap(text, width=(78 - len(header)),
                           break_long_words=False)
    for l in filled:
        out.write(header + " " + l + '\n')

with open(sys.argv[1], "r") as f:
    header = ""
    text = ""
    # A whole-line '//' comment: group(1) is the indentation plus '//',
    # group(2) is the comment text.
    comment = re.compile(r'^( *//) ([^ ].*)$')
    # Comment lines that must not be joined into a paragraph: all-caps
    # tags (e.g. "FIXME:"), numbered list items, and commented-out code
    # ending in ';'.
    special = re.compile(r'^((([A-Z]+[: ])|([0-9]+ )).*)|(.*;)$')
    for line in f:
        match = comment.match(line)
        if match and not special.match(match.group(2)):
            # Skip intentionally short comments.
            if not text and len(match.group(2)) < 40:
                out.write(line)
                continue
            # Accumulate this line into the current paragraph.
            if text:
                text += " " + match.group(2)
            else:
                header = match.group(1)
                text = match.group(2)
            continue

        # Any other line ends the current paragraph: emit it re-wrapped
        # before copying the line through unchanged.
        if text:
            flush_paragraph(header, text)
            text = ""
        out.write(line)

    # Don't drop a paragraph that runs to the end of the file.
    if text:
        flush_paragraph(header, text)

out.close()
os.rename(tmp, sys.argv[1])
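
The script rewrites the given file in place, invoked as e.g.
"python reflow.py Foo.cpp" (reflow.py being whatever name the script is
saved under). As a rough illustration of how the two regexes decide
which comment lines get joined and rewrapped, here is a minimal
standalone sketch; the sample comment lines are made up for this
example:

import re

comment = re.compile(r'^( *//) ([^ ].*)$')
special = re.compile(r'^((([A-Z]+[: ])|([0-9]+ )).*)|(.*;)$')

# Hypothetical sample lines, purely to show the classification.
samples = [
    "  // This explanatory sentence would be joined with its neighbors.",
    "  // FIXME: all-caps tags are passed through untouched.",
    "  // 10 items in this numbered list stay exactly as written.",
    "  // commented_out_call();",
    "int x; // trailing comments don't match the comment regex",
]
for line in samples:
    m = comment.match(line)
    # A line joins the current paragraph only if it is a whole-line
    # '//' comment whose text is not "special".
    reflow = bool(m) and not special.match(m.group(2))
    print("%-68s -> %s" % (line, "reflow" if reflow else "copy through"))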

Differential Revision: https://reviews.llvm.org/D46144

llvm-svn: 331197
diff --git a/lldb/source/Plugins/ObjectContainer/BSD-Archive/ObjectContainerBSDArchive.cpp b/lldb/source/Plugins/ObjectContainer/BSD-Archive/ObjectContainerBSDArchive.cpp
index d7bef83..275f1fa 100644
--- a/lldb/source/Plugins/ObjectContainer/BSD-Archive/ObjectContainerBSDArchive.cpp
+++ b/lldb/source/Plugins/ObjectContainer/BSD-Archive/ObjectContainerBSDArchive.cpp
@@ -89,9 +89,9 @@
 
   str.assign((const char *)data.GetData(&offset, 16), 16);
   if (str.find("#1/") == 0) {
-    // If the name is longer than 16 bytes, or contains an embedded space
-    // then it will use this format where the length of the name is
-    // here and the name characters are after this header.
+    // If the name is longer than 16 bytes, or contains an embedded space,
+    // then it will use this format, where the length of the name is stored
+    // here and the name characters follow this header.
     ar_name_len = strtoul(str.c_str() + 3, &err, 10);
   } else {
     // Strip off any trailing spaces.
@@ -203,8 +203,8 @@
   shared_ptr archive_sp;
   Archive::Map &archive_map = Archive::GetArchiveCache();
   Archive::Map::iterator pos = archive_map.find(file);
-  // Don't cache a value for "archive_map.end()" below since we might
-  // delete an archive entry...
+  // Don't cache a value for "archive_map.end()" below since we might delete an
+  // archive entry...
   while (pos != archive_map.end() && pos->first == file) {
     bool match = true;
     if (arch.IsValid() &&
@@ -217,14 +217,13 @@
       if (pos->second->GetModificationTime() == time) {
         return pos->second;
       } else {
-        // We have a file at the same path with the same architecture
-        // whose modification time doesn't match. It doesn't make sense
-        // for us to continue to use this BSD archive since we cache only
-        // the object info which consists of file time info and also the
-        // file offset and file size of any contained objects. Since
-        // this information is now out of date, we won't get the correct
-        // information if we go and extract the file data, so we should
-        // remove the old and outdated entry.
+        // We have a file at the same path with the same architecture whose
+        // modification time doesn't match. It doesn't make sense for us to
+        // continue to use this BSD archive since we cache only the object info
+        // which consists of file time info and also the file offset and file
+        // size of any contained objects. Since this information is now out of
+        // date, we won't get the correct information if we go and extract the
+        // file data, so we should remove the old and outdated entry.
         archive_map.erase(pos);
         pos = archive_map.find(file);
         continue; // Continue to next iteration so we don't increment pos
@@ -295,9 +294,9 @@
     return nullptr;
 
   if (data_sp) {
-    // We have data, which means this is the first 512 bytes of the file
-    // Check to see if the magic bytes match and if they do, read the entire
-    // table of contents for the archive and cache it
+    // We have data, which means this is the first 512 bytes of the file.
+    // Check to see if the magic bytes match and if they do, read the entire
+    // table of contents for the archive and cache it.
     DataExtractor data;
     data.SetData(data_sp, data_offset, length);
     if (file && data_sp && ObjectContainerBSDArchive::MagicBytesMatch(data)) {
@@ -389,8 +388,8 @@
             m_file, module_sp->GetArchitecture(),
             module_sp->GetModificationTime(), m_offset, m_data);
       }
-      // Clear the m_data that contains the entire archive
-      // data and let our m_archive_sp hold onto the data.
+      // Clear the m_data that contains the entire archive data and let our
+      // m_archive_sp hold onto the data.
       m_data.Clear();
     }
   }
@@ -453,9 +452,9 @@
     lldb::offset_t data_offset, lldb::offset_t file_offset,
     lldb::offset_t file_size, lldb_private::ModuleSpecList &specs) {
 
-  // We have data, which means this is the first 512 bytes of the file
-  // Check to see if the magic bytes match and if they do, read the entire
-  // table of contents for the archive and cache it
+  // We have data, which means this is the first 512 bytes of the file. Check
+  // to see if the magic bytes match and if they do, read the entire table of
+  // contents for the archive and cache it.
   DataExtractor data;
   data.SetData(data_sp, data_offset, data_sp->GetByteSize());
   if (!file || !data_sp || !ObjectContainerBSDArchive::MagicBytesMatch(data))
@@ -505,8 +504,8 @@
   const size_t end_count = specs.GetSize();
   size_t num_specs_added = end_count - initial_count;
   if (set_archive_arch && num_specs_added > 0) {
-    // The archive was created but we didn't have an architecture
-    // so we need to set it
+    // The archive was created but we didn't have an architecture, so we need to
+    // set it.
     for (size_t i = initial_count; i < end_count; ++i) {
       ModuleSpec module_spec;
       if (specs.GetModuleSpecAtIndex(i, module_spec)) {
diff --git a/lldb/source/Plugins/ObjectContainer/Universal-Mach-O/ObjectContainerUniversalMachO.cpp b/lldb/source/Plugins/ObjectContainer/Universal-Mach-O/ObjectContainerUniversalMachO.cpp
index 0266bbe..4c48d64 100644
--- a/lldb/source/Plugins/ObjectContainer/Universal-Mach-O/ObjectContainerUniversalMachO.cpp
+++ b/lldb/source/Plugins/ObjectContainer/Universal-Mach-O/ObjectContainerUniversalMachO.cpp
@@ -44,8 +44,8 @@
     const lldb::ModuleSP &module_sp, DataBufferSP &data_sp,
     lldb::offset_t data_offset, const FileSpec *file,
     lldb::offset_t file_offset, lldb::offset_t length) {
-  // We get data when we aren't trying to look for cached container information,
-  // so only try and look for an architecture slice if we get data
+  // We get data when we aren't trying to look for cached container
+  // information, so only try and look for an architecture slice if we get data
   if (data_sp) {
     DataExtractor data;
     data.SetData(data_sp, data_offset, length);
@@ -81,8 +81,8 @@
 
 bool ObjectContainerUniversalMachO::ParseHeader() {
   bool success = ParseHeader(m_data, m_header, m_fat_archs);
-  // We no longer need any data, we parsed all we needed to parse
-  // and cached it in m_header and m_fat_archs
+  // We no longer need any data; we parsed all we needed to parse and cached it
+  // in m_header and m_fat_archs.
   m_data.Clear();
   return success;
 }
@@ -92,8 +92,7 @@
     std::vector<llvm::MachO::fat_arch> &fat_archs) {
   bool success = false;
   // Store the file offset for this universal file as we could have a universal
-  // .o file
-  // in a BSD archive, or be contained in another kind of object.
+  // .o file in a BSD archive, or be contained in another kind of object.
   // Universal mach-o files always have their headers in big endian.
   lldb::offset_t offset = 0;
   data.SetByteOrder(eByteOrderBig);