Doxygenize the documentation comments

There were many // comments used to document classes, functions, etc., but
those are not picked up by Doxygen, which expects /// comments. This
converts many comments from // to /// in order to improve the generated
documentation.
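
As a small before/after illustration (copied from one declaration touched
by this patch), Doxygen ignores the first form but attaches the second to
the getRegName() entry in the generated documentation:

Before:

  // Returns a printable name for the register.
  virtual IceString getRegName(SizeT RegNum, Type Ty) const = 0;

After:

  /// Returns a printable name for the register.
  virtual IceString getRegName(SizeT RegNum, Type Ty) const = 0;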

BUG=
R=jvoung@chromium.org, kschimpf@google.com

Review URL: https://codereview.chromium.org/1216963007.
diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h
index b543208..64672c4 100644
--- a/src/IceTargetLowering.h
+++ b/src/IceTargetLowering.h
@@ -6,16 +6,17 @@
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
-//
-// This file declares the TargetLowering, LoweringContext, and
-// TargetDataLowering classes.  TargetLowering is an abstract class
-// used to drive the translation/lowering process.  LoweringContext
-// maintains a context for lowering each instruction, offering
-// conveniences such as iterating over non-deleted instructions.
-// TargetDataLowering is an abstract class used to drive the
-// lowering/emission of global initializers, external global
-// declarations, and internal constant pools.
-//
+///
+/// \file
+/// This file declares the TargetLowering, LoweringContext, and
+/// TargetDataLowering classes.  TargetLowering is an abstract class
+/// used to drive the translation/lowering process.  LoweringContext
+/// maintains a context for lowering each instruction, offering
+/// conveniences such as iterating over non-deleted instructions.
+/// TargetDataLowering is an abstract class used to drive the
+/// lowering/emission of global initializers, external global
+/// declarations, and internal constant pools.
+///
 //===----------------------------------------------------------------------===//
 
 #ifndef SUBZERO_SRC_ICETARGETLOWERING_H
@@ -28,12 +29,12 @@
 
 namespace Ice {
 
-// LoweringContext makes it easy to iterate through non-deleted
-// instructions in a node, and insert new (lowered) instructions at
-// the current point.  Along with the instruction list container and
-// associated iterators, it holds the current node, which is needed
-// when inserting new instructions in order to track whether variables
-// are used as single-block or multi-block.
+/// LoweringContext makes it easy to iterate through non-deleted
+/// instructions in a node, and insert new (lowered) instructions at
+/// the current point.  Along with the instruction list container and
+/// associated iterators, it holds the current node, which is needed
+/// when inserting new instructions in order to track whether variables
+/// are used as single-block or multi-block.
 class LoweringContext {
   LoweringContext(const LoweringContext &) = delete;
   LoweringContext &operator=(const LoweringContext &) = delete;
@@ -66,24 +67,24 @@
   void setInsertPoint(const InstList::iterator &Position) { Next = Position; }
 
 private:
-  // Node is the argument to Inst::updateVars().
+  /// Node is the argument to Inst::updateVars().
   CfgNode *Node = nullptr;
   Inst *LastInserted = nullptr;
-  // Cur points to the current instruction being considered.  It is
-  // guaranteed to point to a non-deleted instruction, or to be End.
+  /// Cur points to the current instruction being considered.  It is
+  /// guaranteed to point to a non-deleted instruction, or to be End.
   InstList::iterator Cur;
-  // Next doubles as a pointer to the next valid instruction (if any),
-  // and the new-instruction insertion point.  It is also updated for
-  // the caller in case the lowering consumes more than one high-level
-  // instruction.  It is guaranteed to point to a non-deleted
-  // instruction after Cur, or to be End.  TODO: Consider separating
-  // the notion of "next valid instruction" and "new instruction
-  // insertion point", to avoid confusion when previously-deleted
-  // instructions come between the two points.
+  /// Next doubles as a pointer to the next valid instruction (if any),
+  /// and the new-instruction insertion point.  It is also updated for
+  /// the caller in case the lowering consumes more than one high-level
+  /// instruction.  It is guaranteed to point to a non-deleted
+  /// instruction after Cur, or to be End.  TODO: Consider separating
+  /// the notion of "next valid instruction" and "new instruction
+  /// insertion point", to avoid confusion when previously-deleted
+  /// instructions come between the two points.
   InstList::iterator Next;
-  // Begin is a copy of Insts.begin(), used if iterators are moved backward.
+  /// Begin is a copy of Insts.begin(), used if iterators are moved backward.
   InstList::iterator Begin;
-  // End is a copy of Insts.end(), used if Next needs to be advanced.
+  /// End is a copy of Insts.end(), used if Next needs to be advanced.
   InstList::iterator End;
 
   void skipDeleted(InstList::iterator &I) const;
@@ -129,37 +130,37 @@
     Func->setError("Target doesn't specify O2 lowering steps.");
   }
 
-  // Tries to do address mode optimization on a single instruction.
+  /// Tries to do address mode optimization on a single instruction.
   void doAddressOpt();
-  // Randomly insert NOPs.
+  /// Randomly insert NOPs.
   void doNopInsertion();
-  // Lowers a single non-Phi instruction.
+  /// Lowers a single non-Phi instruction.
   void lower();
-  // Does preliminary lowering of the set of Phi instructions in the
-  // current node.  The main intention is to do what's needed to keep
-  // the unlowered Phi instructions consistent with the lowered
-  // non-Phi instructions, e.g. to lower 64-bit operands on a 32-bit
-  // target.
+  /// Does preliminary lowering of the set of Phi instructions in the
+  /// current node.  The main intention is to do what's needed to keep
+  /// the unlowered Phi instructions consistent with the lowered
+  /// non-Phi instructions, e.g. to lower 64-bit operands on a 32-bit
+  /// target.
   virtual void prelowerPhis() {}
-  // Lowers a list of "parallel" assignment instructions representing
-  // a topological sort of the Phi instructions.
+  /// Lowers a list of "parallel" assignment instructions representing
+  /// a topological sort of the Phi instructions.
   virtual void lowerPhiAssignments(CfgNode *Node,
                                    const AssignList &Assignments) = 0;
-  // Tries to do branch optimization on a single instruction.  Returns
-  // true if some optimization was done.
+  /// Tries to do branch optimization on a single instruction.  Returns
+  /// true if some optimization was done.
   virtual bool doBranchOpt(Inst * /*I*/, const CfgNode * /*NextNode*/) {
     return false;
   }
 
   virtual SizeT getNumRegisters() const = 0;
-  // Returns a variable pre-colored to the specified physical
-  // register.  This is generally used to get very direct access to
-  // the register such as in the prolog or epilog or for marking
-  // scratch registers as killed by a call.  If a Type is not
-  // provided, a target-specific default type is used.
+  /// Returns a variable pre-colored to the specified physical
+  /// register.  This is generally used to get very direct access to
+  /// the register such as in the prolog or epilog or for marking
+  /// scratch registers as killed by a call.  If a Type is not
+  /// provided, a target-specific default type is used.
   virtual Variable *getPhysicalRegister(SizeT RegNum,
                                         Type Ty = IceType_void) = 0;
-  // Returns a printable name for the register.
+  /// Returns a printable name for the register.
   virtual IceString getRegName(SizeT RegNum, Type Ty) const = 0;
 
   virtual bool hasFramePointer() const { return false; }
@@ -167,8 +168,8 @@
   virtual size_t typeWidthInBytesOnStack(Type Ty) const = 0;
 
   bool hasComputedFrame() const { return HasComputedFrame; }
-  // Returns true if this function calls a function that has the
-  // "returns twice" attribute.
+  /// Returns true if this function calls a function that has the
+  /// "returns twice" attribute.
   bool callsReturnsTwice() const { return CallsReturnsTwice; }
   void setCallsReturnsTwice(bool RetTwice) { CallsReturnsTwice = RetTwice; }
   int32_t getStackAdjustment() const { return StackAdjustment; }
@@ -196,10 +197,10 @@
       llvm::SmallVectorImpl<int32_t> &Permutation,
       const llvm::SmallBitVector &ExcludeRegisters) const = 0;
 
-  // Save/restore any mutable state for the situation where code
-  // emission needs multiple passes, such as sandboxing or relaxation.
-  // Subclasses may provide their own implementation, but should be
-  // sure to also call the parent class's methods.
+  /// Save/restore any mutable state for the situation where code
+  /// emission needs multiple passes, such as sandboxing or relaxation.
+  /// Subclasses may provide their own implementation, but should be
+  /// sure to also call the parent class's methods.
   virtual void snapshotEmitState() {
     SnapshotStackAdjustment = StackAdjustment;
   }
@@ -219,7 +220,7 @@
   virtual void emit(const ConstantFloat *C) const = 0;
   virtual void emit(const ConstantDouble *C) const = 0;
 
-  // Performs target-specific argument lowering.
+  /// Performs target-specific argument lowering.
   virtual void lowerArguments() = 0;
 
   virtual void initNodeForLowering(CfgNode *) {}
@@ -253,30 +254,30 @@
   virtual void doAddressOptLoad() {}
   virtual void doAddressOptStore() {}
   virtual void randomlyInsertNop(float Probability) = 0;
-  // This gives the target an opportunity to post-process the lowered
-  // expansion before returning.
+  /// This gives the target an opportunity to post-process the lowered
+  /// expansion before returning.
   virtual void postLower() {}
 
-  // Find two-address non-SSA instructions and set the DestNonKillable flag
-  // to keep liveness analysis consistent.
+  /// Find two-address non-SSA instructions and set the DestNonKillable flag
+  /// to keep liveness analysis consistent.
   void inferTwoAddress();
 
-  // Make a pass over the Cfg to determine which variables need stack slots
-  // and place them in a sorted list (SortedSpilledVariables). Among those,
-  // vars, classify the spill variables as local to the basic block vs
-  // global (multi-block) in order to compute the parameters GlobalsSize
-  // and SpillAreaSizeBytes (represents locals or general vars if the
-  // coalescing of locals is disallowed) along with alignments required
-  // for variables in each area. We rely on accurate VMetadata in order to
-  // classify a variable as global vs local (otherwise the variable is
-  // conservatively global). The in-args should be initialized to 0.
-  //
-  // This is only a pre-pass and the actual stack slot assignment is
-  // handled separately.
-  //
-  // There may be target-specific Variable types, which will be handled
-  // by TargetVarHook. If the TargetVarHook returns true, then the variable
-  // is skipped and not considered with the rest of the spilled variables.
+  /// Make a pass over the Cfg to determine which variables need stack slots
+  /// and place them in a sorted list (SortedSpilledVariables). Among those
+  /// vars, classify the spill variables as local to the basic block vs
+  /// global (multi-block) in order to compute the parameters GlobalsSize
+  /// and SpillAreaSizeBytes (represents locals or general vars if the
+  /// coalescing of locals is disallowed) along with alignments required
+  /// for variables in each area. We rely on accurate VMetadata in order to
+  /// classify a variable as global vs local (otherwise the variable is
+  /// conservatively global). The in-args should be initialized to 0.
+  ///
+  /// This is only a pre-pass and the actual stack slot assignment is
+  /// handled separately.
+  ///
+  /// There may be target-specific Variable types, which will be handled
+  /// by TargetVarHook. If the TargetVarHook returns true, then the variable
+  /// is skipped and not considered with the rest of the spilled variables.
   void getVarStackSlotParams(VarList &SortedSpilledVariables,
                              llvm::SmallBitVector &RegsUsed,
                              size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
@@ -284,9 +285,9 @@
                              uint32_t *LocalsSlotsAlignmentBytes,
                              std::function<bool(Variable *)> TargetVarHook);
 
-  // Calculate the amount of padding needed to align the local and global
-  // areas to the required alignment.  This assumes the globals/locals layout
-  // used by getVarStackSlotParams and assignVarStackSlots.
+  /// Calculate the amount of padding needed to align the local and global
+  /// areas to the required alignment.  This assumes the globals/locals layout
+  /// used by getVarStackSlotParams and assignVarStackSlots.
   void alignStackSpillAreas(uint32_t SpillAreaStartOffset,
                             uint32_t SpillAreaAlignmentBytes,
                             size_t GlobalsSize,
@@ -294,24 +295,24 @@
                             uint32_t *SpillAreaPaddingBytes,
                             uint32_t *LocalsSlotsPaddingBytes);
 
-  // Make a pass through the SortedSpilledVariables and actually assign
-  // stack slots. SpillAreaPaddingBytes takes into account stack alignment
-  // padding. The SpillArea starts after that amount of padding.
-  // This matches the scheme in getVarStackSlotParams, where there may
-  // be a separate multi-block global var spill area and a local var
-  // spill area.
+  /// Make a pass through the SortedSpilledVariables and actually assign
+  /// stack slots. SpillAreaPaddingBytes takes into account stack alignment
+  /// padding. The SpillArea starts after that amount of padding.
+  /// This matches the scheme in getVarStackSlotParams, where there may
+  /// be a separate multi-block global var spill area and a local var
+  /// spill area.
   void assignVarStackSlots(VarList &SortedSpilledVariables,
                            size_t SpillAreaPaddingBytes,
                            size_t SpillAreaSizeBytes,
                            size_t GlobalsAndSubsequentPaddingSize,
                            bool UsesFramePointer);
 
-  // Sort the variables in Source based on required alignment.
-  // The variables with the largest alignment need are placed in the front
-  // of the Dest list.
+  /// Sort the variables in Source based on required alignment.
+  /// The variables with the largest alignment need are placed in the front
+  /// of the Dest list.
   void sortVarsByAlignment(VarList &Dest, const VarList &Source) const;
 
-  // Make a call to an external helper function.
+  /// Make a call to an external helper function.
   InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
                            SizeT MaxSrcs);
 
@@ -325,8 +326,8 @@
   GlobalContext *Ctx;
   bool HasComputedFrame = false;
   bool CallsReturnsTwice = false;
-  // StackAdjustment keeps track of the current stack offset from its
-  // natural location, as arguments are pushed for a function call.
+  /// StackAdjustment keeps track of the current stack offset from its
+  /// natural location, as arguments are pushed for a function call.
   int32_t StackAdjustment = 0;
   SizeT NextLabelNumber = 0;
   LoweringContext Context;
@@ -373,9 +374,9 @@
   int32_t SnapshotStackAdjustment = 0;
 };
 
-// TargetDataLowering is used for "lowering" data including initializers
-// for global variables, and the internal constant pools.  It is separated
-// out from TargetLowering because it does not require a Cfg.
+/// TargetDataLowering is used for "lowering" data including initializers
+/// for global variables, and the internal constant pools.  It is separated
+/// out from TargetLowering because it does not require a Cfg.
 class TargetDataLowering {
   TargetDataLowering() = delete;
   TargetDataLowering(const TargetDataLowering &) = delete;
@@ -393,18 +394,18 @@
   void emitGlobal(const VariableDeclaration &Var,
                   const IceString &SectionSuffix);
 
-  // For now, we assume .long is the right directive for emitting 4 byte
-  // emit global relocations. However, LLVM MIPS usually uses .4byte instead.
-  // Perhaps there is some difference when the location is unaligned.
+  /// For now, we assume .long is the right directive for emitting 4 byte
+  /// global relocations. However, LLVM MIPS usually uses .4byte instead.
+  /// Perhaps there is some difference when the location is unaligned.
   static const char *getEmit32Directive() { return ".long"; }
 
   explicit TargetDataLowering(GlobalContext *Ctx) : Ctx(Ctx) {}
   GlobalContext *Ctx;
 };
 
-// TargetHeaderLowering is used to "lower" the header of an output file.
-// It writes out the target-specific header attributes. E.g., for ARM
-// this writes out the build attributes (float ABI, etc.).
+/// TargetHeaderLowering is used to "lower" the header of an output file.
+/// It writes out the target-specific header attributes. E.g., for ARM
+/// this writes out the build attributes (float ABI, etc.).
 class TargetHeaderLowering {
   TargetHeaderLowering() = delete;
   TargetHeaderLowering(const TargetHeaderLowering &) = delete;