Instead of passing in an unsigned value for the optimization level, use an
enum, which better identifies what the optimization is doing and is more
flexible for future uses.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@70440 91177308-0d34-0410-b5e6-96231b3b80d8
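
For reference, a minimal sketch of the enum this change switches to (the real
definition lives in llvm/Target/TargetMachine.h; the exact enumerator set here
reflects the LLVM code of this era and should be treated as an approximation,
not an authoritative quote):

  namespace llvm {
    namespace CodeGenOpt {
      enum Level {
        Default,    // let each pass choose a sensible default level
        None,       // -O0: fastest compile, no optimization
        Aggressive  // -O3: spend extra compile time for better code
      };
    } // end namespace CodeGenOpt
  } // end namespace llvm

Compared with a bare unsigned, a caller can no longer pass a meaningless level
such as 42, and new levels can be added later without renumbering callers.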
diff --git a/lib/Target/CellSPU/SPU.h b/lib/Target/CellSPU/SPU.h
index 5c62bc3..77a062e 100644
--- a/lib/Target/CellSPU/SPU.h
+++ b/lib/Target/CellSPU/SPU.h
@@ -16,6 +16,7 @@
 #define LLVM_TARGET_IBMCELLSPU_H
 
 #include "llvm/Support/DataTypes.h"
+#include "llvm/Target/TargetMachine.h"
 
 namespace llvm {
   class SPUTargetMachine;
@@ -25,7 +26,8 @@
   FunctionPass *createSPUISelDag(SPUTargetMachine &TM);
   FunctionPass *createSPUAsmPrinterPass(raw_ostream &o,
                                          SPUTargetMachine &tm,
-                                         unsigned OptLevel, bool verbose);
+                                         CodeGenOpt::Level OptLevel,
+                                         bool verbose);
 
   /*--== Utility functions/predicates/etc used all over the place: --==*/
   //! Predicate test for a signed 10-bit value
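
With the new signature, call sites name the optimization level explicitly
instead of passing a magic number. A sketch of a caller follows (Out and SPUTM
are assumed to be an existing raw_ostream and SPUTargetMachine; only the
createSPUAsmPrinterPass declaration above is taken from the patch):

  #include "SPU.h"

  FunctionPass *Printer =
      createSPUAsmPrinterPass(Out, SPUTM, CodeGenOpt::Default,
                              /*verbose=*/false);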