Add CodeGenTypes::GetFunctionType overload for getting the effective
type of a call.

Change the NeXT runtime to use this instead of trying to bitcast
internally (which doesn't respect the ABI).

Fix a subtle bug: using ConvertTypeRecursive instead of ConvertType is
bad inside GetFunctionType.


git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@56050 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index d89dfa6..f9cc4ef 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -18,6 +18,8 @@
 #include "llvm/ADT/SmallSet.h"
 #include <vector>
 
+#include "CGCall.h"
+
 namespace llvm {
   class FunctionType;
   class Module;
@@ -42,7 +44,6 @@
   class Type;
 
 namespace CodeGen {
-  class CGFunctionInfo;
   class CodeGenTypes;
 
   /// CGRecordLayout - This class handles struct and union layout info while 
@@ -143,6 +144,17 @@
 
   /// GetFunctionType - Get the LLVM function type from Info.
   const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
+  /// GetFunctionType - Get the LLVM function type from Info. 
+  /// \param IsVariadic Should the resulting type be variadic?
+  const llvm::FunctionType *GetFunctionType(const CGCallInfo &Info,
+                                            bool IsVariadic);
+
+  /// GetFunctionType - Get the LLVM function type for the given types
+  /// and variadicness.
+  // FIXME: Do we even need IsVariadic here?
+  const llvm::FunctionType *GetFunctionType(ArgTypeIterator begin,
+                                            ArgTypeIterator end,
+                                            bool IsVariadic);
   
   void CollectObjCIvarTypes(ObjCInterfaceDecl *ObjCClass,
                             std::vector<const llvm::Type*> &IvarTypes);