In generated shaders, output +INF and -INF as the largest finite single-precision values, +FLT_MAX and -FLT_MAX respectively.

C++ streams seem to use the representation 1.#INF for INF, and that isn't valid syntax in GLSL or HLSL.
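
A minimal sketch of the idea, not the patch itself: clamp non-finite values to the largest finite float before streaming them, so the emitted text is always valid GLSL/HLSL. The helper name emitFloat is hypothetical.

    #include <algorithm>
    #include <cfloat>
    #include <sstream>

    // +INF clamps to FLT_MAX, -INF to -FLT_MAX; finite values pass through.
    static void emitFloat(std::ostringstream& out, float f)
    {
        out << std::min(FLT_MAX, std::max(-FLT_MAX, f));
    }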

Also preserve the sign of INF in constant expressions that divide by zero. I can't figure out what to do about 0/0, because the shader models we are using do not support NaN; it is treated as +INF, as before.
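
For the folding rule, a hedged sketch under the constraints above (foldDivide is an assumed helper, not ANGLE's API, and taking the sign from the numerator is one plausible reading): x/0 keeps the sign of x, and 0/0, which would be NaN, falls back to +INF. The clamp to ±FLT_MAX then happens at output time, as in the sketch above.

    #include <limits>

    // Constant-fold lhs / rhs, keeping the sign of INF and avoiding NaN.
    static float foldDivide(float lhs, float rhs)
    {
        if (rhs == 0.0f)
        {
            const float inf = std::numeric_limits<float>::infinity();
            // lhs < 0 gives -INF; lhs >= 0 (including 0/0) gives +INF.
            return (lhs < 0.0f) ? -inf : inf;
        }
        return lhs / rhs;
    }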
Review URL: https://codereview.appspot.com/7057046

git-svn-id: https://angleproject.googlecode.com/svn/branches/dx11proto@1706 736b8ea6-26fd-11df-bfd4-992fa37f6226
diff --git a/src/compiler/OutputGLSLBase.cpp b/src/compiler/OutputGLSLBase.cpp
index 3c3c9de..dbdc1f6 100644
--- a/src/compiler/OutputGLSLBase.cpp
+++ b/src/compiler/OutputGLSLBase.cpp
@@ -7,6 +7,9 @@
 #include "compiler/OutputGLSLBase.h"
 #include "compiler/debug.h"
 
+#include <algorithm>
+#include <cfloat>
+
 namespace
 {
 TString arrayBrackets(const TType& type)
@@ -155,7 +158,7 @@
         {
             switch (pConstUnion->getType())
             {
-                case EbtFloat: out << pConstUnion->getFConst(); break;
+                case EbtFloat: out << std::min(FLT_MAX, std::max(-FLT_MAX, pConstUnion->getFConst())); break;
                 case EbtInt: out << pConstUnion->getIConst(); break;
                 case EbtBool: out << pConstUnion->getBConst(); break;
                 default: UNREACHABLE();