Upgrade jsoncpp to version 1.0.0 to match the copy used by Skia's testing tools.

This mirrors the contents of the upstream repository at:
https://github.com/open-source-parsers/jsoncpp/commit/7165f6ac4c482e68475c9e1dac086f9e12fff0d0

Bug: 17997234
Change-Id: I87a8312f9d030e5027f4ca5b1568f8374a856632
diff --git a/test/rununittests.py b/test/rununittests.py
new file mode 100644
index 0000000..6279f80
--- /dev/null
+++ b/test/rununittests.py
@@ -0,0 +1,82 @@
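+# Runs the jsoncpp unit test executable one test at a time, optionally under
+# Valgrind, and reports any failures.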
+from __future__ import print_function
+from glob import glob
+import sys
+import os
+import os.path
+import subprocess
+import optparse
+
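+# Command prefix used to wrap each test invocation when --valgrind is given.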
+VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
+
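+# Thin wrapper around the test executable: builds the command line, runs it,
+# and returns its combined stdout/stderr along with a pass/fail flag.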
+class TestProxy(object):
+    def __init__( self, test_exe_path, use_valgrind=False ):
+        self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
+        self.use_valgrind = use_valgrind
+
+    def run( self, options ):
+        if self.use_valgrind:
+            cmd = VALGRIND_CMD.split()
+        else:
+            cmd = []
+        cmd.extend( [self.test_exe_path, '--test-auto'] + options )
+        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+        stdout = process.communicate()[0]
+        if process.returncode:
+            return False, stdout
+        return True, stdout
+
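+# Asks the executable for its test list (--list-tests), runs each test in a
+# separate process, and prints a summary; returns 0 on success, 1 on failure.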
+def runAllTests( exe_path, use_valgrind=False ):
+    test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
+    status, test_names = test_proxy.run( ['--list-tests'] )
+    if not status:
+        print("Failed to obtain unit tests list:\n" + test_names, file=sys.stderr)
+        return 1
+    test_names = [name.strip() for name in test_names.strip().split('\n')]
+    failures = []
+    for name in test_names:
+        print('TESTING %s:' % name, end=' ')
+        succeed, result = test_proxy.run( ['--test', name] )
+        if succeed:
+            print('OK')
+        else:
+            failures.append( (name, result) )
+            print('FAILED')
+    failed_count = len(failures)
+    pass_count = len(test_names) - failed_count
+    if failed_count:
+        print()
+        for name, result in failures:
+            print(result)
+        print('%d/%d tests passed (%d failure(s))' % (
+            pass_count, len(test_names), failed_count))
+        return 1
+    else:
+        print('All %d tests passed' % len(test_names))
+        return 0
+
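+# Parses command-line arguments and dispatches to runAllTests().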
+def main():
+    from optparse import OptionParser
+    parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
+    parser.add_option("--valgrind",
+                  action="store_true", dest="valgrind", default=False,
+                  help="run all the tests using valgrind to detect memory leaks")
+    parser.enable_interspersed_args()
+    options, args = parser.parse_args()
+
+    if len(args) != 1:
+        parser.error( 'Must provide the path to the test_lib_json executable.' )
+        sys.exit( 1 )
+
+    exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
+    sys.exit( exit_code )
+
+if __name__ == '__main__':
+    main()