camera2: Move ITS tests to CTS verifier.

Bug: 17994909

Change-Id: Ie788b1ae3a6b079e37a3472c46aed3dfdcfffe2c
diff --git a/apps/CameraITS/tests/inprog/scene2/README b/apps/CameraITS/tests/inprog/scene2/README
new file mode 100644
index 0000000..3a0953f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/README
@@ -0,0 +1,8 @@
+Scene 2 requires a camera lab with controlled illuminants, for example
+light sources capable of producing D65, D50, A, TL84, etc. illumination.
+Specific charts may also be required, for example grey cards, color
+checker charts, and resolution charts. The individual tests will specify
+the setup that they require.
+
+If a test requires that the camera be in any particular orientation, it will
+specify this too. Otherwise, the camera can be in either portrait or landscape.
diff --git a/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
new file mode 100644
index 0000000..0c96ca7
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
@@ -0,0 +1,94 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.dng
+import its.objects
+import numpy
+import os.path
+
+def main():
+    """Test that the DNG tags are internally self-consistent.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Assumes that illuminant 1 is D65, and illuminant 2 is standard A.
+        # TODO: Generalize DNG tags check for any provided illuminants.
+        illum_code = [21, 17] # D65, A
+        illum_str = ['D65', 'A']
+        ref_str = ['android.sensor.referenceIlluminant%d'%(i) for i in [1,2]]
+        cm_str = ['android.sensor.colorTransform%d'%(i) for i in [1,2]]
+        fm_str = ['android.sensor.forwardMatrix%d'%(i) for i in [1,2]]
+        cal_str = ['android.sensor.calibrationTransform%d'%(i) for i in [1,2]]
+        dng_illum = [its.dng.D65, its.dng.A]
+
+        for i in [0,1]:
+            assert(props[ref_str[i]] == illum_code[i])
+            raw_input("\n[Point camera at grey card under %s and press ENTER]"%(
+                    illum_str[i]))
+
+            cam.do_3a(do_af=False)
+            cap = cam.do_capture(its.objects.auto_capture_request())
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            ccm = its.objects.rational_to_float(
+                    cap["metadata"]["android.colorCorrection.transform"])
+            cal = its.objects.rational_to_float(props[cal_str[i]])
+            print "HAL reported gains:\n", numpy.array(gains)
+            print "HAL reported ccm:\n", numpy.array(ccm).reshape(3,3)
+            print "HAL reported cal:\n", numpy.array(cal).reshape(3,3)
+
+            # Dump the image.
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_%s.jpg" % (NAME, illum_str[i]))
+
+            # Compute the matrices that are expected under this illuminant from
+            # the HAL-reported WB gains, CCM, and calibration matrix.
+            cm, fm = its.dng.compute_cm_fm(dng_illum[i], gains, ccm, cal)
+            asn = its.dng.compute_asn(dng_illum[i], cal, cm)
+            print "Expected ColorMatrix:\n", cm
+            print "Expected ForwardMatrix:\n", fm
+            print "Expected AsShotNeutral:\n", asn
+
+            # Get the matrices that are reported by the HAL for this
+            # illuminant.
+            cm_ref = numpy.array(its.objects.rational_to_float(
+                    props[cm_str[i]])).reshape(3,3)
+            fm_ref = numpy.array(its.objects.rational_to_float(
+                    props[fm_str[i]])).reshape(3,3)
+            asn_ref = numpy.array(its.objects.rational_to_float(
+                    cap['metadata']['android.sensor.neutralColorPoint']))
+            print "Reported ColorMatrix:\n", cm_ref
+            print "Reported ForwardMatrix:\n", fm_ref
+            print "Reported AsShotNeutral:\n", asn_ref
+
+            # The color matrix may be scaled (between the reported and
+            # expected values).
+            cm_scale = cm.mean(1).mean(0) / cm_ref.mean(1).mean(0)
+            print "ColorMatrix scale factor:", cm_scale
+
+            # Compute the deltas between reported and expected.
+            print "Ratios in ColorMatrix:\n", cm / cm_ref
+            print "Deltas in ColorMatrix (after normalizing):\n", cm/cm_scale - cm_ref
+            print "Deltas in ForwardMatrix:\n", fm - fm_ref
+            print "Deltas in AsShotNeutral:\n", asn - asn_ref
+
+            # TODO: Add pass/fail test on DNG matrices.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_3a_remote.py b/apps/CameraITS/tests/inprog/test_3a_remote.py
new file mode 100644
index 0000000..c76ff6d
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_3a_remote.py
@@ -0,0 +1,70 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pprint
+import math
+import numpy
+import matplotlib.pyplot
+import mpl_toolkits.mplot3d
+
+def main():
+    """Run 3A remotely (from this script).
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        w_map = props["android.lens.info.shadingMapSize"]["width"]
+        h_map = props["android.lens.info.shadingMapSize"]["height"]
+
+        # TODO: Test for 3A convergence, and exit this test once converged.
+
+        triggered = False
+        while True:
+            req = its.objects.auto_capture_request()
+            req["android.statistics.lensShadingMapMode"] = 1
+            req['android.control.aePrecaptureTrigger'] = (0 if triggered else 1)
+            req['android.control.afTrigger'] = (0 if triggered else 1)
+            triggered = True
+
+            cap = cam.do_capture(req)
+
+            ae_state = cap["metadata"]["android.control.aeState"]
+            awb_state = cap["metadata"]["android.control.awbState"]
+            af_state = cap["metadata"]["android.control.afState"]
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            transform = cap["metadata"]["android.colorCorrection.transform"]
+            exp_time = cap["metadata"]['android.sensor.exposureTime']
+            lsc_map = cap["metadata"]["android.statistics.lensShadingMap"]
+            foc_dist = cap["metadata"]['android.lens.focusDistance']
+            foc_range = cap["metadata"]['android.lens.focusRange']
+
+            print "States (AE,AWB,AF):", ae_state, awb_state, af_state
+            print "Gains:", gains
+            print "Transform:", [its.objects.rational_to_float(t)
+                                 for t in transform]
+            print "AE region:", cap["metadata"]['android.control.aeRegions']
+            print "AF region:", cap["metadata"]['android.control.afRegions']
+            print "AWB region:", cap["metadata"]['android.control.awbRegions']
+            print "LSC map:", w_map, h_map, lsc_map[:8]
+            print "Focus (dist,range):", foc_dist, foc_range
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_black_level.py b/apps/CameraITS/tests/inprog/test_black_level.py
new file mode 100644
index 0000000..37dab94
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_black_level.py
@@ -0,0 +1,99 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Black level consistency test.
+
+    Test: capture dark frames and check if black level correction is done
+    correctly.
+    1. Black level should be roughly consistent for repeating shots.
+    2. Noise distribution should be roughly centered at black level.
+
+    Shoot with the camera covered (i.e., dark/black). The test varies the
+    sensitivity parameter.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_REPEAT = 3
+    NUM_STEPS = 3
+
+    # Only check the center part where LSC has little effects.
+    R = 200
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    ymodes = []
+    umodes = []
+    vmodes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
+        sensitivities = [sens_range[0] + i*sens_step for i in range(NUM_STEPS)]
+        print "Sensitivities:", sensitivities
+
+        for si, s in enumerate(sensitivities):
+            for rep in xrange(NUM_REPEAT):
+                req = its.objects.manual_capture_request(100, 1*1000*1000)
+                req["android.blackLevel.lock"] = True
+                req["android.sensor.sensitivity"] = s
+                cap = cam.do_capture(req)
+                yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+                w = cap["width"]
+                h = cap["height"]
+
+                # Magnify the noise in saved images to help visualize.
+                its.image.write_image(yimg * 2,
+                                      "%s_s=%05d_y.jpg" % (NAME, s), True)
+                its.image.write_image(numpy.absolute(uimg - 0.5) * 2,
+                                      "%s_s=%05d_u.jpg" % (NAME, s), True)
+
+                yimg = yimg[w/2-R:w/2+R, h/2-R:h/2+R]
+                uimg = uimg[w/4-R/2:w/4+R/2, w/4-R/2:w/4+R/2]
+                vimg = vimg[w/4-R/2:w/4+R/2, w/4-R/2:w/4+R/2]
+                yhist,_ = numpy.histogram(yimg*255, 256, (0,256))
+                ymodes.append(numpy.argmax(yhist))
+                uhist,_ = numpy.histogram(uimg*255, 256, (0,256))
+                umodes.append(numpy.argmax(uhist))
+                vhist,_ = numpy.histogram(vimg*255, 256, (0,256))
+                vmodes.append(numpy.argmax(vhist))
+
+                # Take 32 bins from Y, U, and V.
+                # Histograms of U and V are cropped at the center of 128.
+                pylab.plot(range(32), yhist.tolist()[0:32], 'rgb'[si])
+                pylab.plot(range(32), uhist.tolist()[112:144], 'rgb'[si]+'--')
+                pylab.plot(range(32), vhist.tolist()[112:144], 'rgb'[si]+'--')
+
+    pylab.xlabel("DN: Y[0:32], U[112:144], V[112:144]")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    print "Y black levels:", ymodes
+    print "U black levels:", umodes
+    print "V black levels:", vmodes
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_blc_lsc.py b/apps/CameraITS/tests/inprog/test_blc_lsc.py
new file mode 100644
index 0000000..ce120a2
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_blc_lsc.py
@@ -0,0 +1,106 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that BLC and LSC look reasonable.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    r_means_center = []
+    g_means_center = []
+    b_means_center = []
+    r_means_corner = []
+    g_means_corner = []
+    b_means_corner = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        expt_range = props['android.sensor.info.exposureTimeRange']
+
+        # Get AE+AWB lock first, so the auto values in the capture result are
+        # populated properly.
+        r = [[0,0,1,1,1]]
+        ae_sen,ae_exp,awb_gains,awb_transform,_ \
+                = cam.do_3a(r,r,r,do_af=False,get_results=True)
+        print "AE:", ae_sen, ae_exp / 1000000.0
+        print "AWB:", awb_gains, awb_transform
+
+        # Set analog gain (sensitivity) to 800
+        ae_exp = ae_exp * ae_sen / 800
+        ae_sen = 800
+
+        # Capture range of exposures from 1/100x to 4x of AE estimate.
+        exposures = [ae_exp*x/100.0 for x in [1]+range(10,401,40)]
+        exposures = [e for e in exposures
+                     if e >= expt_range[0] and e <= expt_range[1]]
+
+        # Convert the transform back to rational.
+        awb_transform_rat = its.objects.float_to_rational(awb_transform)
+
+        # Linear tonemap
+        tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])
+
+        reqs = []
+        for e in exposures:
+            req = its.objects.manual_capture_request(ae_sen,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = tmap
+            req["android.tonemap.curveGreen"] = tmap
+            req["android.tonemap.curveBlue"] = tmap
+            req["android.colorCorrection.transform"] = awb_transform_rat
+            req["android.colorCorrection.gains"] = awb_gains
+            reqs.append(req)
+
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg"%(NAME, i))
+
+            tile_center = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_center)
+            r_means_center.append(rgb_means[0])
+            g_means_center.append(rgb_means[1])
+            b_means_center.append(rgb_means[2])
+
+            tile_corner = its.image.get_image_patch(img, 0.0, 0.0, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_corner)
+            r_means_corner.append(rgb_means[0])
+            g_means_corner.append(rgb_means[1])
+            b_means_corner.append(rgb_means[2])
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_center, 'r')
+    pylab.plot(exposures, g_means_center, 'g')
+    pylab.plot(exposures, b_means_center, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_center.png" % (NAME))
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_corner, 'r')
+    pylab.plot(exposures, g_means_corner, 'g')
+    pylab.plot(exposures, b_means_corner, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_corner.png" % (NAME))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
new file mode 100644
index 0000000..f3d49be
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
@@ -0,0 +1,93 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 50
+    BURSTS = 5
+    FRAMES = BURST_LEN * BURSTS
+
+    SPREAD_THRESH = 0.03
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at the smallest resolution.
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        w,h = fmt["width"], fmt["height"]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        # Also build a 4D array, which is an array of all RGB images.
+        r_means = []
+        g_means = []
+        b_means = []
+        imgs = numpy.empty([FRAMES,h,w,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN, [fmt])
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.convert_capture_to_rgb_image(cap)
+                tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
+                means = its.image.compute_image_means(tile)
+                r_means.append(means[0])
+                g_means.append(means[1])
+                b_means.append(means[2])
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print spread
+            assert(spread < SPREAD_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
new file mode 100644
index 0000000..a8d1d45
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
@@ -0,0 +1,91 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import numpy
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 6
+    BURSTS = 2
+    FRAMES = BURST_LEN * BURSTS
+
+    DELTA_THRESH = 0.1
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at full resolution.
+        props = cam.get_camera_properties()
+        w,h = its.objects.get_available_output_sizes("yuv", props)[0]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Build a 4D array, which is an array of all RGB images after down-
+        # scaling them by a factor of 4x4.
+        imgs = numpy.empty([FRAMES,h/4,w/4,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN)
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.downscale_image(
+                        its.image.convert_capture_to_rgb_image(cap), 4)
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Compute the deltas of each image from the mean image; this test
+        # passes if none of the deltas are large.
+        print "Computing frame differences"
+        delta_maxes = []
+        for i in range(FRAMES):
+            deltas = (imgs[i] - img_mean).reshape(h*w*3/16)
+            delta_max_pos = numpy.max(deltas)
+            delta_max_neg = numpy.min(deltas)
+            delta_maxes.append(max(abs(delta_max_pos), abs(delta_max_neg)))
+        max_delta_max = max(delta_maxes)
+        print "Frame %d has largest diff %f" % (
+                delta_maxes.index(max_delta_max), max_delta_max)
+        assert(max_delta_max < DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_crop_region.py b/apps/CameraITS/tests/inprog/test_crop_region.py
new file mode 100644
index 0000000..396603f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_crop_region.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import its.image
+import its.device
+import its.objects
+
+
+def main():
+    """Takes shots with different sensor crop regions.
+    """
+    name = os.path.basename(__file__).split(".")[0]
+
+    # Regions specified here in x,y,w,h normalized form.
+    regions = [[0.0, 0.0, 0.5, 0.5], # top left
+               [0.0, 0.5, 0.5, 0.5], # bottom left
+               [0.1, 0.9, 0.5, 1.0]] # right side (top + bottom)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        r = props['android.sensor.info.pixelArraySize']
+        w = r['width']
+        h = r['height']
+
+        # Capture a full frame first.
+        reqs = [its.objects.auto_capture_request()]
+        print "Capturing img0 with the full sensor region"
+
+        # Capture a frame for each of the regions.
+        for i,region in enumerate(regions):
+            req = its.objects.auto_capture_request()
+            req['android.scaler.cropRegion'] = {
+                    "left": int(region[0] * w),
+                    "top": int(region[1] * h),
+                    "right": int((region[0]+region[2])*w),
+                    "bottom": int((region[1]+region[3])*h)}
+            reqs.append(req)
+            crop = req['android.scaler.cropRegion']
+            print "Capturing img%d with crop: %d,%d %dx%d"%(i+1,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+        cam.do_3a()
+        caps = cam.do_capture(reqs)
+
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            crop = cap["metadata"]['android.scaler.cropRegion']
+            its.image.write_image(img, "%s_img%d.jpg"%(name,i))
+            print "Captured img%d with crop: %d,%d %dx%d"%(i,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/inprog/test_ev_compensation.py b/apps/CameraITS/tests/inprog/test_ev_compensation.py
new file mode 100644
index 0000000..f9b0cd3
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_ev_compensation.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Tests that EV compensation is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    MAX_LUMA_DELTA_THRESH = 0.01
+    AVG_LUMA_DELTA_THRESH = 0.001
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        cam.do_3a()
+
+        # Capture auto shots, but with a linear tonemap.
+        req = its.objects.auto_capture_request()
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveGreen"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveBlue"] = (0.0, 0.0, 1.0, 1.0)
+
+        evs = range(-4,5)
+        lumas = []
+        for ev in evs:
+            req['android.control.aeExposureCompensation'] = ev
+            cap = cam.do_capture(req)
+            y = its.image.convert_capture_to_planes(cap)[0]
+            tile = its.image.get_image_patch(y, 0.45,0.45,0.1,0.1)
+            lumas.append(its.image.compute_image_means(tile)[0])
+
+        ev_step_size_in_stops = its.objects.rational_to_float(
+                props['android.control.aeCompensationStep'])
+        luma_increase_per_step = pow(2, ev_step_size_in_stops)
+        expected_lumas = [lumas[0] * pow(luma_increase_per_step, i) \
+                for i in range(len(evs))]
+
+        pylab.plot(evs, lumas, 'r')
+        pylab.plot(evs, expected_lumas, 'b')
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        luma_diffs = [expected_lumas[i] - lumas[i] for i in range(len(evs))]
+        max_diff = max(luma_diffs)
+        avg_diff = sum(luma_diffs) / len(luma_diffs)
+        print "Max delta between modeled and measured lumas:", max_diff
+        print "Avg delta between modeled and measured lumas:", avg_diff
+        assert(max_diff < MAX_LUMA_DELTA_THRESH)
+        assert(avg_diff < AVG_LUMA_DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/inprog/test_faces.py b/apps/CameraITS/tests/inprog/test_faces.py
new file mode 100644
index 0000000..228dac8
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_faces.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test face detection.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        cam.do_3a()
+        req = its.objects.auto_capture_request()
+        req['android.statistics.faceDetectMode'] = 2
+        caps = cam.do_capture([req]*5)
+        for i,cap in enumerate(caps):
+            md = cap['metadata']
+            print "Frame %d face metadata:" % i
+            print "  Ids:", md['android.statistics.faceIds']
+            print "  Landmarks:", md['android.statistics.faceLandmarks']
+            print "  Rectangles:", md['android.statistics.faceRectangles']
+            print "  Scores:", md['android.statistics.faceScores']
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_param_black_level_lock.py b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
new file mode 100644
index 0000000..7d0be92
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
@@ -0,0 +1,76 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Test that when the black level is locked, it doesn't change.
+
+    Shoot with the camera covered (i.e.) dark/black. The test varies the
+    sensitivity parameter and checks if the black level changes.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_STEPS = 5
+
+    req = {
+        "android.blackLevel.lock": True,
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.exposureTime": 10*1000*1000
+        }
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    modes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sensitivities = range(sens_range[0],
+                              sens_range[1]+1,
+                              int((sens_range[1] - sens_range[0]) / NUM_STEPS))
+        for si, s in enumerate(sensitivities):
+            req["android.sensor.sensitivity"] = s
+            cap = cam.do_capture(req)
+            yimg,_,_ = its.image.convert_capture_to_planes(cap)
+            hist,_ = numpy.histogram(yimg*255, 256, (0,256))
+            modes.append(numpy.argmax(hist))
+
+            # Add this histogram to a plot; solid for shots without BL
+            # lock, dashes for shots with BL lock
+            pylab.plot(range(16), hist.tolist()[:16])
+
+    pylab.xlabel("Luma DN, showing [0:16] out of full [0:256] range")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    # Check that the black levels are all the same.
+    print "Black levels:", modes
+    assert(all([modes[i] == modes[0] for i in range(len(modes))]))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_param_edge_mode.py b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
new file mode 100644
index 0000000..e928f21
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
@@ -0,0 +1,48 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
def main():
    """Test that the android.edge.mode parameter is applied.

    Runs 3A to converge the scene, then captures one manual shot per edge
    mode (0=off, 1=fast, 2=high quality) and writes the images out for
    manual sharpness comparison. No automated pass/fail check is performed.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    # Manual capture request; 30ms exposure at base sensitivity.
    req = {
        "android.control.mode": 0,
        "android.control.aeMode": 0,
        "android.control.awbMode": 0,
        "android.control.afMode": 0,
        "android.sensor.frameDuration": 0,
        "android.sensor.exposureTime": 30*1000*1000,
        "android.sensor.sensitivity": 100
        }

    with its.device.ItsSession() as cam:
        # Run 3A to focus the lens and settle the scene; the returned
        # values were never used, so don't bind them.
        cam.do_3a(get_results=True)
        for e in [0,1,2]:
            req["android.edge.mode"] = e
            cap = cam.do_capture(req)
            img = its.image.convert_capture_to_rgb_image(cap)
            its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, e))

if __name__ == '__main__':
    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_test_patterns.py b/apps/CameraITS/tests/inprog/test_test_patterns.py
new file mode 100644
index 0000000..f75b141
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_test_patterns.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
def main():
    """Test sensor test patterns.

    Captures a shot for each testPatternMode in [1..5] with fixed pattern
    data, and writes the resulting images out for manual inspection.
    No automated pass/fail check is performed.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    with its.device.ItsSession() as cam:
        for i in range(1,6):
            req = its.objects.manual_capture_request(100, 10*1000*1000)
            req['android.sensor.testPatternData'] = [40, 100, 160, 220]
            req['android.sensor.testPatternMode'] = i

            # Capture the shot twice, and use the second one, so the pattern
            # will have stabilized.
            caps = cam.do_capture([req]*2)

            img = its.image.convert_capture_to_rgb_image(caps[1])
            its.image.write_image(img, "%s_pattern=%d.jpg" % (NAME, i))

if __name__ == '__main__':
    main()
+