camera2: Move ITS tests to CTS verifier.

Bug: 17994909

Change-Id: Ie788b1ae3a6b079e37a3472c46aed3dfdcfffe2c
diff --git a/apps/CameraITS/tests/inprog/scene2/README b/apps/CameraITS/tests/inprog/scene2/README
new file mode 100644
index 0000000..3a0953f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/README
@@ -0,0 +1,8 @@
+Scene 2 requires a camera lab with controlled illuminants, for example
+light sources capable of producing D65, D50, A, TL84, etc. illumination.
+Specific charts may also be required, for example grey cards, color
+checker charts, and resolution charts. The individual tests will specify
+the setup that they require.
+
+If a test requires that the camera be in any particular orientation, it will
+specify this too. Otherwise, the camera can be in either portrait or landscape.
diff --git a/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
new file mode 100644
index 0000000..0c96ca7
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
@@ -0,0 +1,94 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.dng
+import its.objects
+import numpy
+import os.path
+
+def main():
+    """Test that the DNG tags are internally self-consistent.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Assumes that illuminant 1 is D65, and illuminant 2 is standard A.
+        # TODO: Generalize DNG tags check for any provided illuminants.
+        illum_code = [21, 17] # D65, A
+        illum_str = ['D65', 'A']
+        ref_str = ['android.sensor.referenceIlluminant%d'%(i) for i in [1,2]]
+        cm_str = ['android.sensor.colorTransform%d'%(i) for i in [1,2]]
+        fm_str = ['android.sensor.forwardMatrix%d'%(i) for i in [1,2]]
+        cal_str = ['android.sensor.calibrationTransform%d'%(i) for i in [1,2]]
+        dng_illum = [its.dng.D65, its.dng.A]
+
+        for i in [0,1]:
+            assert(props[ref_str[i]] == illum_code[i])
+            raw_input("\n[Point camera at grey card under %s and press ENTER]"%(
+                    illum_str[i]))
+
+            cam.do_3a(do_af=False)
+            cap = cam.do_capture(its.objects.auto_capture_request())
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            ccm = its.objects.rational_to_float(
+                    cap["metadata"]["android.colorCorrection.transform"])
+            cal = its.objects.rational_to_float(props[cal_str[i]])
+            print "HAL reported gains:\n", numpy.array(gains)
+            print "HAL reported ccm:\n", numpy.array(ccm).reshape(3,3)
+            print "HAL reported cal:\n", numpy.array(cal).reshape(3,3)
+
+            # Dump the image.
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_%s.jpg" % (NAME, illum_str[i]))
+
+            # Compute the matrices that are expected under this illuminant from
+            # the HAL-reported WB gains, CCM, and calibration matrix.
+            cm, fm = its.dng.compute_cm_fm(dng_illum[i], gains, ccm, cal)
+            asn = its.dng.compute_asn(dng_illum[i], cal, cm)
+            print "Expected ColorMatrix:\n", cm
+            print "Expected ForwardMatrix:\n", fm
+            print "Expected AsShotNeutral:\n", asn
+
+            # Get the matrices that are reported by the HAL for this
+            # illuminant.
+            cm_ref = numpy.array(its.objects.rational_to_float(
+                    props[cm_str[i]])).reshape(3,3)
+            fm_ref = numpy.array(its.objects.rational_to_float(
+                    props[fm_str[i]])).reshape(3,3)
+            asn_ref = numpy.array(its.objects.rational_to_float(
+                    cap['metadata']['android.sensor.neutralColorPoint']))
+            print "Reported ColorMatrix:\n", cm_ref
+            print "Reported ForwardMatrix:\n", fm_ref
+            print "Reported AsShotNeutral:\n", asn_ref
+
+            # The color matrix may be scaled (between the reported and
+            # expected values).
+            cm_scale = cm.mean(1).mean(0) / cm_ref.mean(1).mean(0)
+            print "ColorMatrix scale factor:", cm_scale
+
+            # Compute the deltas between reported and expected.
+            print "Ratios in ColorMatrix:\n", cm / cm_ref
+            print "Deltas in ColorMatrix (after normalizing):\n", cm/cm_scale - cm_ref
+            print "Deltas in ForwardMatrix:\n", fm - fm_ref
+            print "Deltas in AsShotNeutral:\n", asn - asn_ref
+
+            # TODO: Add pass/fail test on DNG matrices.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_3a_remote.py b/apps/CameraITS/tests/inprog/test_3a_remote.py
new file mode 100644
index 0000000..c76ff6d
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_3a_remote.py
@@ -0,0 +1,70 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pprint
+import math
+import numpy
+import matplotlib.pyplot
+import mpl_toolkits.mplot3d
+
+def main():
+    """Run 3A remotely (from this script).
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        w_map = props["android.lens.info.shadingMapSize"]["width"]
+        h_map = props["android.lens.info.shadingMapSize"]["height"]
+
+        # TODO: Test for 3A convergence, and exit this test once converged.
+
+        triggered = False
+        while True:
+            req = its.objects.auto_capture_request()
+            req["android.statistics.lensShadingMapMode"] = 1
+            req['android.control.aePrecaptureTrigger'] = (0 if triggered else 1)
+            req['android.control.afTrigger'] = (0 if triggered else 1)
+            triggered = True
+
+            cap = cam.do_capture(req)
+
+            ae_state = cap["metadata"]["android.control.aeState"]
+            awb_state = cap["metadata"]["android.control.awbState"]
+            af_state = cap["metadata"]["android.control.afState"]
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            transform = cap["metadata"]["android.colorCorrection.transform"]
+            exp_time = cap["metadata"]['android.sensor.exposureTime']
+            lsc_map = cap["metadata"]["android.statistics.lensShadingMap"]
+            foc_dist = cap["metadata"]['android.lens.focusDistance']
+            foc_range = cap["metadata"]['android.lens.focusRange']
+
+            print "States (AE,AWB,AF):", ae_state, awb_state, af_state
+            print "Gains:", gains
+            print "Transform:", [its.objects.rational_to_float(t)
+                                 for t in transform]
+            print "AE region:", cap["metadata"]['android.control.aeRegions']
+            print "AF region:", cap["metadata"]['android.control.afRegions']
+            print "AWB region:", cap["metadata"]['android.control.awbRegions']
+            print "LSC map:", w_map, h_map, lsc_map[:8]
+            print "Focus (dist,range):", foc_dist, foc_range
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_black_level.py b/apps/CameraITS/tests/inprog/test_black_level.py
new file mode 100644
index 0000000..37dab94
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_black_level.py
@@ -0,0 +1,99 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Black level consistency test.
+
+    Test: capture dark frames and check if black level correction is done
+    correctly.
+    1. Black level should be roughly consistent for repeating shots.
+    2. Noise distribution should be roughly centered at black level.
+
+    Shoot with the camera covered (i.e., dark/black). The test varies the
+    sensitivity parameter.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_REPEAT = 3
+    NUM_STEPS = 3
+
+    # Only check the center part where LSC has little effects.
+    R = 200
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    ymodes = []
+    umodes = []
+    vmodes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
+        sensitivities = [sens_range[0] + i*sens_step for i in range(NUM_STEPS)]
+        print "Sensitivities:", sensitivities
+
+        for si, s in enumerate(sensitivities):
+            for rep in xrange(NUM_REPEAT):
+                req = its.objects.manual_capture_request(100, 1*1000*1000)
+                req["android.blackLevel.lock"] = True
+                req["android.sensor.sensitivity"] = s
+                cap = cam.do_capture(req)
+                yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+                w = cap["width"]
+                h = cap["height"]
+
+                # Magnify the noise in saved images to help visualize.
+                its.image.write_image(yimg * 2,
+                                      "%s_s=%05d_y.jpg" % (NAME, s), True)
+                its.image.write_image(numpy.absolute(uimg - 0.5) * 2,
+                                      "%s_s=%05d_u.jpg" % (NAME, s), True)
+
+                yimg = yimg[w/2-R:w/2+R, h/2-R:h/2+R]
+                uimg = uimg[w/4-R/2:w/4+R/2, w/4-R/2:w/4+R/2]
+                vimg = vimg[w/4-R/2:w/4+R/2, w/4-R/2:w/4+R/2]
+                yhist,_ = numpy.histogram(yimg*255, 256, (0,256))
+                ymodes.append(numpy.argmax(yhist))
+                uhist,_ = numpy.histogram(uimg*255, 256, (0,256))
+                umodes.append(numpy.argmax(uhist))
+                vhist,_ = numpy.histogram(vimg*255, 256, (0,256))
+                vmodes.append(numpy.argmax(vhist))
+
+                # Take 32 bins from Y, U, and V.
+                # Histograms of U and V are cropped at the center of 128.
+                pylab.plot(range(32), yhist.tolist()[0:32], 'rgb'[si])
+                pylab.plot(range(32), uhist.tolist()[112:144], 'rgb'[si]+'--')
+                pylab.plot(range(32), vhist.tolist()[112:144], 'rgb'[si]+'--')
+
+    pylab.xlabel("DN: Y[0:32], U[112:144], V[112:144]")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    print "Y black levels:", ymodes
+    print "U black levels:", umodes
+    print "V black levels:", vmodes
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_blc_lsc.py b/apps/CameraITS/tests/inprog/test_blc_lsc.py
new file mode 100644
index 0000000..ce120a2
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_blc_lsc.py
@@ -0,0 +1,106 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that BLC and LSC look reasonable.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    r_means_center = []
+    g_means_center = []
+    b_means_center = []
+    r_means_corner = []
+    g_means_corner = []
+    b_means_corner = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        expt_range = props['android.sensor.info.exposureTimeRange']
+
+        # Get AE+AWB lock first, so the auto values in the capture result are
+        # populated properly.
+        r = [[0,0,1,1,1]]
+        ae_sen,ae_exp,awb_gains,awb_transform,_ \
+                = cam.do_3a(r,r,r,do_af=False,get_results=True)
+        print "AE:", ae_sen, ae_exp / 1000000.0
+        print "AWB:", awb_gains, awb_transform
+
+        # Set analog gain (sensitivity) to 800
+        ae_exp = ae_exp * ae_sen / 800
+        ae_sen = 800
+
+        # Capture range of exposures from 1/100x to 4x of AE estimate.
+        exposures = [ae_exp*x/100.0 for x in [1]+range(10,401,40)]
+        exposures = [e for e in exposures
+                     if e >= expt_range[0] and e <= expt_range[1]]
+
+        # Convert the transform back to rational.
+        awb_transform_rat = its.objects.float_to_rational(awb_transform)
+
+        # Linear tonemap
+        tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])
+
+        reqs = []
+        for e in exposures:
+            req = its.objects.manual_capture_request(ae_sen,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = tmap
+            req["android.tonemap.curveGreen"] = tmap
+            req["android.tonemap.curveBlue"] = tmap
+            req["android.colorCorrection.transform"] = awb_transform_rat
+            req["android.colorCorrection.gains"] = awb_gains
+            reqs.append(req)
+
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg"%(NAME, i))
+
+            tile_center = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_center)
+            r_means_center.append(rgb_means[0])
+            g_means_center.append(rgb_means[1])
+            b_means_center.append(rgb_means[2])
+
+            tile_corner = its.image.get_image_patch(img, 0.0, 0.0, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_corner)
+            r_means_corner.append(rgb_means[0])
+            g_means_corner.append(rgb_means[1])
+            b_means_corner.append(rgb_means[2])
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_center, 'r')
+    pylab.plot(exposures, g_means_center, 'g')
+    pylab.plot(exposures, b_means_center, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_center.png" % (NAME))
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_corner, 'r')
+    pylab.plot(exposures, g_means_corner, 'g')
+    pylab.plot(exposures, b_means_corner, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_corner.png" % (NAME))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
new file mode 100644
index 0000000..f3d49be
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
@@ -0,0 +1,93 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 50
+    BURSTS = 5
+    FRAMES = BURST_LEN * BURSTS
+
+    SPREAD_THRESH = 0.03
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at the smallest resolution.
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        w,h = fmt["width"], fmt["height"]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        # Also build a 4D array, which is an array of all RGB images.
+        r_means = []
+        g_means = []
+        b_means = []
+        imgs = numpy.empty([FRAMES,h,w,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN, [fmt])
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.convert_capture_to_rgb_image(cap)
+                tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
+                means = its.image.compute_image_means(tile)
+                r_means.append(means[0])
+                g_means.append(means[1])
+                b_means.append(means[2])
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print spread
+            assert(spread < SPREAD_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
new file mode 100644
index 0000000..a8d1d45
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
@@ -0,0 +1,91 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import numpy
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 6
+    BURSTS = 2
+    FRAMES = BURST_LEN * BURSTS
+
+    DELTA_THRESH = 0.1
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at full resolution.
+        props = cam.get_camera_properties()
+        w,h = its.objects.get_available_output_sizes("yuv", props)[0]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Build a 4D array, which is an array of all RGB images after down-
+        # scaling them by a factor of 4x4.
+        imgs = numpy.empty([FRAMES,h/4,w/4,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN)
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.downscale_image(
+                        its.image.convert_capture_to_rgb_image(cap), 4)
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Compute the deltas of each image from the mean image; this test
+        # passes if none of the deltas are large.
+        print "Computing frame differences"
+        delta_maxes = []
+        for i in range(FRAMES):
+            deltas = (imgs[i] - img_mean).reshape(h*w*3/16)
+            delta_max_pos = numpy.max(deltas)
+            delta_max_neg = numpy.min(deltas)
+            delta_maxes.append(max(abs(delta_max_pos), abs(delta_max_neg)))
+        max_delta_max = max(delta_maxes)
+        print "Frame %d has largest diff %f" % (
+                delta_maxes.index(max_delta_max), max_delta_max)
+        assert(max_delta_max < DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_crop_region.py b/apps/CameraITS/tests/inprog/test_crop_region.py
new file mode 100644
index 0000000..396603f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_crop_region.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import its.image
+import its.device
+import its.objects
+
+
+def main():
+    """Takes shots with different sensor crop regions.
+    """
+    name = os.path.basename(__file__).split(".")[0]
+
+    # Regions specified here in x,y,w,h normalized form.
+    regions = [[0.0, 0.0, 0.5, 0.5], # top left
+               [0.0, 0.5, 0.5, 0.5], # bottom left
+               [0.1, 0.9, 0.5, 1.0]] # right side (top + bottom)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        r = props['android.sensor.info.pixelArraySize']
+        w = r['width']
+        h = r['height']
+
+        # Capture a full frame first.
+        reqs = [its.objects.auto_capture_request()]
+        print "Capturing img0 with the full sensor region"
+
+        # Capture a frame for each of the regions.
+        for i,region in enumerate(regions):
+            req = its.objects.auto_capture_request()
+            req['android.scaler.cropRegion'] = {
+                    "left": int(region[0] * w),
+                    "top": int(region[1] * h),
+                    "right": int((region[0]+region[2])*w),
+                    "bottom": int((region[1]+region[3])*h)}
+            reqs.append(req)
+            crop = req['android.scaler.cropRegion']
+            print "Capturing img%d with crop: %d,%d %dx%d"%(i+1,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+        cam.do_3a()
+        caps = cam.do_capture(reqs)
+
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            crop = cap["metadata"]['android.scaler.cropRegion']
+            its.image.write_image(img, "%s_img%d.jpg"%(name,i))
+            print "Captured img%d with crop: %d,%d %dx%d"%(i,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/inprog/test_ev_compensation.py b/apps/CameraITS/tests/inprog/test_ev_compensation.py
new file mode 100644
index 0000000..f9b0cd3
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_ev_compensation.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Tests that EV compensation is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    MAX_LUMA_DELTA_THRESH = 0.01
+    AVG_LUMA_DELTA_THRESH = 0.001
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        cam.do_3a()
+
+        # Capture auto shots, but with a linear tonemap.
+        req = its.objects.auto_capture_request()
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveGreen"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveBlue"] = (0.0, 0.0, 1.0, 1.0)
+
+        evs = range(-4,5)
+        lumas = []
+        for ev in evs:
+            req['android.control.aeExposureCompensation'] = ev
+            cap = cam.do_capture(req)
+            y = its.image.convert_capture_to_planes(cap)[0]
+            tile = its.image.get_image_patch(y, 0.45,0.45,0.1,0.1)
+            lumas.append(its.image.compute_image_means(tile)[0])
+
+        ev_step_size_in_stops = its.objects.rational_to_float(
+                props['android.control.aeCompensationStep'])
+        luma_increase_per_step = pow(2, ev_step_size_in_stops)
+        expected_lumas = [lumas[0] * pow(luma_increase_per_step, i) \
+                for i in range(len(evs))]
+
+        pylab.plot(evs, lumas, 'r')
+        pylab.plot(evs, expected_lumas, 'b')
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        luma_diffs = [expected_lumas[i] - lumas[i] for i in range(len(evs))]
+        max_diff = max(luma_diffs)
+        avg_diff = sum(luma_diffs) / len(luma_diffs)
+        print "Max delta between modeled and measured lumas:", max_diff
+        print "Avg delta between modeled and measured lumas:", avg_diff
+        assert(max_diff < MAX_LUMA_DELTA_THRESH)
+        assert(avg_diff < AVG_LUMA_DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/inprog/test_faces.py b/apps/CameraITS/tests/inprog/test_faces.py
new file mode 100644
index 0000000..228dac8
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_faces.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test face detection.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        cam.do_3a()
+        req = its.objects.auto_capture_request()
+        req['android.statistics.faceDetectMode'] = 2
+        caps = cam.do_capture([req]*5)
+        for i,cap in enumerate(caps):
+            md = cap['metadata']
+            print "Frame %d face metadata:" % i
+            print "  Ids:", md['android.statistics.faceIds']
+            print "  Landmarks:", md['android.statistics.faceLandmarks']
+            print "  Rectangles:", md['android.statistics.faceRectangles']
+            print "  Scores:", md['android.statistics.faceScores']
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_param_black_level_lock.py b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
new file mode 100644
index 0000000..7d0be92
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
@@ -0,0 +1,76 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Test that when the black level is locked, it doesn't change.
+
+    Shoot with the camera covered (i.e., dark/black). The test varies the
+    sensitivity parameter and checks if the black level changes.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_STEPS = 5
+
+    req = {
+        "android.blackLevel.lock": True,
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.exposureTime": 10*1000*1000
+        }
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    modes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sensitivities = range(sens_range[0],
+                              sens_range[1]+1,
+                              int((sens_range[1] - sens_range[0]) / NUM_STEPS))
+        for si, s in enumerate(sensitivities):
+            req["android.sensor.sensitivity"] = s
+            cap = cam.do_capture(req)
+            yimg,_,_ = its.image.convert_capture_to_planes(cap)
+            hist,_ = numpy.histogram(yimg*255, 256, (0,256))
+            modes.append(numpy.argmax(hist))
+
+            # Add this histogram to a plot; solid for shots without BL
+            # lock, dashes for shots with BL lock
+            pylab.plot(range(16), hist.tolist()[:16])
+
+    pylab.xlabel("Luma DN, showing [0:16] out of full [0:256] range")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    # Check that the black levels are all the same.
+    print "Black levels:", modes
+    assert(all([modes[i] == modes[0] for i in range(len(modes))]))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_param_edge_mode.py b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
new file mode 100644
index 0000000..e928f21
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
@@ -0,0 +1,48 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.edge.mode parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    req = {
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.exposureTime": 30*1000*1000,
+        "android.sensor.sensitivity": 100
+        }
+
+    with its.device.ItsSession() as cam:
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        for e in [0,1,2]:
+            req["android.edge.mode"] = e
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, e))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_test_patterns.py b/apps/CameraITS/tests/inprog/test_test_patterns.py
new file mode 100644
index 0000000..f75b141
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_test_patterns.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test sensor test patterns.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        caps = []
+        for i in range(1,6):
+            req = its.objects.manual_capture_request(100, 10*1000*1000)
+            req['android.sensor.testPatternData'] = [40, 100, 160, 220]
+            req['android.sensor.testPatternMode'] = i
+
+            # Capture the shot twice, and use the second one, so the pattern
+            # will have stabilized.
+            caps = cam.do_capture([req]*2)
+
+            img = its.image.convert_capture_to_rgb_image(caps[1])
+            its.image.write_image(img, "%s_pattern=%d.jpg" % (NAME, i))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/README b/apps/CameraITS/tests/scene0/README
new file mode 100644
index 0000000..50be04a
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/README
@@ -0,0 +1,3 @@
+This scene has no requirements; scene 0 tests don't actually
+look at the image content, and the camera can be pointed at
+any target (or even flat on the desk).
diff --git a/apps/CameraITS/tests/scene0/test_camera_properties.py b/apps/CameraITS/tests/scene0/test_camera_properties.py
new file mode 100644
index 0000000..05fc364
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_camera_properties.py
@@ -0,0 +1,43 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.caps
+import its.device
+import its.objects
+import pprint
+
+def main():
+    """Basic test to query and print out camera properties.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        pprint.pprint(props)
+
+        # Test that a handful of required keys are present.
+        if its.caps.manual_sensor(props):
+            assert(props.has_key('android.sensor.info.sensitivityRange'))
+
+        assert(props.has_key('android.sensor.orientation'))
+        assert(props.has_key('android.scaler.streamConfigurationMap'))
+        assert(props.has_key('android.lens.facing'))
+
+        print "JPG sizes:", its.objects.get_available_output_sizes("jpg", props)
+        print "RAW sizes:", its.objects.get_available_output_sizes("raw", props)
+        print "YUV sizes:", its.objects.get_available_output_sizes("yuv", props)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_capture_result_dump.py b/apps/CameraITS/tests/scene0/test_capture_result_dump.py
new file mode 100644
index 0000000..c8b1f8f
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_capture_result_dump.py
@@ -0,0 +1,42 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.caps
+import its.image
+import its.device
+import its.objects
+import its.target
+import pprint
+
+def main():
+    """Test that a capture result is returned from a manual capture; dump it.
+    """
+
+    with its.device.ItsSession() as cam:
+        # Arbitrary capture request exposure values; image content is not
+        # important for this test, only the metadata.
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        req,fmt = its.objects.get_fastest_manual_capture_settings(props)
+        cap = cam.do_capture(req, fmt)
+        pprint.pprint(cap["metadata"])
+
+        # No pass/fail check; test passes if it completes.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_gyro_bias.py b/apps/CameraITS/tests/scene0/test_gyro_bias.py
new file mode 100644
index 0000000..64a5ff0
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_gyro_bias.py
@@ -0,0 +1,80 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import time
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Test if the gyro has stable output when device is stationary.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Number of samples averaged together, in the plot.
+    N = 20
+
+    # Pass/fail thresholds for gyro drift
+    MEAN_THRESH = 0.01
+    VAR_THRESH = 0.001
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Only run test if the appropriate caps are claimed.
+        if not its.caps.sensor_fusion(props):
+            print "Test skipped"
+            return
+
+        print "Collecting gyro events"
+        cam.start_sensor_events()
+        time.sleep(5)
+        gyro_events = cam.get_sensor_events()["gyro"]
+
+    nevents = (len(gyro_events) / N) * N
+    gyro_events = gyro_events[:nevents]
+    times = numpy.array([(e["time"] - gyro_events[0]["time"])/1000000000.0
+                         for e in gyro_events])
+    xs = numpy.array([e["x"] for e in gyro_events])
+    ys = numpy.array([e["y"] for e in gyro_events])
+    zs = numpy.array([e["z"] for e in gyro_events])
+
+    # Group samples into size-N groups and average each together, to get rid
+    # of individual random spikes in the data.
+    times = times[N/2::N]
+    xs = xs.reshape(nevents/N, N).mean(1)
+    ys = ys.reshape(nevents/N, N).mean(1)
+    zs = zs.reshape(nevents/N, N).mean(1)
+
+    pylab.plot(times, xs, 'r', label="x")
+    pylab.plot(times, ys, 'g', label="y")
+    pylab.plot(times, zs, 'b', label="z")
+    pylab.xlabel("Time (seconds)")
+    pylab.ylabel("Gyro readings (mean of %d samples)"%(N))
+    pylab.legend()
+    matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
+
+    for samples in [xs,ys,zs]:
+        assert(samples.mean() < MEAN_THRESH)
+        assert(numpy.var(samples) < VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_jitter.py b/apps/CameraITS/tests/scene0/test_jitter.py
new file mode 100644
index 0000000..29b3047
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_jitter.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Measure jitter in camera timestamps.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Pass/fail thresholds
+    MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames
+    MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas
+    MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        req, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        caps = cam.do_capture([req]*50, [fmt])
+
+        # Print out the millisecond delta between the start of each exposure
+        tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]
+        deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]
+        deltas_ms = [d/1000000.0 for d in deltas]
+        avg = sum(deltas_ms) / len(deltas_ms)
+        var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg
+        range0 = min(deltas_ms) - avg
+        range1 = max(deltas_ms) - avg
+        print "Average:", avg
+        print "Variance:", var
+        print "Jitter range:", range0, "to", range1
+
+        # Draw a plot.
+        pylab.plot(range(len(deltas_ms)), deltas_ms)
+        matplotlib.pyplot.savefig("%s_deltas.png" % (NAME))
+
+        # Test for pass/fail.
+        assert(avg > MIN_AVG_FRAME_DELTA)
+        assert(var < MAX_VAR_FRAME_DELTA)
+        assert(abs(range0) < MAX_FRAME_DELTA_JITTER)
+        assert(abs(range1) < MAX_FRAME_DELTA_JITTER)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_metadata.py b/apps/CameraITS/tests/scene0/test_metadata.py
new file mode 100644
index 0000000..b4ca4cb
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_metadata.py
@@ -0,0 +1,98 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import its.target
+import its.caps
+
+def main():
+    """Test the validity of some metadata entries.
+
+    Looks at capture results and at the camera characteristics objects.
+    """
+    global md, props, failed
+
+    with its.device.ItsSession() as cam:
+        # Arbitrary capture request exposure values; image content is not
+        # important for this test, only the metadata.
+        props = cam.get_camera_properties()
+        auto_req = its.objects.auto_capture_request()
+        cap = cam.do_capture(auto_req)
+        md = cap["metadata"]
+
+    print "Hardware level"
+    print "  Legacy:", its.caps.legacy(props)
+    print "  Limited:", its.caps.limited(props)
+    print "  Full:", its.caps.full(props)
+    print "Capabilities"
+    print "  Manual sensor:", its.caps.manual_sensor(props)
+    print "  Manual post-proc:", its.caps.manual_post_proc(props)
+    print "  Raw:", its.caps.raw(props)
+    print "  Sensor fusion:", its.caps.sensor_fusion(props)
+
+    # Test: hardware level should be a valid value.
+    check('props.has_key("android.info.supportedHardwareLevel")')
+    check('props["android.info.supportedHardwareLevel"] is not None')
+    check('props["android.info.supportedHardwareLevel"] in [0,1,2]')
+    full = getval('props["android.info.supportedHardwareLevel"]') == 1
+
+    # Test: rollingShutterSkew, and frameDuration tags must all be present,
+    # and rollingShutterSkew must be greater than zero and smaller than all
+    # of the possible frame durations.
+    check('md.has_key("android.sensor.frameDuration")')
+    check('md["android.sensor.frameDuration"] is not None')
+    check('md.has_key("android.sensor.rollingShutterSkew")')
+    check('md["android.sensor.rollingShutterSkew"] is not None')
+    check('md["android.sensor.frameDuration"] > '
+          'md["android.sensor.rollingShutterSkew"] > 0')
+
+    # Test: timestampSource must be a valid value.
+    check('props.has_key("android.sensor.info.timestampSource")')
+    check('props["android.sensor.info.timestampSource"] is not None')
+    check('props["android.sensor.info.timestampSource"] in [0,1]')
+
+    # Test: croppingType must be a valid value, and for full devices, it
+    # must be FREEFORM=1.
+    check('props.has_key("android.scaler.croppingType")')
+    check('props["android.scaler.croppingType"] is not None')
+    check('props["android.scaler.croppingType"] in [0,1]')
+    if full:
+        check('props["android.scaler.croppingType"] == 1')
+
+    assert(not failed)
+
+def getval(expr, default=None):
+    try:
+        return eval(expr)
+    except:
+        return default
+
+failed = False
+def check(expr):
+    global md, props, failed
+    try:
+        if eval(expr):
+            print "Passed>", expr
+        else:
+            print "Failed>>", expr
+            failed = True
+    except:
+        print "Failed>>", expr
+        failed = True
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
new file mode 100644
index 0000000..eb9a3c1
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
@@ -0,0 +1,49 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+
+def main():
+    """Test that the android.sensor.sensitivity parameter is applied properly
+    within a burst. Inspects the output metadata only (not the image data).
+    """
+
+    NUM_STEPS = 3
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / NUM_STEPS
+        sens_list = range(sens_range[0], sens_range[1], sens_step)
+        e = min(props['android.sensor.info.exposureTimeRange'])
+        reqs = [its.objects.manual_capture_request(s,e) for s in sens_list]
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+
+        caps = cam.do_capture(reqs, fmt)
+        for i,cap in enumerate(caps):
+            s_req = sens_list[i]
+            s_res = cap["metadata"]["android.sensor.sensitivity"]
+            assert(s_req == s_res)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_sensor_events.py b/apps/CameraITS/tests/scene0/test_sensor_events.py
new file mode 100644
index 0000000..61f0383
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_sensor_events.py
@@ -0,0 +1,44 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import time
+
+def main():
+    """Basic test to query and print out sensor events.
+
+    Test will only work if the screen is on (i.e., the device isn't in standby).
+    Pass if some of each event are received.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Only run test if the appropriate caps are claimed.
+        if not its.caps.sensor_fusion(props):
+            print "Test skipped"
+            return
+
+        cam.start_sensor_events()
+        time.sleep(1)
+        events = cam.get_sensor_events()
+        print "Events over 1s: %d gyro, %d accel, %d mag"%(
+                len(events["gyro"]), len(events["accel"]), len(events["mag"]))
+        assert(len(events["gyro"]) > 0)
+        assert(len(events["accel"]) > 0)
+        assert(len(events["mag"]) > 0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_unified_timestamps.py b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
new file mode 100644
index 0000000..cdc9567
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.objects
+import its.caps
+import time
+
+def main():
+    """Test if image and motion sensor events are in the same time domain.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Only run test if the appropriate caps are claimed.
+        if not its.caps.sensor_fusion(props):
+            print "Test skipped"
+            return
+
+        # Get the timestamp of a captured image.
+        req, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        cap = cam.do_capture(req, fmt)
+        ts_image0 = cap['metadata']['android.sensor.timestamp']
+
+        # Get the timestamps of motion events.
+        print "Reading sensor measurements"
+        cam.start_sensor_events()
+        time.sleep(0.5)
+        events = cam.get_sensor_events()
+        assert(len(events["gyro"]) > 0)
+        assert(len(events["accel"]) > 0)
+        assert(len(events["mag"]) > 0)
+        ts_gyro0 = events["gyro"][0]["time"]
+        ts_gyro1 = events["gyro"][-1]["time"]
+        ts_accel0 = events["accel"][0]["time"]
+        ts_accel1 = events["accel"][-1]["time"]
+        ts_mag0 = events["mag"][0]["time"]
+        ts_mag1 = events["mag"][-1]["time"]
+
+        # Get the timestamp of another image.
+        cap = cam.do_capture(req, fmt)
+        ts_image1 = cap['metadata']['android.sensor.timestamp']
+
+        print "Image timestamps:", ts_image0, ts_image1
+        print "Gyro timestamps:", ts_gyro0, ts_gyro1
+        print "Accel timestamps:", ts_accel0, ts_accel1
+        print "Mag timestamps:", ts_mag0, ts_mag1
+
+        # The motion timestamps must be between the two image timestamps.
+        assert ts_image0 < min(ts_gyro0, ts_accel0, ts_mag0) < ts_image1
+        assert ts_image0 < max(ts_gyro1, ts_accel1, ts_mag1) < ts_image1
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/README b/apps/CameraITS/tests/scene1/README
new file mode 100755
index 0000000..93543d9
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/README
@@ -0,0 +1,16 @@
+Scene 1 description:
+* Camera on tripod in portrait or landscape orientation
+* Scene mostly filled by grey card, with white background behind grey card
+* Illuminated by simple light source, for example a desk lamp
+* Uniformity of lighting and target positioning need not be precise
+
+This is intended to be a very simple setup that can be recreated on an
+engineer's desk without any large or expensive equipment. The tests for this
+scene in general only look at a patch in the middle of the image (which is
+assumed to be within the bounds of the grey card).
+
+Note that the scene should not be completely uniform; don't have the grey card
+100% fill the field of view and use a high quality uniform light source, for
+example, and don't use a diffuser on top of the camera to simulate a grey
+scene.
+
diff --git a/apps/CameraITS/tests/scene1/test_3a.py b/apps/CameraITS/tests/scene1/test_3a.py
new file mode 100644
index 0000000..b53fc73
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_3a.py
@@ -0,0 +1,42 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+
+def main():
+    """Basic test for bring-up of 3A.
+
+    To pass, 3A must converge. Check that the returned 3A values are legal.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.read_3a(props):
+            print "Test skipped"
+            return
+
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        print "AE: sensitivity %d, exposure %dms" % (sens, exp/1000000)
+        print "AWB: gains", gains, "transform", xform
+        print "AF: distance", focus
+        assert(sens > 0)
+        assert(exp > 0)
+        assert(len(gains) == 4)
+        assert(len(xform) == 9)
+        assert(focus >= 0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py b/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py
new file mode 100644
index 0000000..59b7db1
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.target
+
+def main():
+    """Test the AE state machine when using the precapture trigger.
+    """
+
+    INACTIVE = 0
+    SEARCHING = 1
+    CONVERGED = 2
+    LOCKED = 3
+    FLASHREQUIRED = 4
+    PRECAPTURE = 5
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+
+        # Capture 5 manual requests, with AE disabled, and the last request
+        # has an AE precapture trigger (which should be ignored since AE is
+        # disabled).
+        manual_reqs = []
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        manual_req = its.objects.manual_capture_request(s,e)
+        manual_req['android.control.aeMode'] = 0 # Off
+        manual_reqs += [manual_req]*4
+        precap_req = its.objects.manual_capture_request(s,e)
+        precap_req['android.control.aeMode'] = 0 # Off
+        precap_req['android.control.aePrecaptureTrigger'] = 1 # Start
+        manual_reqs.append(precap_req)
+        caps = cam.do_capture(manual_reqs, fmt)
+        for cap in caps:
+            assert(cap['metadata']['android.control.aeState'] == INACTIVE)
+
+        # Capture an auto request and verify the AE state; no trigger.
+        auto_req = its.objects.auto_capture_request()
+        auto_req['android.control.aeMode'] = 1  # On
+        cap = cam.do_capture(auto_req, fmt)
+        state = cap['metadata']['android.control.aeState']
+        print "AE state after auto request:", state
+        assert(state in [SEARCHING, CONVERGED])
+
+        # Capture with auto request with a precapture trigger.
+        auto_req['android.control.aePrecaptureTrigger'] = 1  # Start
+        cap = cam.do_capture(auto_req, fmt)
+        state = cap['metadata']['android.control.aeState']
+        print "AE state after auto request with precapture trigger:", state
+        assert(state in [SEARCHING, CONVERGED, PRECAPTURE])
+
+        # Capture some more auto requests, and AE should converge.
+        auto_req['android.control.aePrecaptureTrigger'] = 0
+        caps = cam.do_capture([auto_req]*5, fmt)
+        state = caps[-1]['metadata']['android.control.aeState']
+        print "AE state after auto request:", state
+        assert(state == CONVERGED)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
new file mode 100644
index 0000000..a9d5ce4
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
@@ -0,0 +1,95 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import math
+
+def main():
+    """Capture auto and manual shots that should look the same.
+
+    Manual shots taken with just manual WB, and also with manual WB+tonemap.
+
+    In all cases, the general color/look of the shots should be the same,
+    however there can be variations in brightness/contrast due to different
+    "auto" ISP blocks that may be disabled in the manual flows.
+    """
+    # Output image/plot files are named after this script (sans extension).
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Needs both manual sensor controls (exposure/sensitivity) and
+        # manual post-processing controls (WB transform/gains, tonemap).
+        if (not its.caps.manual_sensor(props) or
+            not its.caps.manual_post_proc(props)):
+            print "Test skipped"
+            return
+
+        # Converge 3A and get the estimates.
+        # exp is divided by 1e6 for the "ms" label below, i.e. it is
+        # presumably in nanoseconds -- TODO confirm against its.device.do_3a.
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        xform_rat = its.objects.float_to_rational(xform)
+        print "AE sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
+        print "AWB gains", gains
+        print "AWB transform", xform
+        print "AF distance", focus
+
+        # Auto capture.
+        req = its.objects.auto_capture_request()
+        cap_auto = cam.do_capture(req)
+        img_auto = its.image.convert_capture_to_rgb_image(cap_auto)
+        its.image.write_image(img_auto, "%s_auto.jpg" % (NAME))
+        xform_a = its.objects.rational_to_float(
+                cap_auto["metadata"]["android.colorCorrection.transform"])
+        gains_a = cap_auto["metadata"]["android.colorCorrection.gains"]
+        print "Auto gains:", gains_a
+        print "Auto transform:", xform_a
+
+        # Manual capture 1: WB
+        # Same exposure/sensitivity as 3A, with the 3A WB estimate applied
+        # manually.
+        req = its.objects.manual_capture_request(sens, exp)
+        req["android.colorCorrection.transform"] = xform_rat
+        req["android.colorCorrection.gains"] = gains
+        cap_man1 = cam.do_capture(req)
+        img_man1 = its.image.convert_capture_to_rgb_image(cap_man1)
+        its.image.write_image(img_man1, "%s_manual_wb.jpg" % (NAME))
+        xform_m1 = its.objects.rational_to_float(
+                cap_man1["metadata"]["android.colorCorrection.transform"])
+        gains_m1 = cap_man1["metadata"]["android.colorCorrection.gains"]
+        print "Manual wb gains:", gains_m1
+        print "Manual wb transform:", xform_m1
+
+        # Manual capture 2: WB + tonemap
+        # 64-point gamma 1/2.2 curve, as interleaved (input,output) pairs.
+        gamma = sum([[i/63.0,math.pow(i/63.0,1/2.2)] for i in xrange(64)],[])
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = gamma
+        req["android.tonemap.curveGreen"] = gamma
+        req["android.tonemap.curveBlue"] = gamma
+        cap_man2 = cam.do_capture(req)
+        img_man2 = its.image.convert_capture_to_rgb_image(cap_man2)
+        its.image.write_image(img_man2, "%s_manual_wb_tm.jpg" % (NAME))
+        xform_m2 = its.objects.rational_to_float(
+                cap_man2["metadata"]["android.colorCorrection.transform"])
+        gains_m2 = cap_man2["metadata"]["android.colorCorrection.gains"]
+        print "Manual wb+tm gains:", gains_m2
+        print "Manual wb+tm transform:", xform_m2
+
+        # Check that the WB gains and transform reported in each capture
+        # result match with the original AWB estimate from do_3a.
+        # 0.05 is a loose per-element absolute tolerance.
+        for g,x in [(gains_a,xform_a),(gains_m1,xform_m1),(gains_m2,xform_m2)]:
+            assert(all([abs(xform[i] - x[i]) < 0.05 for i in range(9)]))
+            assert(all([abs(gains[i] - g[i]) < 0.05 for i in range(4)]))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_black_white.py b/apps/CameraITS/tests/scene1/test_black_white.py
new file mode 100644
index 0000000..e471602
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_black_white.py
@@ -0,0 +1,86 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the device will produce full black+white images.
+
+    Takes one shot at minimum sensitivity + minimum exposure time (expected
+    to be black) and one at the maxima (expected to be white), and checks
+    the mean RGB of a center patch against loose thresholds.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Per-channel means for the [black, white] shots, used for the plot.
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Manual sensor control is required to force the extreme exposures.
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        expt_range = props['android.sensor.info.exposureTimeRange']
+        sens_range = props['android.sensor.info.sensitivityRange']
+
+        # Take a shot with very low ISO and exposure time. Expect it to
+        # be black.
+        # Exposure times are scaled by 1e6 for the "ms" label, i.e.
+        # presumably reported in nanoseconds -- TODO confirm.
+        print "Black shot: sens = %d, exp time = %.4fms" % (
+                sens_range[0], expt_range[0]/1000000.0)
+        req = its.objects.manual_capture_request(sens_range[0], expt_range[0])
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_black.jpg" % (NAME))
+        # 10%-of-frame patch at the image center.
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        black_means = its.image.compute_image_means(tile)
+        r_means.append(black_means[0])
+        g_means.append(black_means[1])
+        b_means.append(black_means[2])
+        print "Dark pixel means:", black_means
+
+        # Take a shot with very high ISO and exposure time. Expect it to
+        # be white.
+        print "White shot: sens = %d, exp time = %.2fms" % (
+                sens_range[1], expt_range[1]/1000000.0)
+        req = its.objects.manual_capture_request(sens_range[1], expt_range[1])
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_white.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        white_means = its.image.compute_image_means(tile)
+        r_means.append(white_means[0])
+        g_means.append(white_means[1])
+        b_means.append(white_means[2])
+        print "Bright pixel means:", white_means
+
+        # Draw a plot.
+        pylab.plot([0,1], r_means, 'r')
+        pylab.plot([0,1], g_means, 'g')
+        pylab.plot([0,1], b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        # Pass/fail: with means normalized to [0,1], the black shot must be
+        # below 2.5% and the white shot above 97.5% in every channel.
+        for val in black_means:
+            assert(val < 0.025)
+        for val in white_means:
+            assert(val > 0.975)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
new file mode 100644
index 0000000..3858c0c
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
@@ -0,0 +1,86 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import numpy
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts. Uses manual
+    capture settings.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # 5 bursts of 50 frames each = 250 frames total.
+    BURST_LEN = 50
+    BURSTS = 5
+    FRAMES = BURST_LEN * BURSTS
+
+    # Max allowed (max - min) spread of the center-patch means, per channel.
+    SPREAD_THRESH = 0.03
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at the smallest resolution.
+        props = cam.get_camera_properties()
+        if not its.caps.manual_sensor(props):
+            print "Test skipped"
+            return
+
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        req = its.objects.manual_capture_request(s, e)
+        w,h = fmt["width"], fmt["height"]
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        # Also build a 4D array, which is an array of all RGB images.
+        r_means = []
+        g_means = []
+        b_means = []
+        imgs = numpy.empty([FRAMES,h,w,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN, [fmt])
+            for i,cap in enumerate(caps):
+                # n is the global frame index across all bursts.
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.convert_capture_to_rgb_image(cap)
+                tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
+                means = its.image.compute_image_means(tile)
+                r_means.append(means[0])
+                g_means.append(means[1])
+                b_means.append(means[2])
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print spread
+            assert(spread < SPREAD_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_capture_result.py b/apps/CameraITS/tests/scene1/test_capture_result.py
new file mode 100644
index 0000000..304e811
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_capture_result.py
@@ -0,0 +1,214 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+import matplotlib.pyplot
+
+# Required for 3d plot to work
+import mpl_toolkits.mplot3d
+
+def main():
+    """Test that valid data comes back in CaptureResult objects.
+
+    Runs an auto capture, then a fully-manual capture, then auto again,
+    checking the metadata returned by each (the auto->manual->auto order
+    also verifies that switching modes doesn't leave stale result state).
+    """
+    # Globals (rather than arguments) because the module-level helpers
+    # test_auto/test_manual/draw_lsc_plot defined below reference them.
+    global NAME, auto_req, manual_req, w_map, h_map
+    global manual_tonemap, manual_transform, manual_gains, manual_region
+    global manual_exp_time, manual_sensitivity, manual_gains_ok
+
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Needs both manual sensor and manual post-processing controls.
+        if (not its.caps.manual_sensor(props) or
+            not its.caps.manual_post_proc(props)):
+            print "Test skipped"
+            return
+
+        manual_tonemap = [0,0, 1,1] # Linear
+        manual_transform = its.objects.int_to_rational([1,2,3, 4,5,6, 7,8,9])
+        manual_gains = [1,2,3,4]
+        manual_region = [{"x":8,"y":8,"width":128,"height":128,"weight":1}]
+        manual_exp_time = min(props['android.sensor.info.exposureTimeRange'])
+        manual_sensitivity = min(props['android.sensor.info.sensitivityRange'])
+
+        # The camera HAL may not support different gains for two G channels.
+        manual_gains_ok = [[1,2,3,4],[1,2,2,4],[1,3,3,4]]
+
+        auto_req = its.objects.auto_capture_request()
+        auto_req["android.statistics.lensShadingMapMode"] = 1
+
+        # Fully-manual request: 3A off, fixed sensor/color/tonemap settings,
+        # and lens shading map reporting enabled.
+        manual_req = {
+            "android.control.mode": 0,
+            "android.control.aeMode": 0,
+            "android.control.awbMode": 0,
+            "android.control.afMode": 0,
+            "android.sensor.frameDuration": 0,
+            "android.sensor.sensitivity": manual_sensitivity,
+            "android.sensor.exposureTime": manual_exp_time,
+            "android.colorCorrection.mode": 0,
+            "android.colorCorrection.transform": manual_transform,
+            "android.colorCorrection.gains": manual_gains,
+            "android.tonemap.mode": 0,
+            "android.tonemap.curveRed": manual_tonemap,
+            "android.tonemap.curveGreen": manual_tonemap,
+            "android.tonemap.curveBlue": manual_tonemap,
+            "android.control.aeRegions": manual_region,
+            "android.control.afRegions": manual_region,
+            "android.control.awbRegions": manual_region,
+            "android.statistics.lensShadingMapMode":1
+            }
+
+        # Lens shading map dimensions (cells), from the static metadata.
+        w_map = props["android.lens.info.shadingMapSize"]["width"]
+        h_map = props["android.lens.info.shadingMapSize"]["height"]
+
+        print "Testing auto capture results"
+        lsc_map_auto = test_auto(cam, w_map, h_map)
+        print "Testing manual capture results"
+        test_manual(cam, w_map, h_map, lsc_map_auto)
+        print "Testing auto capture results again"
+        test_auto(cam, w_map, h_map)
+
+# A very loose definition for two floats being close to each other;
+# there may be different interpolation and rounding used to get the
+# two values, and all this test is looking at is whether there is
+# something obviously broken; it's not looking for a perfect match.
+def is_close_float(n1, n2):
+    """Return True if n1 and n2 are within 0.05 (absolute) of each other."""
+    return abs(n1 - n2) < 0.05
+
+def is_close_rational(n1, n2):
+    """Return True if the two rationals are approximately equal as floats."""
+    return is_close_float(its.objects.rational_to_float(n1),
+                          its.objects.rational_to_float(n2))
+
+def draw_lsc_plot(w_map, h_map, lsc_map, name):
+    """Save a 3D wireframe plot of each lens shading map channel.
+
+    lsc_map is a flat list with 4 channels interleaved per cell (the
+    stride-4 slicing below); w_map x h_map cells per channel. Channel
+    order is presumably the Bayer channel order -- TODO confirm against
+    the android.statistics.lensShadingMap documentation.
+    """
+    for ch in range(4):
+        fig = matplotlib.pyplot.figure()
+        ax = fig.gca(projection='3d')
+        # Build X/Y coordinate grids matching the map dimensions.
+        xs = numpy.array([range(w_map)] * h_map).reshape(h_map, w_map)
+        ys = numpy.array([[i]*w_map for i in range(h_map)]).reshape(
+                h_map, w_map)
+        zs = numpy.array(lsc_map[ch::4]).reshape(h_map, w_map)
+        ax.plot_wireframe(xs, ys, zs)
+        matplotlib.pyplot.savefig("%s_plot_lsc_%s_ch%d.png"%(NAME,name,ch))
+
+def test_auto(cam, w_map, h_map):
+    """Capture with the auto request and sanity-check the capture result.
+
+    Returns the reported lens shading map (flat list), so the caller can
+    pass it along to test_manual for comparison.
+    """
+    # Get 3A lock first, so the auto values in the capture result are
+    # populated properly.
+    rect = [[0,0,1,1,1]]
+    cam.do_3a(rect, rect, rect, do_af=False)
+
+    cap = cam.do_capture(auto_req)
+    cap_res = cap["metadata"]
+
+    gains = cap_res["android.colorCorrection.gains"]
+    transform = cap_res["android.colorCorrection.transform"]
+    exp_time = cap_res['android.sensor.exposureTime']
+    lsc_map = cap_res["android.statistics.lensShadingMap"]
+    ctrl_mode = cap_res["android.control.mode"]
+
+    print "Control mode:", ctrl_mode
+    print "Gains:", gains
+    print "Transform:", [its.objects.rational_to_float(t)
+                         for t in transform]
+    print "AE region:", cap_res['android.control.aeRegions']
+    print "AF region:", cap_res['android.control.afRegions']
+    print "AWB region:", cap_res['android.control.awbRegions']
+    print "LSC map:", w_map, h_map, lsc_map[:8]
+
+    # Control mode 1 == AUTO must be reported back.
+    assert(ctrl_mode == 1)
+
+    # Color correction gain and transform must be valid.
+    assert(len(gains) == 4)
+    assert(len(transform) == 9)
+    assert(all([g > 0 for g in gains]))
+    assert(all([t["denominator"] != 0 for t in transform]))
+
+    # Color correction should not match the manual settings.
+    # (The manual values are deliberately implausible, so AWB results that
+    # happen to equal them indicate the result wasn't really from AWB.)
+    assert(any([not is_close_float(gains[i], manual_gains[i])
+                for i in xrange(4)]))
+    assert(any([not is_close_rational(transform[i], manual_transform[i])
+                for i in xrange(9)]))
+
+    # Exposure time must be valid.
+    assert(exp_time > 0)
+
+    # Lens shading map must be valid.
+    # 4 interleaved channels per cell, and all gains must be >= 1.
+    assert(w_map > 0 and h_map > 0 and w_map * h_map * 4 == len(lsc_map))
+    assert(all([m >= 1 for m in lsc_map]))
+
+    draw_lsc_plot(w_map, h_map, lsc_map, "auto")
+
+    return lsc_map
+
+def test_manual(cam, w_map, h_map, lsc_map_auto):
+    """Capture with the fully-manual request and check the capture result.
+
+    Verifies that the result echoes the manually-requested color correction,
+    tonemap, and exposure values (within loose tolerances).
+    """
+    cap = cam.do_capture(manual_req)
+    cap_res = cap["metadata"]
+
+    gains = cap_res["android.colorCorrection.gains"]
+    transform = cap_res["android.colorCorrection.transform"]
+    curves = [cap_res["android.tonemap.curveRed"],
+              cap_res["android.tonemap.curveGreen"],
+              cap_res["android.tonemap.curveBlue"]]
+    exp_time = cap_res['android.sensor.exposureTime']
+    lsc_map = cap_res["android.statistics.lensShadingMap"]
+    ctrl_mode = cap_res["android.control.mode"]
+
+    print "Control mode:", ctrl_mode
+    print "Gains:", gains
+    print "Transform:", [its.objects.rational_to_float(t)
+                         for t in transform]
+    print "Tonemap:", curves[0][1::16]
+    print "AE region:", cap_res['android.control.aeRegions']
+    print "AF region:", cap_res['android.control.afRegions']
+    print "AWB region:", cap_res['android.control.awbRegions']
+    print "LSC map:", w_map, h_map, lsc_map[:8]
+
+    # Control mode 0 == OFF (manual) must be reported back.
+    assert(ctrl_mode == 0)
+
+    # Color correction gain and transform must be valid.
+    # Color correction gains and transform should be the same size and
+    # values as the manually set values.
+    # (Any of the manual_gains_ok variants is accepted, since the HAL may
+    # force the two G channels to a common gain.)
+    assert(len(gains) == 4)
+    assert(len(transform) == 9)
+    assert( all([is_close_float(gains[i], manual_gains_ok[0][i])
+                 for i in xrange(4)]) or
+            all([is_close_float(gains[i], manual_gains_ok[1][i])
+                 for i in xrange(4)]) or
+            all([is_close_float(gains[i], manual_gains_ok[2][i])
+                 for i in xrange(4)]))
+    assert(all([is_close_rational(transform[i], manual_transform[i])
+                for i in xrange(9)]))
+
+    # Tonemap must be valid.
+    # The returned tonemap must be linear.
+    # Curves are interleaved (input,output) pairs, so comparing c[i] with
+    # c[i+1] at even i checks output == input, i.e. an identity curve.
+    for c in curves:
+        assert(len(c) > 0)
+        assert(all([is_close_float(c[i], c[i+1])
+                    for i in xrange(0,len(c),2)]))
+
+    # Exposure time must be close to the requested exposure time.
+    # Compared in ms (the /1e6), so the 0.05 tolerance is 50us.
+    assert(is_close_float(exp_time/1000000.0, manual_exp_time/1000000.0))
+
+    # Lens shading map must be valid.
+    assert(w_map > 0 and h_map > 0 and w_map * h_map * 4 == len(lsc_map))
+    assert(all([m >= 1 for m in lsc_map]))
+
+    draw_lsc_plot(w_map, h_map, lsc_map, "manual")
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_crop_region_raw.py b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
new file mode 100644
index 0000000..94c8e2b
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
@@ -0,0 +1,116 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import numpy
+import os.path
+
+def main():
+    """Test that raw streams are not croppable.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    DIFF_THRESH = 0.05
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if (not its.caps.compute_target_exposure(props) or
+            not its.caps.raw16(props)):
+            print "Test skipped"
+            return
+
+        a = props['android.sensor.info.activeArraySize']
+        ax, ay = a["left"], a["top"]
+        aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+        print "Active sensor region: (%d,%d %dx%d)" % (ax, ay, aw, ah)
+
+        # Capture without a crop region.
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        req = its.objects.manual_capture_request(s,e, True)
+        cap1_raw, cap1_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        # Capture with a center crop region.
+        req["android.scaler.cropRegion"] = {
+                "top": ay + ah/3,
+                "left": ax + aw/3,
+                "right": ax + 2*aw/3,
+                "bottom": ay + 2*ah/3}
+        cap2_raw, cap2_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        reported_crops = []
+        imgs = {}
+        for s,cap in [("yuv_full",cap1_yuv), ("raw_full",cap1_raw),
+                ("yuv_crop",cap2_yuv), ("raw_crop",cap2_raw)]:
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_%s.jpg" % (NAME, s))
+            r = cap["metadata"]["android.scaler.cropRegion"]
+            x, y = a["left"], a["top"]
+            w, h = a["right"] - a["left"], a["bottom"] - a["top"]
+            reported_crops.append((x,y,w,h))
+            imgs[s] = img
+            print "Crop on %s: (%d,%d %dx%d)" % (s, x,y,w,h)
+
+        # The metadata should report uncropped for all shots (since there is
+        # at least 1 uncropped stream in each case).
+        for (x,y,w,h) in reported_crops:
+            assert((ax,ay,aw,ah) == (x,y,w,h))
+
+        # Also check the image content; 3 of the 4 shots should match.
+        # Note that all the shots are RGB below; the variable names correspond
+        # to what was captured.
+        # Average the images down 4x4 -> 1 prior to comparison to smooth out
+        # noise.
+        # Shrink the YUV images an additional 2x2 -> 1 to account for the size
+        # reduction that the raw images went through in the RGB conversion.
+        imgs2 = {}
+        for s,img in imgs.iteritems():
+            h,w,ch = img.shape
+            m = 4
+            if s in ["yuv_full", "yuv_crop"]:
+                m = 8
+            img = img.reshape(h/m,m,w/m,m,3).mean(3).mean(1).reshape(h/m,w/m,3)
+            imgs2[s] = img
+            print s, img.shape
+
+        # Strip any border pixels from the raw shots (since the raw images may
+        # be larger than the YUV images). Assume a symmetric padded border.
+        xpad = (imgs2["raw_full"].shape[1] - imgs2["yuv_full"].shape[1]) / 2
+        ypad = (imgs2["raw_full"].shape[0] - imgs2["yuv_full"].shape[0]) / 2
+        wyuv = imgs2["yuv_full"].shape[1]
+        hyuv = imgs2["yuv_full"].shape[0]
+        imgs2["raw_full"]=imgs2["raw_full"][ypad:ypad+hyuv:,xpad:xpad+wyuv:,::]
+        imgs2["raw_crop"]=imgs2["raw_crop"][ypad:ypad+hyuv:,xpad:xpad+wyuv:,::]
+        print "Stripping padding before comparison:", xpad, ypad
+
+        for s,img in imgs2.iteritems():
+            its.image.write_image(img, "%s_comp_%s.jpg" % (NAME, s))
+
+        # Compute image diffs.
+        diff_yuv = numpy.fabs((imgs2["yuv_full"] - imgs2["yuv_crop"])).mean()
+        diff_raw = numpy.fabs((imgs2["raw_full"] - imgs2["raw_crop"])).mean()
+        print "YUV diff (crop vs. non-crop):", diff_yuv
+        print "RAW diff (crop vs. non-crop):", diff_raw
+
+        assert(diff_yuv > DIFF_THRESH)
+        assert(diff_raw < DIFF_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_crop_regions.py b/apps/CameraITS/tests/scene1/test_crop_regions.py
new file mode 100644
index 0000000..da0cd0a
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_crop_regions.py
@@ -0,0 +1,106 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import numpy
+
+def main():
+    """Test that crop regions work.
+
+    Captures a full frame and then a burst with five different crop regions,
+    and checks (by image correlation) that each cropped capture best matches
+    the corresponding region of the full image.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # A list of 5 regions, specified in normalized (x,y,w,h) coords.
+    # The regions correspond to: TL, TR, BL, BR, CENT
+    REGIONS = [(0.0, 0.0, 0.5, 0.5),
+               (0.5, 0.0, 0.5, 0.5),
+               (0.0, 0.5, 0.5, 0.5),
+               (0.5, 0.5, 0.5, 0.5),
+               (0.25, 0.25, 0.5, 0.5)]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        a = props['android.sensor.info.activeArraySize']
+        ax, ay = a["left"], a["top"]
+        aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        print "Active sensor region (%d,%d %dx%d)" % (ax, ay, aw, ah)
+
+        # Uses a 2x digital zoom.
+        assert(props['android.scaler.availableMaxDigitalZoom'] >= 2)
+
+        # Capture a full frame.
+        req = its.objects.manual_capture_request(s,e)
+        cap_full = cam.do_capture(req)
+        img_full = its.image.convert_capture_to_rgb_image(cap_full)
+        its.image.write_image(img_full, "%s_full.jpg" % (NAME))
+        wfull, hfull = cap_full["width"], cap_full["height"]
+
+        # Capture a burst of crop region frames.
+        # Note that each region is 1/2x1/2 of the full frame, and is digitally
+        # zoomed into the full size output image, so must be downscaled (below)
+        # by 2x when compared to a tile of the full image.
+        reqs = []
+        for x,y,w,h in REGIONS:
+            req = its.objects.manual_capture_request(s,e)
+            # Convert normalized coords to pixel coords in the active array.
+            req["android.scaler.cropRegion"] = {
+                    "top": int(ah * y),
+                    "left": int(aw * x),
+                    "right": int(aw * (x + w)),
+                    "bottom": int(ah * (y + h))}
+            reqs.append(req)
+        caps_regions = cam.do_capture(reqs)
+        match_failed = False
+        for i,cap in enumerate(caps_regions):
+            # Intentionally overwrites a/ax/ay/aw/ah with the crop region
+            # that this capture's result metadata reports.
+            a = cap["metadata"]["android.scaler.cropRegion"]
+            ax, ay = a["left"], a["top"]
+            aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+
+            # Match this crop image against each of the five regions of
+            # the full image, to find the best match (which should be
+            # the region that corresponds to this crop image).
+            img_crop = its.image.convert_capture_to_rgb_image(cap)
+            img_crop = its.image.downscale_image(img_crop, 2)
+            its.image.write_image(img_crop, "%s_crop%d.jpg" % (NAME, i))
+            min_diff = None
+            min_diff_region = None
+            for j,(x,y,w,h) in enumerate(REGIONS):
+                tile_full = its.image.get_image_patch(img_full, x,y,w,h)
+                # Clamp both tiles to a common size before differencing.
+                wtest = min(tile_full.shape[1], aw)
+                htest = min(tile_full.shape[0], ah)
+                tile_full = tile_full[0:htest:, 0:wtest:, ::]
+                tile_crop = img_crop[0:htest:, 0:wtest:, ::]
+                its.image.write_image(tile_full, "%s_fullregion%d.jpg"%(NAME,j))
+                diff = numpy.fabs(tile_full - tile_crop).mean()
+                if min_diff is None or diff < min_diff:
+                    min_diff = diff
+                    min_diff_region = j
+            if i != min_diff_region:
+                match_failed = True
+            print "Crop image %d (%d,%d %dx%d) best match with region %d"%(
+                    i, ax, ay, aw, ah, min_diff_region)
+
+        assert(not match_failed)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_exposure.py b/apps/CameraITS/tests/scene1/test_exposure.py
new file mode 100644
index 0000000..8676358
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_exposure.py
@@ -0,0 +1,92 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import numpy
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that a constant exposure is seen as ISO and exposure time vary.
+
+    Take a series of shots that have ISO and exposure time chosen to balance
+    each other; result should be the same brightness, but over the sequence
+    the images should get noisier.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Pass/fail thresholds for the per-channel line fits below.
+    THRESHOLD_MAX_OUTLIER_DIFF = 0.1
+    THRESHOLD_MIN_LEVEL = 0.1
+    THRESHOLD_MAX_LEVEL = 0.9
+    THRESHOLD_MAX_ABS_GRAD = 0.001
+
+    mults = []
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        e,s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        expt_range = props['android.sensor.info.exposureTimeRange']
+        sens_range = props['android.sensor.info.sensitivityRange']
+
+        # Scale sensitivity up by m and exposure time down by m, so the
+        # product (total exposure) stays constant. m steps 1,5,9,... until
+        # either value would leave its supported range.
+        m = 1
+        while s*m < sens_range[1] and e/m > expt_range[0]:
+            mults.append(m)
+            req = its.objects.manual_capture_request(s*m, e/m)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mult=%02d.jpg" % (NAME, m))
+            # Mean RGB of a 10%-of-frame patch at the image center.
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+            m = m + 4
+
+    # Draw a plot.
+    pylab.plot(mults, r_means, 'r')
+    pylab.plot(mults, g_means, 'g')
+    pylab.plot(mults, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Check for linearity. For each R,G,B channel, fit a line y=mx+b, and
+    # assert that the gradient is close to 0 (flat) and that there are no
+    # crazy outliers. Also ensure that the images aren't clamped to 0 or 1
+    # (which would make them look like flat lines).
+    for chan in xrange(3):
+        values = [r_means, g_means, b_means][chan]
+        m, b = numpy.polyfit(mults, values, 1).tolist()
+        print "Channel %d line fit (y = mx+b): m = %f, b = %f" % (chan, m, b)
+        assert(abs(m) < THRESHOLD_MAX_ABS_GRAD)
+        assert(b > THRESHOLD_MIN_LEVEL and b < THRESHOLD_MAX_LEVEL)
+        for v in values:
+            assert(v > THRESHOLD_MIN_LEVEL and v < THRESHOLD_MAX_LEVEL)
+            assert(abs(v - b) < THRESHOLD_MAX_OUTLIER_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_format_combos.py b/apps/CameraITS/tests/scene1/test_format_combos.py
new file mode 100644
index 0000000..a021102
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_format_combos.py
@@ -0,0 +1,126 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.error
+import its.target
+import sys
+import os
+import os.path
+
# Change this to True, to have the test break at the first failure.
# (When True, main() calls sys.exit(0) as soon as any combination fails,
# instead of running all 40 combinations and printing a summary.)
stop_at_first_failure = False
+
def main():
    """Test different combinations of output formats.

    Iterates over the full cross product of (request type) x (output-format
    combination) x (burst length) -- 2 x 10 x 2 = 40 cases -- capturing each
    one and recording which succeed and which fail. The test passes only if
    every combination captures successfully.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    with its.device.ItsSession() as cam:

        props = cam.get_camera_properties()
        # Requires manual exposure control and RAW16 output support.
        if (not its.caps.compute_target_exposure(props) or
            not its.caps.raw16(props)):
            print "Test skipped"
            return

        # Each entry is an (n,r,f,b) tuple identifying one combination.
        successes = []
        failures = []

        # Two different requests: auto, and manual.
        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
        req_aut = its.objects.auto_capture_request()
        req_man = its.objects.manual_capture_request(s, e)
        reqs = [req_aut, # R0
                req_man] # R1

        # 10 different combos of output formats; some are single surfaces, and
        # some are multiple surfaces.
        # The "prev" variants pin an explicit size (the [-1] entry of the
        # available-sizes list -- presumably the smallest; TODO confirm the
        # ordering of get_available_output_sizes); the "full" variants omit
        # width/height and use the default size.
        wyuv,hyuv = its.objects.get_available_output_sizes("yuv", props)[-1]
        wjpg,hjpg = its.objects.get_available_output_sizes("jpg", props)[-1]
        fmt_yuv_prev = {"format":"yuv", "width":wyuv, "height":hyuv}
        fmt_yuv_full = {"format":"yuv"}
        fmt_jpg_prev = {"format":"jpeg","width":wjpg, "height":hjpg}
        fmt_jpg_full = {"format":"jpeg"}
        fmt_raw_full = {"format":"raw"}
        fmt_combos =[
                [fmt_yuv_prev],                             # F0
                [fmt_yuv_full],                             # F1
                [fmt_jpg_prev],                             # F2
                [fmt_jpg_full],                             # F3
                [fmt_raw_full],                             # F4
                [fmt_yuv_prev, fmt_jpg_prev],               # F5
                [fmt_yuv_prev, fmt_jpg_full],               # F6
                [fmt_yuv_prev, fmt_raw_full],               # F7
                [fmt_yuv_prev, fmt_jpg_prev, fmt_raw_full], # F8
                [fmt_yuv_prev, fmt_jpg_full, fmt_raw_full]] # F9

        # Two different burst lengths: single frame, and 3 frames.
        burst_lens = [1, # B0
                      3] # B1

        # There are 2x10x2=40 different combinations. Run through them all.
        n = 0
        for r,req in enumerate(reqs):
            for f,fmt_combo in enumerate(fmt_combos):
                for b,burst_len in enumerate(burst_lens):
                    try:
                        caps = cam.do_capture([req]*burst_len, fmt_combo)
                        successes.append((n,r,f,b))
                        print "==> Success[%02d]: R%d F%d B%d" % (n,r,f,b)

                        # Dump the captures out to jpegs.
                        # do_capture may return a single capture, a flat
                        # list, or (multi-surface burst) a list of lists;
                        # normalize all three shapes to one flat list.
                        if not isinstance(caps, list):
                            caps = [caps]
                        elif isinstance(caps[0], list):
                            caps = sum(caps, [])
                        for c,cap in enumerate(caps):
                            img = its.image.convert_capture_to_rgb_image(cap,
                                    props=props)
                            its.image.write_image(img,
                                    "%s_n%02d_r%d_f%d_b%d_c%d.jpg"%(NAME,n,r,f,b,c))

                    except Exception as e:
                        # NOTE(review): "as e" rebinds the outer exposure-time
                        # variable e; harmless today since e is not used after
                        # the requests are built, but worth renaming.
                        print e
                        print "==> Failure[%02d]: R%d F%d B%d" % (n,r,f,b)
                        failures.append((n,r,f,b))
                        if stop_at_first_failure:
                            sys.exit(0)
                    n += 1

        num_fail = len(failures)
        num_success = len(successes)
        num_total = len(reqs)*len(fmt_combos)*len(burst_lens)
        num_not_run = num_total - num_success - num_fail

        # Print a summary of failures, successes, and skipped combinations.
        print "\nFailures (%d / %d):" % (num_fail, num_total)
        for (n,r,f,b) in failures:
            print "  %02d: R%d F%d B%d" % (n,r,f,b)
        print "\nSuccesses (%d / %d):" % (num_success, num_total)
        for (n,r,f,b) in successes:
            print "  %02d: R%d F%d B%d" % (n,r,f,b)
        if num_not_run > 0:
            print "\nNumber of tests not run: %d / %d" % (num_not_run, num_total)
        print ""

        # The test passes if all the combinations successfully capture.
        assert(num_fail == 0)
        assert(num_success == num_total)

if __name__ == '__main__':
    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_jpeg.py b/apps/CameraITS/tests/scene1/test_jpeg.py
new file mode 100644
index 0000000..bc2d64e
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_jpeg.py
@@ -0,0 +1,64 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test that converted YUV images and device JPEG images look the same.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.01
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        # YUV
+        size = its.objects.get_available_output_sizes("yuv", props)[0]
+        out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_fmt=yuv.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # JPEG
+        size = its.objects.get_available_output_sizes("jpg", props)[0]
+        out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+        its.image.write_image(img, "%s_fmt=jpg.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_latching.py b/apps/CameraITS/tests/scene1/test_latching.py
new file mode 100644
index 0000000..bef41ac
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_latching.py
@@ -0,0 +1,91 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that settings latch on the right frame.
+
+    Takes a bunch of shots using back-to-back requests, varying the capture
+    request parameters between shots. Checks that the images that come back
+    have the expected properties.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.full(props):
+            print "Test skipped"
+            return
+
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 2.0
+
+        r_means = []
+        g_means = []
+        b_means = []
+
+        reqs = [
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            ]
+
+        caps = cam.do_capture(reqs, fmt)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+        # Draw a plot.
+        idxs = range(len(r_means))
+        pylab.plot(idxs, r_means, 'r')
+        pylab.plot(idxs, g_means, 'g')
+        pylab.plot(idxs, b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        g_avg = sum(g_means) / len(g_means)
+        g_ratios = [g / g_avg for g in g_means]
+        g_hilo = [g>1.0 for g in g_ratios]
+        assert(g_hilo == [False, False, True, True, False, False, True,
+                          False, True, False, True, False, True, True])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_linearity.py b/apps/CameraITS/tests/scene1/test_linearity.py
new file mode 100644
index 0000000..fed0324
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_linearity.py
@@ -0,0 +1,99 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import numpy
+import math
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that device processing can be inverted to linear pixels.
+
+    Captures a sequence of shots with the device pointed at a uniform
+    target. Attempts to invert all the ISP processing to get back to
+    linear R,G,B pixel data.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    RESIDUAL_THRESHOLD = 0.00005
+
+    # The HAL3.2 spec requires that curves up to 64 control points in length
+    # must be supported.
+    L = 64
+    LM1 = float(L-1)
+
+    gamma_lut = numpy.array(
+            sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
+    inv_gamma_lut = numpy.array(
+            sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        s /= 2
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sensitivities = [s*1.0/3.0, s*2.0/3.0, s, s*4.0/3.0, s*5.0/3.0]
+        sensitivities = [s for s in sensitivities
+                if s > sens_range[0] and s < sens_range[1]]
+
+        req = its.objects.manual_capture_request(0, e)
+        req["android.blackLevel.lock"] = True
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = gamma_lut.tolist()
+        req["android.tonemap.curveGreen"] = gamma_lut.tolist()
+        req["android.tonemap.curveBlue"] = gamma_lut.tolist()
+
+        r_means = []
+        g_means = []
+        b_means = []
+
+        for sens in sensitivities:
+            req["android.sensor.sensitivity"] = sens
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_sens=%04d.jpg" % (NAME, sens))
+            img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+        pylab.plot(sensitivities, r_means, 'r')
+        pylab.plot(sensitivities, g_means, 'g')
+        pylab.plot(sensitivities, b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        # Check that each plot is actually linear.
+        for means in [r_means, g_means, b_means]:
+            line,residuals,_,_,_  = numpy.polyfit(range(5),means,1,full=True)
+            print "Line: m=%f, b=%f, resid=%f"%(line[0], line[1], residuals[0])
+            assert(residuals[0] < RESIDUAL_THRESHOLD)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_locked_burst.py b/apps/CameraITS/tests/scene1/test_locked_burst.py
new file mode 100644
index 0000000..83cfa25
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_locked_burst.py
@@ -0,0 +1,89 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import numpy
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
def main():
    """Test 3A lock + YUV burst (using auto settings).

    This is a test that is designed to pass even on limited devices that
    don't have MANUAL_SENSOR or PER_FRAME_CONTROLS. (They must be able to
    capture bursts with full res @ full frame rate to pass, however).
    """
    NAME = os.path.basename(__file__).split(".")[0]

    BURST_LEN = 10
    SPREAD_THRESH = 0.005   # max allowed (max - min) of the patch means
    FPS_MAX_DIFF = 2.0      # allowed FPS shortfall vs the advertised max

    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()

        # Converge 3A prior to capture.
        cam.do_3a(do_af=False, lock_ae=True, lock_awb=True)

        # After 3A has converged, lock AE+AWB for the duration of the test.
        req = its.objects.auto_capture_request()
        req["android.control.awbLock"] = True
        req["android.control.aeLock"] = True

        # Capture bursts of YUV shots.
        # Get the mean values of a center patch for each.
        r_means = []
        g_means = []
        b_means = []
        caps = cam.do_capture([req]*BURST_LEN)
        for i,cap in enumerate(caps):
            img = its.image.convert_capture_to_rgb_image(cap)
            its.image.write_image(img, "%s_frame%d.jpg"%(NAME,i))
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            means = its.image.compute_image_means(tile)
            r_means.append(means[0])
            g_means.append(means[1])
            b_means.append(means[2])

        # Pass/fail based on center patch similarity.
        # With AE and AWB locked, every frame should look near-identical.
        for means in [r_means, g_means, b_means]:
            spread = max(means) - min(means)
            print "Patch mean spread", spread
            assert(spread < SPREAD_THRESH)

        # Also ensure that the burst was at full frame rate.
        # Find the advertised minimum frame duration for an output stream
        # matching the captured size; 0x23 is YUV_420_888 in the Android
        # ImageFormat enum.
        fmt_code = 0x23
        configs = props['android.scaler.streamConfigurationMap']\
                       ['availableStreamConfigurations']
        min_duration = None
        for cfg in configs:
            if cfg['format'] == fmt_code and cfg['input'] == False and \
                    cfg['width'] == caps[0]["width"] and \
                    cfg['height'] == caps[0]["height"]:
                min_duration = cfg["minFrameDuration"]
        assert(min_duration is not None)
        # Slowest observed instantaneous FPS, from consecutive sensor
        # timestamps (nanoseconds), vs the max FPS implied by the
        # advertised min frame duration.
        tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]
        deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]
        actual_fps = 1.0 / (max(deltas) / 1000000000.0)
        max_fps = 1.0 / (min_duration / 1000000000.0)
        print "FPS measured %.1f, max advertized %.1f" %(actual_fps, max_fps)
        assert(max_fps - FPS_MAX_DIFF <= actual_fps <= max_fps)

if __name__ == '__main__':
    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_color_correction.py b/apps/CameraITS/tests/scene1/test_param_color_correction.py
new file mode 100644
index 0000000..82f2342
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_color_correction.py
@@ -0,0 +1,105 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
def main():
    """Test that the android.colorCorrection.* params are applied when set.

    Takes shots with different transform and gains values, and tests that
    they look correspondingly different. The transform and gains are chosen
    to make the output go redder or bluer.

    Uses a linear tonemap.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    THRESHOLD_MAX_DIFF = 0.1

    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        if not its.caps.compute_target_exposure(props):
            print "Test skipped"
            return

        # Baseline request
        e, s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
        req = its.objects.manual_capture_request(s, e, True)
        # Mode 0 makes the pipeline use the explicit transform/gains set
        # below (TRANSFORM_MATRIX in the colorCorrection.mode enum).
        req["android.colorCorrection.mode"] = 0

        # Transforms:
        # 1. Identity
        # 2. Identity
        # 3. Boost blue
        transforms = [its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,2])]

        # Gains:
        # 1. Unit
        # 2. Boost red
        # 3. Unit
        gains = [[1,1,1,1], [2,1,1,1], [1,1,1,1]]

        r_means = []
        g_means = []
        b_means = []

        # Capture requests:
        # 1. With unit gains, and identity transform.
        # 2. With a higher red gain, and identity transform.
        # 3. With unit gains, and a transform that boosts blue.
        for i in range(len(transforms)):
            req["android.colorCorrection.transform"] = transforms[i]
            req["android.colorCorrection.gains"] = gains[i]
            cap = cam.do_capture(req)
            img = its.image.convert_capture_to_rgb_image(cap)
            its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            rgb_means = its.image.compute_image_means(tile)
            r_means.append(rgb_means[0])
            g_means.append(rgb_means[1])
            b_means.append(rgb_means[2])
            ratios = [rgb_means[0] / rgb_means[1], rgb_means[2] / rgb_means[1]]
            print "Means = ", rgb_means, "   Ratios =", ratios

        # Draw a plot.
        domain = range(len(transforms))
        pylab.plot(domain, r_means, 'r')
        pylab.plot(domain, g_means, 'g')
        pylab.plot(domain, b_means, 'b')
        pylab.ylim([0,1])
        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))

        # Expect G0 == G1 == G2, R0 == 0.5*R1 == R2, B0 == B1 == 0.5*B2
        # Also need to ensure that the image is not clamped to white/black.
        assert(all(g_means[i] > 0.2 and g_means[i] < 0.8 for i in xrange(3)))
        assert(abs(g_means[1] - g_means[0]) < THRESHOLD_MAX_DIFF)
        assert(abs(g_means[2] - g_means[1]) < THRESHOLD_MAX_DIFF)
        assert(abs(r_means[2] - r_means[0]) < THRESHOLD_MAX_DIFF)
        assert(abs(r_means[1] - 2.0 * r_means[0]) < THRESHOLD_MAX_DIFF)
        assert(abs(b_means[1] - b_means[0]) < THRESHOLD_MAX_DIFF)
        assert(abs(b_means[2] - 2.0 * b_means[0]) < THRESHOLD_MAX_DIFF)

if __name__ == '__main__':
    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_exposure_time.py b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
new file mode 100644
index 0000000..390fd3c
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
@@ -0,0 +1,69 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.sensor.exposureTime parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    exp_times = []
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        e,s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        for i,e_mult in enumerate([0.8, 0.9, 1.0, 1.1, 1.2]):
+            req = its.objects.manual_capture_request(s, e * e_mult, True)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_frame%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            exp_times.append(e * e_mult)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+    # Draw a plot.
+    pylab.plot(exp_times, r_means, 'r')
+    pylab.plot(exp_times, g_means, 'g')
+    pylab.plot(exp_times, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Test for pass/fail: check that each shot is brighter than the previous.
+    for means in [r_means, g_means, b_means]:
+        for i in range(len(means)-1):
+            assert(means[i+1] > means[i])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_flash_mode.py b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
new file mode 100644
index 0000000..6d1be4f
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
@@ -0,0 +1,66 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+
+def main():
+    """Test that the android.flash.mode parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        flash_modes_reported = []
+        flash_states_reported = []
+        g_means = []
+
+        # Manually set the exposure to be a little on the dark side, so that
+        # it should be obvious whether the flash fired or not, and use a
+        # linear tonemap.
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 4
+        req = its.objects.manual_capture_request(s, e, True)
+
+        for f in [0,1,2]:
+            req["android.flash.mode"] = f
+            cap = cam.do_capture(req)
+            flash_modes_reported.append(cap["metadata"]["android.flash.mode"])
+            flash_states_reported.append(cap["metadata"]["android.flash.state"])
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, f))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            g_means.append(rgb[1])
+
+        assert(flash_modes_reported == [0,1,2])
+        assert(flash_states_reported[0] not in [3,4])
+        assert(flash_states_reported[1] in [3,4])
+        assert(flash_states_reported[2] in [3,4])
+
+        print "G brightnesses:", g_means
+        assert(g_means[1] > g_means[0])
+        assert(g_means[2] > g_means[0])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_noise_reduction.py b/apps/CameraITS/tests/scene1/test_param_noise_reduction.py
new file mode 100644
index 0000000..618f8a7
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_noise_reduction.py
@@ -0,0 +1,100 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
def main():
    """Test that the android.noiseReduction.mode param is applied when set.

    Capture images with the camera dimly lit. Uses a high analog gain to
    ensure the captured image is noisy.

    Captures three images, for NR off, "fast", and "high quality".
    Also captures an image with low gain and NR off, and uses the variance
    of this as the baseline.
    """
    NAME = os.path.basename(__file__).split(".")[0]

    # List of variances for Y,U,V.
    # variances[j][i] is plane j's center-patch variance with NR mode i,
    # normalized by that plane's low-gain baseline.
    variances = [[],[],[]]

    # Reference (baseline) variance for each of Y,U,V.
    ref_variance = []

    # NR mode echoed back in each capture's metadata; expected [0,1,2].
    nr_modes_reported = []

    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        if not its.caps.compute_target_exposure(props):
            print "Test skipped"
            return

        # NR mode 0 with low gain
        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
        req = its.objects.manual_capture_request(s, e)
        req["android.noiseReduction.mode"] = 0
        cap = cam.do_capture(req)
        its.image.write_image(
                its.image.convert_capture_to_rgb_image(cap),
                "%s_low_gain.jpg" % (NAME))
        planes = its.image.convert_capture_to_planes(cap)
        # Baseline: center-patch variance of each plane at low gain, NR off.
        for j in range(3):
            img = planes[j]
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            ref_variance.append(its.image.compute_image_variances(tile)[0])
        print "Ref variances:", ref_variance

        for i in range(3):
            # NR modes 0, 1, 2 with high gain
            e, s = its.target.get_target_exposure_combos(cam)["maxSensitivity"]
            req = its.objects.manual_capture_request(s, e)
            req["android.noiseReduction.mode"] = i
            cap = cam.do_capture(req)
            nr_modes_reported.append(
                    cap["metadata"]["android.noiseReduction.mode"])
            its.image.write_image(
                    its.image.convert_capture_to_rgb_image(cap),
                    "%s_high_gain_nr=%d.jpg" % (NAME, i))
            planes = its.image.convert_capture_to_planes(cap)
            for j in range(3):
                img = planes[j]
                tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
                variance = its.image.compute_image_variances(tile)[0]
                variances[j].append(variance / ref_variance[j])
        print "Variances with NR mode [0,1,2]:", variances

    # Draw a plot.
    for j in range(3):
        pylab.plot(range(3), variances[j], "rgb"[j])
    matplotlib.pyplot.savefig("%s_plot_variances.png" % (NAME))

    assert(nr_modes_reported == [0,1,2])

    # Check that the variance of the NR=0 image is higher than for the
    # NR=1 and NR=2 images. (No ordering is asserted between modes 1 and 2.)
    for j in range(3):
        for i in range(1,3):
            assert(variances[j][i] < variances[j][0])

if __name__ == '__main__':
    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_sensitivity.py b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
new file mode 100644
index 0000000..c26e9f9
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
@@ -0,0 +1,74 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.sensor.sensitivity parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_STEPS = 5
+
+    sensitivities = None
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
+        sensitivities = [sens_range[0] + i * sens_step for i in range(NUM_STEPS)]
+
+        for s in sensitivities:
+            req = its.objects.manual_capture_request(s, expt)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_iso=%04d.jpg" % (NAME, s))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+    # Draw a plot.
+    pylab.plot(sensitivities, r_means, 'r')
+    pylab.plot(sensitivities, g_means, 'g')
+    pylab.plot(sensitivities, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Test for pass/fail: check that each shot is brighter than the previous.
+    for means in [r_means, g_means, b_means]:
+        for i in range(len(means)-1):
+            assert(means[i+1] > means[i])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
new file mode 100644
index 0000000..fbd452c
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
@@ -0,0 +1,104 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os
+import os.path
+
+def main():
+    """Test that the android.tonemap.mode param is applied.
+
+    Applies different tonemap curves to each R,G,B channel, and checks
+    that the output images are modified as expected.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_RATIO_MIN_DIFF = 0.1
+    THRESHOLD_DIFF_MAX_DIFF = 0.05
+
+    # The HAL3.2 spec requires that curves up to 64 control points in length
+    # must be supported.
+    L = 32
+    LM1 = float(L-1)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 2
+
+        # Test 1: that the tonemap curves have the expected effect. Take two
+        # shots, with n in [0,1], where each has a linear tonemap, with the
+        # n=1 shot having a steeper gradient. The gradient for each R,G,B
+        # channel increases (i.e.) R[n=1] should be brighter than R[n=0],
+        # and G[n=1] should be brighter than G[n=0] by a larger margin, etc.
+        rgb_means = []
+
+        for n in [0,1]:
+            req = its.objects.manual_capture_request(s,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = (
+                    sum([[i/LM1, min(1.0,(1+0.5*n)*i/LM1)] for i in range(L)], []))
+            req["android.tonemap.curveGreen"] = (
+                    sum([[i/LM1, min(1.0,(1+1.0*n)*i/LM1)] for i in range(L)], []))
+            req["android.tonemap.curveBlue"] = (
+                    sum([[i/LM1, min(1.0,(1+1.5*n)*i/LM1)] for i in range(L)], []))
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_n=%d.jpg" %(NAME, n))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means.append(its.image.compute_image_means(tile))
+
+        rgb_ratios = [rgb_means[1][i] / rgb_means[0][i] for i in xrange(3)]
+        print "Test 1: RGB ratios:", rgb_ratios
+        assert(rgb_ratios[0] + THRESHOLD_RATIO_MIN_DIFF < rgb_ratios[1])
+        assert(rgb_ratios[1] + THRESHOLD_RATIO_MIN_DIFF < rgb_ratios[2])
+
+
+        # Test 2: that the length of the tonemap curve (i.e. number of control
+        # points) doesn't affect the output.
+        rgb_means = []
+
+        for size in [32,64]:
+            m = float(size-1)
+            curve = sum([[i/m, i/m] for i in range(size)], [])
+            req = its.objects.manual_capture_request(s,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = curve
+            req["android.tonemap.curveGreen"] = curve
+            req["android.tonemap.curveBlue"] = curve
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_size=%02d.jpg" %(NAME, size))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means.append(its.image.compute_image_means(tile))
+
+        rgb_diffs = [rgb_means[1][i] - rgb_means[0][i] for i in xrange(3)]
+        print "Test 2: RGB diffs:", rgb_diffs
+        assert(abs(rgb_diffs[0]) < THRESHOLD_DIFF_MAX_DIFF)
+        assert(abs(rgb_diffs[1]) < THRESHOLD_DIFF_MAX_DIFF)
+        assert(abs(rgb_diffs[2]) < THRESHOLD_DIFF_MAX_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
new file mode 100644
index 0000000..bf0e2ea
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
@@ -0,0 +1,86 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.image
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Capture a set of raw images with increasing gains and measure the noise.
+
+    Capture raw-only, in a burst.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Each shot must be 1% noisier (by the variance metric) than the previous
+    # one.
+    VAR_THRESH = 1.01
+
+    NUM_STEPS = 5
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        if not its.caps.raw16(props) or \
+           not its.caps.manual_sensor(props) or \
+           not its.caps.read_3a(props):
+            print "Test skipped"
+            return
+
+        # Expose for the scene with min sensitivity
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_max - sens_min) / NUM_STEPS
+        s_ae,e_ae,_,_,_  = cam.do_3a(get_results=True)
+        s_e_prod = s_ae * e_ae
+
+        reqs = []
+        settings = []
+        for s in range(sens_min, sens_max, sens_step):
+            e = int(s_e_prod / float(s))
+            req = its.objects.manual_capture_request(s, e)
+            reqs.append(req)
+            settings.append((s,e))
+
+        caps = cam.do_capture(reqs, cam.CAP_RAW)
+
+        variances = []
+        for i,cap in enumerate(caps):
+            (s,e) = settings[i]
+
+            # Measure the variance. Each shot should be noisier than the
+            # previous shot (as the gain is increasing).
+            plane = its.image.convert_capture_to_planes(cap, props)[1]
+            tile = its.image.get_image_patch(plane, 0.45,0.45,0.1,0.1)
+            var = its.image.compute_image_variances(tile)[0]
+            variances.append(var)
+
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_s=%05d_var=%f.jpg" % (NAME,s,var))
+            print "s=%d, e=%d, var=%e"%(s,e,var)
+
+        pylab.plot(range(len(variances)), variances)
+        matplotlib.pyplot.savefig("%s_variances.png" % (NAME))
+
+        # Test that each shot is noisier than the previous one.
+        for i in range(len(variances) - 1):
+            assert(variances[i] < variances[i+1] / VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
new file mode 100644
index 0000000..8e36219
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
@@ -0,0 +1,79 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.image
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Capture a set of raw images with increasing gains and measure the noise.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Each shot must be 1% noisier (by the variance metric) than the previous
+    # one.
+    VAR_THRESH = 1.01
+
+    NUM_STEPS = 5
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        if (not its.caps.raw16(props) or
+            not its.caps.manual_sensor(props) or
+            not its.caps.read_3a(props)):
+            print "Test skipped"
+            return
+
+        # Expose for the scene with min sensitivity
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_max - sens_min) / NUM_STEPS
+        s_ae,e_ae,_,_,_  = cam.do_3a(get_results=True)
+        s_e_prod = s_ae * e_ae
+
+        variances = []
+        for s in range(sens_min, sens_max, sens_step):
+
+            e = int(s_e_prod / float(s))
+            req = its.objects.manual_capture_request(s, e)
+
+            # Capture raw+yuv, but only look at the raw.
+            cap,_ = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+            # Measure the variance. Each shot should be noisier than the
+            # previous shot (as the gain is increasing).
+            plane = its.image.convert_capture_to_planes(cap, props)[1]
+            tile = its.image.get_image_patch(plane, 0.45,0.45,0.1,0.1)
+            var = its.image.compute_image_variances(tile)[0]
+            variances.append(var)
+
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_s=%05d_var=%f.jpg" % (NAME,s,var))
+            print "s=%d, e=%d, var=%e"%(s,e,var)
+
+        pylab.plot(range(len(variances)), variances)
+        matplotlib.pyplot.savefig("%s_variances.png" % (NAME))
+
+        # Test that each shot is noisier than the previous one.
+        for i in range(len(variances) - 1):
+            assert(variances[i] < variances[i+1] / VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
new file mode 100644
index 0000000..7af51c5
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+
+def main():
+    """Test a sequence of shots with different tonemap curves.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # There should be 3 identical frames followed by a different set of
+    # 3 identical frames.
+    MAX_SAME_DELTA = 0.01
+    MIN_DIFF_DELTA = 0.10
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if (not its.caps.manual_sensor(props) or
+            not its.caps.manual_post_proc(props)):
+            print "Test skipped"
+            return
+
+        sens, exp_time, _,_,_ = cam.do_3a(do_af=False,get_results=True)
+
+        means = []
+
+        # Capture 3 manual shots with a linear tonemap.
+        req = its.objects.manual_capture_request(sens, exp_time, True)
+        for i in [0,1,2]:
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            means.append(tile.mean(0).mean(0))
+
+        # Capture 3 manual shots with the default tonemap.
+        req = its.objects.manual_capture_request(sens, exp_time, False)
+        for i in [3,4,5]:
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            means.append(tile.mean(0).mean(0))
+
+        # Compute the delta between each consecutive frame pair.
+        deltas = [numpy.max(numpy.fabs(means[i+1]-means[i])) \
+                  for i in range(len(means)-1)]
+        print "Deltas between consecutive frames:", deltas
+
+        assert(all([abs(deltas[i]) < MAX_SAME_DELTA for i in [0,1,3,4]]))
+        assert(abs(deltas[2]) > MIN_DIFF_DELTA)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
new file mode 100644
index 0000000..2367ca2
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
@@ -0,0 +1,85 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test that the reported sizes and formats for image capture work.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.03
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        # Use a manual request with a linear tonemap so that the YUV and JPEG
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        rgbs = []
+
+        for size in its.objects.get_available_output_sizes("yuv", props):
+            out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "yuv")
+            assert(cap["width"] == size[0])
+            assert(cap["height"] == size[1])
+            print "Captured YUV %dx%d" % (cap["width"], cap["height"])
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_yuv_w%d_h%d.jpg"%(
+                    NAME,size[0],size[1]))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            rgbs.append(rgb)
+
+        for size in its.objects.get_available_output_sizes("jpg", props):
+            out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "jpeg")
+            assert(cap["width"] == size[0])
+            assert(cap["height"] == size[1])
+            img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+            its.image.write_image(img, "%s_jpg_w%d_h%d.jpg"%(
+                    NAME,size[0], size[1]))
+            assert(img.shape[0] == size[1])
+            assert(img.shape[1] == size[0])
+            assert(img.shape[2] == 3)
+            print "Captured JPEG %dx%d" % (cap["width"], cap["height"])
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            rgbs.append(rgb)
+
+        max_diff = 0
+        rgb0 = rgbs[0]
+        for rgb1 in rgbs[1:]:
+            rms_diff = math.sqrt(
+                    sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+            max_diff = max(max_diff, rms_diff)
+        print "Max RMS difference:", max_diff
+        assert(max_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
new file mode 100644
index 0000000..4924c7b
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
@@ -0,0 +1,49 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test capturing a single frame as both DNG and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if (not its.caps.raw(props) or
+            not its.caps.read_3a(props)):
+            print "Test skipped"
+            return
+
+        cam.do_3a()
+
+        req = its.objects.auto_capture_request()
+        cap_dng, cap_yuv = cam.do_capture(req, cam.CAP_DNG_YUV)
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s.jpg" % (NAME))
+
+        with open("%s.dng"%(NAME), "wb") as f:
+            f.write(cap_dng["data"])
+
+        # No specific pass/fail check; test is assumed to have succeeded if
+        # it completes.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
new file mode 100644
index 0000000..15aa17c
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both YUV and JPEG outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.01
+
+    fmt_yuv =  {"format":"yuv"}
+    fmt_jpeg = {"format":"jpeg"}
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if not its.caps.compute_target_exposure(props):
+            print "Test skipped"
+            return
+
+        # Use a manual request with a linear tonemap so that the YUV and JPEG
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_yuv, cap_jpeg = cam.do_capture(req, [fmt_yuv, fmt_jpeg])
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv, True)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        img = its.image.convert_capture_to_rgb_image(cap_jpeg, True)
+        its.image.write_image(img, "%s_jpeg.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
new file mode 100644
index 0000000..7a345c9
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both RAW and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.02
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        if (not its.caps.compute_target_exposure(props) or
+            not its.caps.raw16(props)):
+            print "Test skipped"
+            return
+
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_raw, cap_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # Raw shots are 1/2 x 1/2 smaller after conversion to RGB, so scale the
+        # tile appropriately.
+        img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
+        its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
new file mode 100644
index 0000000..15612c5
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
@@ -0,0 +1,65 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both RAW10 and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.02
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        if (not its.caps.compute_target_exposure(props) or
+            not its.caps.raw10(props)):
+            print "Test skipped"
+            return
+
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_raw, cap_yuv = cam.do_capture(req,
+                [{"format":"raw10"}, {"format":"yuv"}])
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # Raw shots are 1/2 x 1/2 smaller after conversion to RGB, so scale the
+        # tile appropriately.
+        img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
+        its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/tutorial.py b/apps/CameraITS/tests/tutorial.py
new file mode 100644
index 0000000..1b1999e
--- /dev/null
+++ b/apps/CameraITS/tests/tutorial.py
@@ -0,0 +1,188 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# --------------------------------------------------------------------------- #
+# The Google Python style guide should be used for scripts:                   #
+# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html              #
+# --------------------------------------------------------------------------- #
+
+# The ITS modules that are in the pymodules/its/ directory. To see formatted
+# docs, use the "pydoc" command:
+#
+# > pydoc its.image
+#
+import its.image
+import its.device
+import its.objects
+import its.target
+
+# Standard Python modules.
+import os.path
+import pprint
+import math
+
+# Modules from the numpy, scipy, and matplotlib libraries. These are used for
+# the image processing code, and images are represented as numpy arrays.
+import pylab
+import numpy
+import matplotlib
+import matplotlib.pyplot
+
+# Each script has a "main" function.
+def main():
+
+    # Each script has a string description of what it does. This is the first
+    # entry inside the main function.
+    """Tutorial script to show how to use the ITS infrastructure.
+    """
+
+    # A convention in each script is to use the filename (without the extension)
+    # as the name of the test, when printing results to the screen or dumping
+    # files.
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # The standard way to open a session with a connected camera device. This
+    # creates a cam object which encapsulates the session and which is active
+    # within the scope of the "with" block; when the block exits, the camera
+    # session is closed.
+    with its.device.ItsSession() as cam:
+
+        # Get the static properties of the camera device. Returns a Python
+        # associative array object; print it to the console.
+        props = cam.get_camera_properties()
+        pprint.pprint(props)
+
+        # Grab a YUV frame with manual exposure of sensitivity = 200, exposure
+        # duration = 50ms.
+        req = its.objects.manual_capture_request(200, 50*1000*1000)
+        cap = cam.do_capture(req)
+
+        # Print the properties of the captured frame; width and height are
+        # integers, and the metadata is a Python associative array object.
+        print "Captured image width:", cap["width"]
+        print "Captured image height:", cap["height"]
+        pprint.pprint(cap["metadata"])
+
+        # The captured image is YUV420. Convert to RGB, and save as a file.
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_rgb_1.jpg" % (NAME))
+
+        # Can also get the Y,U,V planes separately; save these to greyscale
+        # files.
+        yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+        its.image.write_image(yimg, "%s_y_plane_1.jpg" % (NAME))
+        its.image.write_image(uimg, "%s_u_plane_1.jpg" % (NAME))
+        its.image.write_image(vimg, "%s_v_plane_1.jpg" % (NAME))
+
+        # Run 3A on the device. In this case, just use the entire image as the
+        # 3A region, and run each of AWB,AE,AF. Can also change the region and
+        # specify independently for each of AE,AWB,AF whether it should run.
+        #
+        # NOTE: This may fail, if the camera isn't pointed at a reasonable
+        # target scene. If it fails, the script will end. The logcat messages
+        # can be inspected to see the status of 3A running on the device.
+        #
+        # > adb logcat -s 'ItsService:v'
+        #
+        # If this keeps on failing, try also rebooting the device before
+        # running the test.
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        print "AE: sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
+        print "AWB: gains", gains, "transform", xform
+        print "AF: distance", focus
+
+        # Grab a new manual frame, using the 3A values, and convert it to RGB
+        # and save it to a file too. Note that the "req" object is just a
+        # Python dictionary that is pre-populated by the its.objets module
+        # functions (in this case a default manual capture), and the key/value
+        # pairs in the object can be used to set any field of the capture
+        # request. Here, the AWB gains and transform (CCM) are being used.
+        # Note that the CCM transform is in a rational format in capture
+        # requests, meaning it is an object with integer numerators and
+        # denominators. The 3A routine returns simple floats instead, however,
+        # so a conversion from float to rational must be performed.
+        req = its.objects.manual_capture_request(sens, exp)
+        xform_rat = its.objects.float_to_rational(xform)
+
+        req["android.colorCorrection.transform"] = xform_rat
+        req["android.colorCorrection.gains"] = gains
+        cap = cam.do_capture(req)
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_rgb_2.jpg" % (NAME))
+
+        # Print out the actual capture request object that was used.
+        pprint.pprint(req)
+
+        # Images are numpy arrays. The dimensions are (h,w,3) when indexing,
+        # in the case of RGB images. Greyscale images are (h,w,1). Pixels are
+        # generally float32 values in the [0,1] range, however some of the
+        # helper functions in its.image deal with the packed YUV420 and other
+        # formats of images that come from the device (and convert them to
+        # float32).
+        # Print the dimensions of the image, and the top-left pixel value,
+        # which is an array of 3 floats.
+        print "RGB image dimensions:", rgbimg.shape
+        print "RGB image top-left pixel:", rgbimg[0,0]
+
+        # Grab a center tile from the image; this returns a new image. Save
+        # this tile image. In this case, the tile is the middle 10% x 10%
+        # rectangle.
+        tile = its.image.get_image_patch(rgbimg, 0.45, 0.45, 0.1, 0.1)
+        its.image.write_image(tile, "%s_rgb_2_tile.jpg" % (NAME))
+
+        # Compute the mean values of the center tile image.
+        rgb_means = its.image.compute_image_means(tile)
+        print "RGB means:", rgb_means
+
+        # Apply a lookup table to the image, and save the new version. The LUT
+        # is basically a tonemap, and can be used to implement a gamma curve.
+        # In this case, the LUT is used to double the value of each pixel.
+        lut = numpy.array([2*i for i in xrange(65536)])
+        rgbimg_lut = its.image.apply_lut_to_image(rgbimg, lut)
+        its.image.write_image(rgbimg_lut, "%s_rgb_2_lut.jpg" % (NAME))
+
+        # Apply a 3x3 matrix to the image, and save the new version. The matrix
+        # is a numpy array, in row major order, and the pixel values are right-
+        # multipled to it (when considered as column vectors). The example
+        # matrix here just boosts the blue channel by 10%.
+        mat = numpy.array([[1, 0, 0  ],
+                           [0, 1, 0  ],
+                           [0, 0, 1.1]])
+        rgbimg_mat = its.image.apply_matrix_to_image(rgbimg, mat)
+        its.image.write_image(rgbimg_mat, "%s_rgb_2_mat.jpg" % (NAME))
+
+        # Compute a histogram of the luma image, in 256 buckeits.
+        yimg,_,_ = its.image.convert_capture_to_planes(cap)
+        hist,_ = numpy.histogram(yimg*255, 256, (0,256))
+
+        # Plot the histogram using matplotlib, and save as a PNG image.
+        pylab.plot(range(256), hist.tolist())
+        pylab.xlabel("Luma DN")
+        pylab.ylabel("Pixel count")
+        pylab.title("Histogram of luma channel of captured image")
+        matplotlib.pyplot.savefig("%s_histogram.png" % (NAME))
+
+        # Capture a frame to be returned as a JPEG. Load it as an RGB image,
+        # then save it back as a JPEG.
+        cap = cam.do_capture(req, cam.CAP_JPEG)
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_jpg.jpg" % (NAME))
+        r,g,b = its.image.convert_capture_to_planes(cap)
+        its.image.write_image(r, "%s_r.jpg" % (NAME))
+
+# This is the standard boilerplate in each test that allows the script to both
+# be executed directly and imported as a module.
+if __name__ == '__main__':
+    main()
+