CameraITS: add full size YUV burst capture test

We used to exercise this in the face detection test. Switch the face
detection test to VGA-size capture and add a dedicated full-size burst
capture test.

Bug: 31912588
Change-Id: I5537a77377701648dd351c0195c4496d76ad6c47
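
For context on the ItsUtils.java log additions below: image reads are gated
on a byte-counted Semaphore ("quota"), presumably to bound how much image
data is held on the CPU side at once, which is what a full-size burst can
back up against. A minimal sketch of that pattern, assuming a Semaphore
whose permits count bytes; the class name and budget constant here are
illustrative and not taken from ItsService:

    import java.nio.ByteBuffer;
    import java.util.concurrent.Semaphore;

    class ImageByteQuota {
        // Hypothetical budget; permits are counted in bytes.
        private static final int QUOTA_BYTES = 64 * 1024 * 1024;
        private final Semaphore quota = new Semaphore(QUOTA_BYTES);

        // Block until the buffer's bytes fit under the budget, then copy them out.
        byte[] read(ByteBuffer buffer) throws InterruptedException {
            int size = buffer.capacity();
            quota.acquire(size);
            byte[] data = new byte[size];
            buffer.get(data);
            return data;
        }

        // Return the permits once the copied bytes have been consumed.
        void release(byte[] data) {
            quota.release(data.length);
        }
    }
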
diff --git a/apps/CameraITS/tests/scene0/test_burst_capture.py b/apps/CameraITS/tests/scene0/test_burst_capture.py
new file mode 100644
index 0000000..e6ee100
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_burst_capture.py
@@ -0,0 +1,40 @@
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test that capturing a burst of full-size images does not time out.
+       This test verifies that the entire capture pipeline can keep up with
+       full-size capture + CPU read for at least some time.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+    NUM_TEST_FRAMES = 20
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        req = its.objects.auto_capture_request()
+        caps = cam.do_capture([req]*NUM_TEST_FRAMES)
+
+        cap = caps[0]
+        img = its.image.convert_capture_to_rgb_image(cap, props=props)
+        img_name = "%s.jpg" % (NAME)
+        its.image.write_image(img, img_name)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene2/test_faces.py b/apps/CameraITS/tests/scene2/test_faces.py
index ba4f317..f42552d 100644
--- a/apps/CameraITS/tests/scene2/test_faces.py
+++ b/apps/CameraITS/tests/scene2/test_faces.py
@@ -25,6 +25,7 @@
     FD_MODE_OFF = 0
     FD_MODE_SIMPLE = 1
     FD_MODE_FULL = 2
+    W, H = 640, 480
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -42,14 +43,12 @@
             assert(FD_MODE_OFF <= fd_mode <= FD_MODE_FULL)
             req = its.objects.auto_capture_request()
             req['android.statistics.faceDetectMode'] = fd_mode
-            caps = cam.do_capture([req]*NUM_TEST_FRAMES)
+            fmt = {"format":"yuv", "width":W, "height":H}
+            caps = cam.do_capture([req]*NUM_TEST_FRAMES, fmt)
             for i,cap in enumerate(caps):
                 md = cap['metadata']
                 assert(md['android.statistics.faceDetectMode'] == fd_mode)
                 faces = md['android.statistics.faces']
-                img = its.image.convert_capture_to_rgb_image(cap, props=props)
-                img_name = "%s_fd_mode_%s.jpg" % (NAME, fd_mode)
-                its.image.write_image(img, img_name)
 
                 # 0 faces should be returned for OFF mode
                 if fd_mode == FD_MODE_OFF:
@@ -58,6 +57,9 @@
                 # Face detection could take several frames to warm up,
                 # but it should detect at least one face in last frame
                 if i == NUM_TEST_FRAMES - 1:
+                    img = its.image.convert_capture_to_rgb_image(cap, props=props)
+                    img_name = "%s_fd_mode_%s.jpg" % (NAME, fd_mode)
+                    its.image.write_image(img, img_name)
                     if len(faces) == 0:
                         print "Error: no face detected in mode", fd_mode
                         assert(0)
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
index b0eaf35..363c9e1 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
@@ -187,13 +187,16 @@
             ByteBuffer buffer = planes[0].getBuffer();
             if (quota != null) {
                 try {
+                    Logt.i(TAG, "Start waiting for quota Semaphore");
                     quota.acquire(buffer.capacity());
+                    Logt.i(TAG, "Acquired quota Semaphore. Start reading image");
                 } catch (java.lang.InterruptedException e) {
                     Logt.e(TAG, "getDataFromImage error acquiring memory quota. Interrupted", e);
                 }
             }
             data = new byte[buffer.capacity()];
             buffer.get(data);
+            Logt.i(TAG, String.format("Done reading jpeg image, format %d", format));
             return data;
         } else if (format == ImageFormat.YUV_420_888 || format == ImageFormat.RAW_SENSOR
                 || format == ImageFormat.RAW10 || format == ImageFormat.RAW12) {
@@ -201,7 +204,9 @@
             int dataSize = width * height * ImageFormat.getBitsPerPixel(format) / 8;
             if (quota != null) {
                 try {
+                    Logt.i(TAG, "Start waiting for quota Semaphore");
                     quota.acquire(dataSize);
+                    Logt.i(TAG, "Acquired quota Semaphore. Start reading image");
                 } catch (java.lang.InterruptedException e) {
                     Logt.e(TAG, "getDataFromImage error acquiring memory quota. Interrupted", e);
                 }
@@ -220,8 +225,9 @@
                 int pixelStride = planes[i].getPixelStride();
                 int bytesPerPixel = ImageFormat.getBitsPerPixel(format) / 8;
                 Logt.i(TAG, String.format(
-                        "Reading image: fmt %d, plane %d, w %d, h %d, rowStride %d, pixStride %d",
-                        format, i, width, height, rowStride, pixelStride));
+                        "Reading image: fmt %d, plane %d, w %d, h %d, " +
+                        "rowStride %d, pixStride %d, bytesPerPixel %d",
+                        format, i, width, height, rowStride, pixelStride, bytesPerPixel));
                 // For multi-planar yuv images, assuming yuv420 with 2x2 chroma subsampling.
                 int w = (i == 0) ? width : width / 2;
                 int h = (i == 0) ? height : height / 2;