CameraITS: add raw12 test
Bug: 20494692
Change-Id: I53b095a7d7df42253549648102a2008d51f39217
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
index b97091b..e57ff88 100644
--- a/apps/CameraITS/pymodules/its/caps.py
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -133,6 +133,17 @@
"""
return len(its.objects.get_available_output_sizes("raw10", props)) > 0
+def raw12(props):
+ """Returns whether a device supports RAW12 output.
+
+ Args:
+ props: Camera properties object.
+
+ Returns:
+ Boolean.
+ """
+ return len(its.objects.get_available_output_sizes("raw12", props)) > 0
+
def sensor_fusion(props):
"""Returns whether the camera and motion sensor timestamps for the device
are in the same time domain and can be compared directly.
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index 035e70b..e396483 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -368,7 +368,7 @@
The out_surfaces field can specify the width(s), height(s), and
format(s) of the captured image. The formats may be "yuv", "jpeg",
- "dng", "raw", or "raw10". The default is a YUV420 frame ("yuv")
+ "dng", "raw", "raw10", or "raw12". The default is a YUV420 frame ("yuv")
corresponding to a full sensor frame.
Note that one or more surfaces can be specified, allowing a capture to
diff --git a/apps/CameraITS/pymodules/its/image.py b/apps/CameraITS/pymodules/its/image.py
index b3bdb65..03f8ff9 100644
--- a/apps/CameraITS/pymodules/its/image.py
+++ b/apps/CameraITS/pymodules/its/image.py
@@ -64,6 +64,9 @@
if cap["format"] == "raw10":
assert(props is not None)
cap = unpack_raw10_capture(cap, props)
+ if cap["format"] == "raw12":
+ assert(props is not None)
+ cap = unpack_raw12_capture(cap, props)
if cap["format"] == "yuv":
y = cap["data"][0:w*h]
u = cap["data"][w*h:w*h*5/4]
@@ -114,12 +117,12 @@
raise its.error.Error('Invalid raw-10 buffer width')
w = img.shape[1]*4/5
h = img.shape[0]
- # Cut out the 4x8b MSBs and shift to bits [10:2] in 16b words.
+ # Cut out the 4x8b MSBs and shift to bits [9:2] in 16b words.
msbs = numpy.delete(img, numpy.s_[4::5], 1)
msbs = msbs.astype(numpy.uint16)
msbs = numpy.left_shift(msbs, 2)
msbs = msbs.reshape(h,w)
- # Cut out the 4x2b LSBs and put each in bits [2:0] of their own 8b words.
+ # Cut out the 4x2b LSBs and put each in bits [1:0] of their own 8b words.
lsbs = img[::, 4::5].reshape(h,w/4)
lsbs = numpy.right_shift(
numpy.packbits(numpy.unpackbits(lsbs).reshape(h,w/4,4,2),3), 6)
@@ -128,6 +131,56 @@
img16 = numpy.bitwise_or(msbs, lsbs).reshape(h,w)
return img16
+def unpack_raw12_capture(cap, props):
+ """Unpack a raw-12 capture to a raw-16 capture.
+
+ Args:
+ cap: A raw-12 capture object.
+ props: Camera properties object.
+
+ Returns:
+ New capture object with raw-16 data.
+ """
+    # Data is packed as 2x12b pixels in 3 bytes, with the first 2 bytes holding
+    # the MSBs of the pixels, and the 3rd byte holding 2x4b LSBs.
+ w,h = cap["width"], cap["height"]
+ if w % 2 != 0:
+ raise its.error.Error('Invalid raw-12 buffer width')
+ cap = copy.deepcopy(cap)
+ cap["data"] = unpack_raw12_image(cap["data"].reshape(h,w*3/2))
+ cap["format"] = "raw"
+ return cap
+
+def unpack_raw12_image(img):
+ """Unpack a raw-12 image to a raw-16 image.
+
+ Output image will have the 12 LSBs filled in each 16b word, and the 4 MSBs
+ will be set to zero.
+
+ Args:
+ img: A raw-12 image, as a uint8 numpy array.
+
+ Returns:
+ Image as a uint16 numpy array, with all row padding stripped.
+ """
+ if img.shape[1] % 3 != 0:
+ raise its.error.Error('Invalid raw-12 buffer width')
+ w = img.shape[1]*2/3
+ h = img.shape[0]
+ # Cut out the 2x8b MSBs and shift to bits [11:4] in 16b words.
+ msbs = numpy.delete(img, numpy.s_[2::3], 1)
+ msbs = msbs.astype(numpy.uint16)
+ msbs = numpy.left_shift(msbs, 4)
+ msbs = msbs.reshape(h,w)
+ # Cut out the 2x4b LSBs and put each in bits [3:0] of their own 8b words.
+ lsbs = img[::, 2::3].reshape(h,w/2)
+ lsbs = numpy.right_shift(
+ numpy.packbits(numpy.unpackbits(lsbs).reshape(h,w/2,2,4),3), 4)
+ lsbs = lsbs.reshape(h,w)
+ # Fuse the MSBs and LSBs back together
+ img16 = numpy.bitwise_or(msbs, lsbs).reshape(h,w)
+ return img16
+
def convert_capture_to_planes(cap, props=None):
"""Convert a captured image object to separate image planes.
diff --git a/apps/CameraITS/pymodules/its/objects.py b/apps/CameraITS/pymodules/its/objects.py
index 22540b8..f6d2e2d 100644
--- a/apps/CameraITS/pymodules/its/objects.py
+++ b/apps/CameraITS/pymodules/its/objects.py
@@ -142,13 +142,15 @@
"""Return a sorted list of available output sizes for a given format.
Args:
- fmt: the output format, as a string in ["jpg", "yuv", "raw"].
+ fmt: the output format, as a string in
+ ["jpg", "yuv", "raw", "raw10", "raw12"].
props: the object returned from its.device.get_camera_properties().
Returns:
A sorted list of (w,h) tuples (sorted large-to-small).
"""
- fmt_codes = {"raw":0x20, "raw10":0x25, "yuv":0x23, "jpg":0x100, "jpeg":0x100}
+ fmt_codes = {"raw":0x20, "raw10":0x25, "raw12":0x26, "yuv":0x23,
+ "jpg":0x100, "jpeg":0x100}
configs = props['android.scaler.streamConfigurationMap']\
['availableStreamConfigurations']
fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_codes[fmt]]
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
new file mode 100644
index 0000000..bbd9144
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
@@ -0,0 +1,63 @@
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+ """Test capturing a single frame as both RAW12 and YUV outputs.
+ """
+ NAME = os.path.basename(__file__).split(".")[0]
+
+ THRESHOLD_MAX_RMS_DIFF = 0.035
+
+ with its.device.ItsSession() as cam:
+ props = cam.get_camera_properties()
+ its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+ its.caps.raw12(props) and
+ its.caps.per_frame_control(props))
+
+ # Use a manual request with a linear tonemap so that the YUV and RAW
+ # should look the same (once converted by the its.image module).
+ e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+ req = its.objects.manual_capture_request(s, e, True)
+
+ cap_raw, cap_yuv = cam.do_capture(req,
+ [{"format":"raw12"}, {"format":"yuv"}])
+
+ img = its.image.convert_capture_to_rgb_image(cap_yuv)
+ its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
+ tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+ rgb0 = its.image.compute_image_means(tile)
+
+ # Raw shots are 1/2 x 1/2 smaller after conversion to RGB, so scale the
+ # tile appropriately.
+ img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
+ its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
+ tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
+ rgb1 = its.image.compute_image_means(tile)
+
+ rms_diff = math.sqrt(
+ sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+ print "RMS difference:", rms_diff
+ assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+ main()
+
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
index e3d0b6d..a9021e0 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
@@ -146,6 +146,7 @@
private AtomicInteger mCountCallbacksRemaining = new AtomicInteger();
private AtomicInteger mCountRawOrDng = new AtomicInteger();
private AtomicInteger mCountRaw10 = new AtomicInteger();
+ private AtomicInteger mCountRaw12 = new AtomicInteger();
private AtomicInteger mCountJpg = new AtomicInteger();
private AtomicInteger mCountYuv = new AtomicInteger();
private AtomicInteger mCountCapRes = new AtomicInteger();
@@ -658,6 +659,8 @@
jsonSurface.put("format", "raw");
} else if (format == ImageFormat.RAW10) {
jsonSurface.put("format", "raw10");
+ } else if (format == ImageFormat.RAW12) {
+ jsonSurface.put("format", "raw12");
} else if (format == ImageFormat.JPEG) {
jsonSurface.put("format", "jpeg");
} else if (format == ImageFormat.YUV_420_888) {
@@ -1004,6 +1007,7 @@
mCountJpg.set(0);
mCountYuv.set(0);
mCountRaw10.set(0);
+ mCountRaw12.set(0);
mCountCapRes.set(0);
mCaptureRawIsDng = false;
mCaptureResults = new CaptureResult[requests.size()];
@@ -1028,10 +1032,13 @@
sizes = ItsUtils.getJpegOutputSizes(mCameraCharacteristics);
} else if ("raw".equals(sformat)) {
formats[i] = ImageFormat.RAW_SENSOR;
- sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
+ sizes = ItsUtils.getRaw16OutputSizes(mCameraCharacteristics);
} else if ("raw10".equals(sformat)) {
formats[i] = ImageFormat.RAW10;
- sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
+ sizes = ItsUtils.getRaw10OutputSizes(mCameraCharacteristics);
+ } else if ("raw12".equals(sformat)) {
+ formats[i] = ImageFormat.RAW12;
+ sizes = ItsUtils.getRaw12OutputSizes(mCameraCharacteristics);
} else if ("dng".equals(sformat)) {
formats[i] = ImageFormat.RAW_SENSOR;
sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
@@ -1170,6 +1177,12 @@
ByteBuffer buf = ByteBuffer.wrap(img);
int count = mCountRaw10.getAndIncrement();
mSocketRunnableObj.sendResponseCaptureBuffer("raw10Image", buf);
+ } else if (format == ImageFormat.RAW12) {
+ Logt.i(TAG, "Received RAW12 capture");
+ byte[] img = ItsUtils.getDataFromImage(capture);
+ ByteBuffer buf = ByteBuffer.wrap(img);
+ int count = mCountRaw12.getAndIncrement();
+ mSocketRunnableObj.sendResponseCaptureBuffer("raw12Image", buf);
} else if (format == ImageFormat.RAW_SENSOR) {
Logt.i(TAG, "Received RAW16 capture");
int count = mCountRawOrDng.getAndIncrement();
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
index 2011314..b09b90c 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
@@ -91,11 +91,21 @@
}
}
- public static Size[] getRawOutputSizes(CameraCharacteristics ccs)
+ public static Size[] getRaw16OutputSizes(CameraCharacteristics ccs)
throws ItsException {
return getOutputSizes(ccs, ImageFormat.RAW_SENSOR);
}
+ public static Size[] getRaw10OutputSizes(CameraCharacteristics ccs)
+ throws ItsException {
+ return getOutputSizes(ccs, ImageFormat.RAW10);
+ }
+
+ public static Size[] getRaw12OutputSizes(CameraCharacteristics ccs)
+ throws ItsException {
+ return getOutputSizes(ccs, ImageFormat.RAW12);
+ }
+
public static Size[] getJpegOutputSizes(CameraCharacteristics ccs)
throws ItsException {
return getOutputSizes(ccs, ImageFormat.JPEG);
@@ -139,7 +149,7 @@
buffer.get(data);
return data;
} else if (format == ImageFormat.YUV_420_888 || format == ImageFormat.RAW_SENSOR
- || format == ImageFormat.RAW10) {
+ || format == ImageFormat.RAW10 || format == ImageFormat.RAW12) {
int offset = 0;
data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
int maxRowSize = planes[0].getRowStride();
@@ -213,6 +223,7 @@
return 3 == planes.length;
case ImageFormat.RAW_SENSOR:
case ImageFormat.RAW10:
+ case ImageFormat.RAW12:
case ImageFormat.JPEG:
return 1 == planes.length;
default: