CameraITS: Enable logical multi-camera stream configuration
- Add support for specifying a physical camera id for each configured stream.
- Support YUV and RAW streams.
Test: Hack test_yuv_plus_raw and capture physical YUV streams
Bug: 66697407
Change-Id: I4923467d11d3a4094a8aa94a2d80741d01f010ca
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
index e6f096f..3d409a1 100644
--- a/apps/CameraITS/pymodules/its/caps.py
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -446,6 +446,33 @@
return props.has_key("android.lens.info.minimumFocusDistance") and \
props["android.lens.info.minimumFocusDistance"] == 0
+def logical_multi_camera(props):
+    """Returns whether a device is a logical multi-camera.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    # 11 == REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA
+    return props.has_key("android.request.availableCapabilities") and \
+           11 in props["android.request.availableCapabilities"]
+
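+# Usage sketch: a test would typically gate on this capability via
+#   its.caps.skip_unless(its.caps.logical_multi_camera(props))
+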
+def logical_multi_camera_physical_ids(props):
+    """Returns a logical multi-camera's underlying physical camera ids.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        List of ids of the physical cameras backing the logical
+        multi-camera; empty if the device is not a logical multi-camera.
+    """
+    physical_ids_list = []
+    if logical_multi_camera(props):
+        # The property encodes the physical ids as a list of byte values
+        # forming a sequence of null-terminated strings.
+        physical_ids = props['android.logicalMultiCamera.physicalIds']
+        physical_ids_string = ''.join(chr(e) for e in physical_ids)
+        # filter() drops the empty string left by the trailing '\0'.
+        physical_ids_list = filter(None, physical_ids_string.split('\x00'))
+    return physical_ids_list
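+
+# Example (hypothetical values): for a logical camera backed by physical
+# cameras "0" and "1", the property holds the byte values of "0\x001\x00",
+# and logical_multi_camera_physical_ids() returns ['0', '1'].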
def debug_mode():
"""Returns True/False for whether test is run in debug mode.
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index 66d28c2..3e3cc02 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -25,6 +25,7 @@
import hashlib
import numpy
import string
+import unicodedata
CMD_DELAY = 1 # seconds
@@ -497,6 +498,17 @@
"dng", "raw", "raw10", "raw12", or "rawStats". The default is a YUV420
frame ("yuv") corresponding to a full sensor frame.
+ Optionally, an entry in out_surfaces can specify a physical camera id if
+ the current camera device is a logical multi-camera. The physical camera
+ id must refer to a physical camera backing this logical camera device.
+ Only the "yuv", "raw", "raw10", and "raw12" formats support the physical
+ camera id field.
+
+ Currently only two physical streams with the same format are supported,
+ one from each physical camera:
+ - yuv physical streams of the same size.
+ - raw physical streams with the same or different sizes, depending on
+   device capability (different physical cameras may have different raw
+   sizes).
+
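+ For example (using hypothetical physical camera ids "0" and "1"), one
+ raw stream from each physical camera could be requested as:
+
+     out_surfaces = [{"format": "raw", "physicalCamera": "0"},
+                     {"format": "raw", "physicalCamera": "1"}]
+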
Note that one or more surfaces can be specified, allowing a capture to
request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
yuv+jpeg, raw+yuv+jpeg. If the size is omitted for a surface, the
@@ -658,14 +670,36 @@
cmd["outputSurfaces"] = [{"format": "yuv",
"width" : max_yuv_size[0],
"height": max_yuv_size[1]}]
+
+ # Sort the requested streams into physical streams (those that specify
+ # a physicalCamera id) and logical streams. All physical streams must
+ # share a single format.
+ physical_cam_ids = {}    # surface index -> physical camera id
+ physical_buffers = {}    # physical camera id -> list of received buffers
+ physical_cam_format = None
+ logical_cam_formats = []
+ for i, s in enumerate(cmd["outputSurfaces"]):
+     if "physicalCamera" in s and "format" in s and \
+             s["format"] in ["yuv", "raw", "raw10", "raw12"]:
+         if physical_cam_format is not None and \
+                 s["format"] != physical_cam_format:
+             raise its.error.Error('ITS does not support capturing '
+                                   'multiple physical formats yet')
+         physical_cam_ids[i] = s["physicalCamera"]
+         physical_buffers[s["physicalCamera"]] = []
+         physical_cam_format = s["format"]
+     else:
+         logical_cam_formats.append(s["format"])
+
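+ # E.g. (hypothetical) out_surfaces = [{"format": "yuv"},
+ #         {"format": "raw", "physicalCamera": "0"},
+ #         {"format": "raw", "physicalCamera": "1"}]
+ # yields physical_cam_ids = {1: "0", 2: "1"} and
+ # logical_cam_formats = ["yuv"].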
ncap = len(cmd["captureRequests"])
nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])
# Only allow yuv output to multiple targets
- yuv_surfaces = [s for s in cmd["outputSurfaces"] if s["format"]=="yuv"]
- n_yuv = len(yuv_surfaces)
+ logical_yuv_surfaces = [s for s in cmd["outputSurfaces"]
+                         if s["format"] == "yuv" and
+                         "physicalCamera" not in s]
+ n_yuv = len(logical_yuv_surfaces)
# Compute the buffer size of YUV targets
yuv_maxsize_1d = 0
- for s in yuv_surfaces:
+ for s in logical_yuv_surfaces:
if not ("width" in s and "height" in s):
if self.props is None:
raise its.error.Error('Camera props are unavailable')
@@ -676,14 +710,14 @@
yuv_sizes = [c["width"]*c["height"]*3/2
if "width" in c and "height" in c
else yuv_maxsize_1d
- for c in yuv_surfaces]
+ for c in logical_yuv_surfaces]
# Currently we don't pass enough metadata from ItsService to distinguish
# different yuv streams of the same buffer size
if len(yuv_sizes) != len(set(yuv_sizes)):
raise its.error.Error(
'ITS does not support yuv outputs of same buffer size')
- if len(formats) > len(set(formats)):
- if n_yuv != len(formats) - len(set(formats)) + 1:
+ if len(logical_cam_formats) > len(set(logical_cam_formats)):
+ if n_yuv != len(logical_cam_formats) - len(set(logical_cam_formats)) + 1:
raise its.error.Error('Duplicate format requested')
raw_formats = 0;
@@ -742,8 +776,13 @@
widths = [out['width'] for out in outputs]
heights = [out['height'] for out in outputs]
else:
- # Just ignore other tags
- None
+ # ItsService tags physical-stream buffers with the format name plus the
+ # physical camera id (e.g. "rawImage0"); route them to the right stream.
+ tag_string = unicodedata.normalize('NFKD',
+         jsonObj['tag']).encode('ascii', 'ignore')
+ for x in ['rawImage', 'raw10Image', 'raw12Image', 'yuvImage']:
+     if tag_string.startswith(x):
+         physical_id = jsonObj['tag'][len(x):]
+         if physical_id in physical_cam_ids.values():
+             physical_buffers[physical_id].append(buf)
+             nbufs += 1
rets = []
for j,fmt in enumerate(formats):
objs = []
@@ -753,7 +792,10 @@
obj["height"] = heights[j]
obj["format"] = fmt
obj["metadata"] = mds[i]
- if fmt == 'yuv':
+
+ if j in physical_cam_ids:
+     # Surface j is a physical stream; pull from its per-camera buffers.
+     obj["data"] = physical_buffers[physical_cam_ids[j]][i]
+ elif fmt == 'yuv':
buf_size = widths[j] * heights[j] * 3 / 2
obj["data"] = yuv_bufs[buf_size][i]
else:
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
index febaf08..8677941 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
@@ -35,6 +35,7 @@
import android.hardware.camera2.TotalCaptureResult;
import android.hardware.camera2.params.InputConfiguration;
import android.hardware.camera2.params.MeteringRectangle;
+import android.hardware.camera2.params.OutputConfiguration;
import android.hardware.Sensor;
import android.hardware.SensorEvent;
import android.hardware.SensorEventListener;
@@ -54,6 +55,7 @@
import android.util.Log;
import android.util.Rational;
import android.util.Size;
+import android.util.SparseArray;
import android.view.Surface;
import com.android.ex.camera2.blocking.BlockingCameraManager;
@@ -142,6 +144,7 @@
private CameraDevice mCamera = null;
private CameraCaptureSession mSession = null;
private ImageReader[] mOutputImageReaders = null;
+ private SparseArray<String> mPhysicalStreamMap = new SparseArray<String>();
private ImageReader mInputImageReader = null;
private CameraCharacteristics mCameraCharacteristics = null;
@@ -209,7 +212,7 @@
private Handler mSensorHandler = null;
public interface CaptureCallback {
- void onCaptureAvailable(Image capture);
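+ // physicalCameraId identifies the physical camera that produced the
+ // capture; it is the empty string for captures from logical streams.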
+ void onCaptureAvailable(Image capture, String physicalCameraId);
}
public abstract class CaptureResultListener extends CameraCaptureSession.CaptureCallback {}
@@ -808,7 +811,13 @@
Image i = null;
try {
i = reader.acquireNextImage();
- listener.onCaptureAvailable(i);
+ // Determine which physical camera (if any) this reader is bound to;
+ // the empty string denotes a logical stream.
+ String physicalCameraId = "";
+ for (int idx = 0; idx < mOutputImageReaders.length; idx++) {
+     if (mOutputImageReaders[idx] == reader) {
+         physicalCameraId = mPhysicalStreamMap.get(idx, "");
+         break;
+     }
+ }
+ listener.onCaptureAvailable(i, physicalCameraId);
} finally {
if (i != null) {
i.close();
@@ -1141,6 +1150,7 @@
Size outputSizes[];
int outputFormats[];
int numSurfaces = 0;
+ mPhysicalStreamMap.clear();
if (jsonOutputSpecs != null) {
try {
@@ -1209,6 +1219,10 @@
if (height <= 0) {
height = ItsUtils.getMaxSize(sizes).getHeight();
}
+ String physicalCameraId = surfaceObj.optString("physicalCamera");
+ // JSONObject.optString() returns "" rather than null for a missing
+ // key, so check for an explicitly specified (non-empty) id.
+ if (!physicalCameraId.isEmpty()) {
+     mPhysicalStreamMap.put(i, physicalCameraId);
+ }
// The stats computation only applies to the active array region.
int aaw = ItsUtils.getActiveArrayCropRegion(mCameraCharacteristics).width();
@@ -1300,11 +1314,18 @@
numSurfaces = mOutputImageReaders.length;
numCaptureSurfaces = numSurfaces - (backgroundRequest ? 1 : 0);
- List<Surface> outputSurfaces = new ArrayList<Surface>(numSurfaces);
+ List<OutputConfiguration> outputConfigs =
+ new ArrayList<OutputConfiguration>(numSurfaces);
for (int i = 0; i < numSurfaces; i++) {
- outputSurfaces.add(mOutputImageReaders[i].getSurface());
+ OutputConfiguration config = new OutputConfiguration(
+         mOutputImageReaders[i].getSurface());
+ // Route this output to a specific physical camera when requested.
+ String physicalId = mPhysicalStreamMap.get(i);
+ if (physicalId != null) {
+     config.setPhysicalCameraId(physicalId);
+ }
+ outputConfigs.add(config);
}
- mCamera.createCaptureSession(outputSurfaces, sessionListener, mCameraHandler);
+ mCamera.createCaptureSessionByOutputConfigurations(outputConfigs,
+ sessionListener, mCameraHandler);
mSession = sessionListener.waitAndGetSession(TIMEOUT_IDLE_MS);
for (int i = 0; i < numSurfaces; i++) {
@@ -1569,7 +1590,7 @@
private final CaptureCallback mCaptureCallback = new CaptureCallback() {
@Override
- public void onCaptureAvailable(Image capture) {
+ public void onCaptureAvailable(Image capture, String physicalCameraId) {
try {
int format = capture.getFormat();
if (format == ImageFormat.JPEG) {
@@ -1582,20 +1603,21 @@
Logt.i(TAG, "Received YUV capture");
byte[] img = ItsUtils.getDataFromImage(capture, mSocketQueueQuota);
ByteBuffer buf = ByteBuffer.wrap(img);
- int count = mCountYuv.getAndIncrement();
- mSocketRunnableObj.sendResponseCaptureBuffer("yuvImage", buf);
+ mSocketRunnableObj.sendResponseCaptureBuffer(
+         "yuvImage" + physicalCameraId, buf);
} else if (format == ImageFormat.RAW10) {
Logt.i(TAG, "Received RAW10 capture");
byte[] img = ItsUtils.getDataFromImage(capture, mSocketQueueQuota);
ByteBuffer buf = ByteBuffer.wrap(img);
int count = mCountRaw10.getAndIncrement();
- mSocketRunnableObj.sendResponseCaptureBuffer("raw10Image", buf);
+ mSocketRunnableObj.sendResponseCaptureBuffer(
+         "raw10Image" + physicalCameraId, buf);
} else if (format == ImageFormat.RAW12) {
Logt.i(TAG, "Received RAW12 capture");
byte[] img = ItsUtils.getDataFromImage(capture, mSocketQueueQuota);
ByteBuffer buf = ByteBuffer.wrap(img);
int count = mCountRaw12.getAndIncrement();
- mSocketRunnableObj.sendResponseCaptureBuffer("raw12Image", buf);
+ mSocketRunnableObj.sendResponseCaptureBuffer("raw12Image"+physicalCameraId, buf);
} else if (format == ImageFormat.RAW_SENSOR) {
Logt.i(TAG, "Received RAW16 capture");
int count = mCountRawOrDng.getAndIncrement();
@@ -1603,7 +1625,8 @@
byte[] img = ItsUtils.getDataFromImage(capture, mSocketQueueQuota);
if (! mCaptureRawIsStats) {
ByteBuffer buf = ByteBuffer.wrap(img);
- mSocketRunnableObj.sendResponseCaptureBuffer("rawImage", buf);
+ mSocketRunnableObj.sendResponseCaptureBuffer(
+ "rawImage" + physicalCameraId, buf);
} else {
// Compute the requested stats on the raw frame, and return the results
// in a new "stats image".