CameraITS: add support for MONO sensor.

Add its.caps.mono_camera(props), skip tests that do not apply to
monochrome sensors, and allow mono sensors to return generic AWB
values from the HAL.
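
Illustrative test usage (a sketch of the pattern used by the updated
scene1/test_3a.py):

    props = cam.get_camera_properties()
    mono_camera = its.caps.mono_camera(props)
    # Mono sensors may return generic AWB values, so do_3a() is told not
    # to require per-channel AWB gains before reporting convergence.
    sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
                                               mono_camera=mono_camera)
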
Bug: 68779442
Change-Id: Iedbe71f76ff1ed4a484019e22436d864b6638285
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
index b229a98..6b12e50 100644
--- a/apps/CameraITS/pymodules/its/caps.py
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -474,6 +474,18 @@
physicalIdsList = filter(None, physicalIdsString.split('\x00'));
return physicalIdsList
+def mono_camera(props):
+    """Returns whether the camera is monochrome.
+
+ Args:
+ props: Camera properties object.
+
+    Returns:
+ Boolean.
+ """
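+    # Capability value 12 is REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME.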
+ return props.has_key("android.request.availableCapabilities") and \
+ 12 in props["android.request.availableCapabilities"]
+
def debug_mode():
"""Returns True/False for whether test is run in debug mode.
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index 4fc5fe7..9c56ea7 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -412,7 +412,7 @@
do_ae=True, do_awb=True, do_af=True,
lock_ae=False, lock_awb=False,
get_results=False,
- ev_comp=0):
+ ev_comp=0, mono_camera=False):
"""Perform a 3A operation on the device.
Triggers some or all of AE, AWB, and AF, and returns once they have
@@ -432,6 +432,7 @@
lock_awb: Request AWB lock after convergence, and wait for it.
get_results: Return the 3A results from this function.
ev_comp: An EV compensation value to use when running AE.
+            mono_camera: Boolean for monochrome camera; if True, AWB gains
+                         are not required to report convergence.
Region format in args:
Arguments are lists of weighted regions; each weighted region is a
@@ -492,8 +493,9 @@
raise its.error.Error('Invalid command response')
if converged and not get_results:
return None,None,None,None,None
- if (do_ae and ae_sens == None or do_awb and awb_gains == None
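+        # Monochrome cameras may not report AWB gains; don't require them.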
+ if (do_ae and ae_sens == None or (not mono_camera and do_awb and awb_gains == None)
or do_af and af_dist == None or not converged):
raise its.error.Error('3A failed to converge')
return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
diff --git a/apps/CameraITS/tests/scene1/test_3a.py b/apps/CameraITS/tests/scene1/test_3a.py
index 2c5dcfe..65cac71 100644
--- a/apps/CameraITS/tests/scene1/test_3a.py
+++ b/apps/CameraITS/tests/scene1/test_3a.py
@@ -27,8 +27,10 @@
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.read_3a(props))
+ mono_camera = its.caps.mono_camera(props)
- sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+ sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
+ mono_camera=mono_camera)
print 'AE: sensitivity %d, exposure %dms' % (sens, exp/1000000)
print 'AWB: gains', gains, 'transform', xform
print 'AF: distance', focus
diff --git a/apps/CameraITS/tests/scene1/test_ae_af.py b/apps/CameraITS/tests/scene1/test_ae_af.py
index ed94017..fabc494 100644
--- a/apps/CameraITS/tests/scene1/test_ae_af.py
+++ b/apps/CameraITS/tests/scene1/test_ae_af.py
@@ -35,6 +35,7 @@
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.read_3a(props))
+ mono_camera = its.caps.mono_camera(props)
for k, v in sorted(SINGLE_A.items()):
print k
@@ -42,7 +43,8 @@
s, e, gains, xform, fd = cam.do_3a(get_results=True,
do_ae=v[0],
do_af=v[1],
- do_awb=v[2])
+ do_awb=v[2],
+ mono_camera=mono_camera)
print ' sensitivity', s, 'exposure', e
print ' gains', gains, 'transform', xform
print ' fd', fd
diff --git a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
index 849c720..4393fe6 100644
--- a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
+++ b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
@@ -35,6 +35,7 @@
its.caps.skip_unless(its.caps.manual_sensor(props) and
its.caps.manual_post_proc(props) and
its.caps.per_frame_control(props))
+ mono_camera = its.caps.mono_camera(props)
# Converge 3A and get the estimates.
debug = its.caps.debug_mode()
@@ -44,7 +45,8 @@
else:
match_ar = (largest_yuv['width'], largest_yuv['height'])
fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
- sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+ sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
+ mono_camera=mono_camera)
xform_rat = its.objects.float_to_rational(xform)
print "AE sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
print "AWB gains", gains
diff --git a/apps/CameraITS/tests/scene1/test_capture_result.py b/apps/CameraITS/tests/scene1/test_capture_result.py
index e797ec0..31490d6 100644
--- a/apps/CameraITS/tests/scene1/test_capture_result.py
+++ b/apps/CameraITS/tests/scene1/test_capture_result.py
@@ -108,7 +108,8 @@
# Get 3A lock first, so the auto values in the capture result are
# populated properly.
rect = [[0,0,1,1,1]]
- cam.do_3a(rect, rect, rect, do_af=False)
+ mono_camera = its.caps.mono_camera(props)
+ cam.do_3a(rect, rect, rect, do_af=False, mono_camera=mono_camera)
cap = cam.do_capture(auto_req)
cap_res = cap["metadata"]
diff --git a/apps/CameraITS/tests/scene1/test_crop_region_raw.py b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
index f2d788e..8754a8c 100644
--- a/apps/CameraITS/tests/scene1/test_crop_region_raw.py
+++ b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
@@ -32,7 +32,8 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.raw16(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
# Calculate the active sensor region for a full (non-cropped) image.
a = props['android.sensor.info.activeArraySize']
diff --git a/apps/CameraITS/tests/scene1/test_dng_noise_model.py b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
index 3cd0389..853c498 100644
--- a/apps/CameraITS/tests/scene1/test_dng_noise_model.py
+++ b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
@@ -45,7 +45,8 @@
its.caps.raw16(props) and
its.caps.manual_sensor(props) and
its.caps.read_3a(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
debug = its.caps.debug_mode()
white_level = float(props['android.sensor.info.whiteLevel'])
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
index a3605f6..c1588fd 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
@@ -41,6 +41,7 @@
its.caps.per_frame_control(props) and
its.caps.ev_compensation(props))
+ mono_camera = its.caps.mono_camera(props)
debug = its.caps.debug_mode()
largest_yuv = its.objects.get_largest_yuv_format(props)
if debug:
@@ -63,7 +64,7 @@
# Converge 3A, and lock AE once converged. skip AF trigger as
# dark/bright scene could make AF convergence fail and this test
# doesn't care about image sharpness.
- cam.do_3a(ev_comp=0, lock_ae=True, do_af=False)
+ cam.do_3a(ev_comp=0, lock_ae=True, do_af=False, mono_camera=mono_camera)
for ev in ev_steps:
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
index 4473bc7..32e5001 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
@@ -40,6 +40,7 @@
its.caps.ae_lock(props))
debug = its.caps.debug_mode()
+ mono_camera = its.caps.mono_camera(props)
largest_yuv = its.objects.get_largest_yuv_format(props)
if debug:
fmt = largest_yuv
@@ -59,7 +60,7 @@
# Converge 3A, and lock AE once converged. skip AF trigger as
# dark/bright scene could make AF convergence fail and this test
# doesn't care about image sharpness.
- cam.do_3a(ev_comp=0, lock_ae=True, do_af=False)
+ cam.do_3a(ev_comp=0, lock_ae=True, do_af=False, mono_camera=mono_camera)
for ev in evs:
# Capture a single shot with the same EV comp and locked AE.
diff --git a/apps/CameraITS/tests/scene1/test_locked_burst.py b/apps/CameraITS/tests/scene1/test_locked_burst.py
index 47a0186..befbbed 100644
--- a/apps/CameraITS/tests/scene1/test_locked_burst.py
+++ b/apps/CameraITS/tests/scene1/test_locked_burst.py
@@ -40,9 +40,11 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.ae_lock(props) and
its.caps.awb_lock(props))
+ mono_camera = its.caps.mono_camera(props)
# Converge 3A prior to capture.
- cam.do_3a(do_af=True, lock_ae=True, lock_awb=True)
+ cam.do_3a(do_af=True, lock_ae=True, lock_awb=True,
+ mono_camera=mono_camera)
fmt = its.objects.get_largest_yuv_format(props)
diff --git a/apps/CameraITS/tests/scene1/test_param_color_correction.py b/apps/CameraITS/tests/scene1/test_param_color_correction.py
index 3dac3f5..83f4f7f 100644
--- a/apps/CameraITS/tests/scene1/test_param_color_correction.py
+++ b/apps/CameraITS/tests/scene1/test_param_color_correction.py
@@ -38,7 +38,8 @@
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
# Baseline request
debug = its.caps.debug_mode()
diff --git a/apps/CameraITS/tests/scene1/test_param_shading_mode.py b/apps/CameraITS/tests/scene1/test_param_shading_mode.py
index f4c2b99..33ed2f6 100644
--- a/apps/CameraITS/tests/scene1/test_param_shading_mode.py
+++ b/apps/CameraITS/tests/scene1/test_param_shading_mode.py
@@ -40,6 +40,8 @@
its.caps.lsc_map(props) and
its.caps.lsc_off(props))
+ mono_camera = its.caps.mono_camera(props)
+
assert(props.has_key("android.lens.info.shadingMapSize") and
props["android.lens.info.shadingMapSize"] != None)
@@ -56,7 +58,7 @@
# shading modes.
# 3. Lens shading maps with mode HIGH_QUALITY are similar after
# switching shading modes.
- cam.do_3a();
+ cam.do_3a(mono_camera=mono_camera);
# Get the reference lens shading maps for OFF, FAST, and HIGH_QUALITY
# in different sessions.
diff --git a/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py b/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
index 70b1927..73c001d 100644
--- a/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
+++ b/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
@@ -40,7 +40,8 @@
its.caps.skip_unless(its.caps.raw_output(props) and
its.caps.post_raw_sensitivity_boost(props) and
its.caps.compute_target_exposure(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
w,h = its.objects.get_available_output_sizes(
"yuv", props, (1920, 1080))[0]
diff --git a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
index 053f7ca..04a3386 100644
--- a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
@@ -39,7 +39,8 @@
its.caps.skip_unless(its.caps.raw16(props) and
its.caps.manual_sensor(props) and
its.caps.read_3a(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
debug = its.caps.debug_mode()
# Expose for the scene with min sensitivity
diff --git a/apps/CameraITS/tests/scene1/test_raw_exposure.py b/apps/CameraITS/tests/scene1/test_raw_exposure.py
index 3a0dcf6..8fafe4a 100644
--- a/apps/CameraITS/tests/scene1/test_raw_exposure.py
+++ b/apps/CameraITS/tests/scene1/test_raw_exposure.py
@@ -42,7 +42,8 @@
its.caps.skip_unless(its.caps.raw16(props) and
its.caps.manual_sensor(props) and
its.caps.read_3a(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
debug = its.caps.debug_mode()
# Expose for the scene with min sensitivity
diff --git a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
index 6dac206..44a3b41 100644
--- a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
@@ -37,7 +37,8 @@
its.caps.skip_unless(its.caps.raw16(props) and
its.caps.manual_sensor(props) and
its.caps.read_3a(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
debug = its.caps.debug_mode()
# Expose for the scene with min sensitivity
diff --git a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
index 2b5f094..a7b9f6d 100644
--- a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
+++ b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
@@ -33,7 +33,8 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.manual_sensor(props) and
its.caps.manual_post_proc(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
debug = its.caps.debug_mode()
largest_yuv = its.objects.get_largest_yuv_format(props)
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
index 268b64a..4a62120 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
@@ -27,8 +27,9 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.raw(props) and
its.caps.read_3a(props))
+ mono_camera = its.caps.mono_camera(props)
- cam.do_3a()
+ cam.do_3a(mono_camera=mono_camera)
req = its.objects.auto_capture_request()
max_dng_size = \
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
index dd7ef21..0c5b78b 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
@@ -31,7 +31,8 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.raw16(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
# Use a manual request with a linear tonemap so that the YUV and RAW
# should look the same (once converted by the its.image module).
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
index 9c0c69b..6e700b7 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
@@ -31,7 +31,8 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.raw10(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
# Use a manual request with a linear tonemap so that the YUV and RAW
# should look the same (once converted by the its.image module).
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
index 8070785..cd284b7 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
@@ -31,7 +31,8 @@
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.raw12(props) and
- its.caps.per_frame_control(props))
+ its.caps.per_frame_control(props) and
+ not its.caps.mono_camera(props))
# Use a manual request with a linear tonemap so that the YUV and RAW
# should look the same (once converted by the its.image module).
diff --git a/apps/CameraITS/tests/scene2/test_faces.py b/apps/CameraITS/tests/scene2/test_faces.py
index 2acce85..46cf15d 100644
--- a/apps/CameraITS/tests/scene2/test_faces.py
+++ b/apps/CameraITS/tests/scene2/test_faces.py
@@ -30,11 +30,13 @@
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
+ mono_camera = its.caps.mono_camera(props)
fd_modes = props['android.statistics.info.availableFaceDetectModes']
a = props['android.sensor.info.activeArraySize']
aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
if its.caps.read_3a(props):
- gain, exp, _, _, focus = cam.do_3a(get_results=True)
+ gain, exp, _, _, focus = cam.do_3a(get_results=True,
+ mono_camera=mono_camera)
print 'iso = %d' % gain
print 'exp = %.2fms' % (exp*1.0E-6)
if focus == 0.0:
diff --git a/apps/CameraITS/tests/scene2/test_num_faces.py b/apps/CameraITS/tests/scene2/test_num_faces.py
index 16e53ad..881bcba 100644
--- a/apps/CameraITS/tests/scene2/test_num_faces.py
+++ b/apps/CameraITS/tests/scene2/test_num_faces.py
@@ -32,12 +32,14 @@
"""Test face detection."""
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
+ mono_camera = its.caps.mono_camera(props)
fd_modes = props['android.statistics.info.availableFaceDetectModes']
a = props['android.sensor.info.activeArraySize']
aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
if its.caps.read_3a(props):
- _, _, _, _, _ = cam.do_3a(get_results=True)
+ _, _, _, _, _ = cam.do_3a(get_results=True,
+ mono_camera=mono_camera)
for fd_mode in fd_modes:
assert FD_MODE_OFF <= fd_mode <= FD_MODE_FULL
diff --git a/apps/CameraITS/tests/scene3/test_3a_consistency.py b/apps/CameraITS/tests/scene3/test_3a_consistency.py
index 2dbee4c..76b44ce 100644
--- a/apps/CameraITS/tests/scene3/test_3a_consistency.py
+++ b/apps/CameraITS/tests/scene3/test_3a_consistency.py
@@ -34,6 +34,7 @@
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.read_3a(props))
+ mono_camera = its.caps.mono_camera(props)
exps = []
senses = []
@@ -41,7 +42,8 @@
fds = []
for _ in range(NUM_TEST_ITERATIONS):
try:
- s, e, gains, xform, fd = cam.do_3a(get_results=True)
+ s, e, gains, xform, fd = cam.do_3a(get_results=True,
+ mono_camera=mono_camera)
print ' sensitivity', s, 'exposure', e
print ' gains', gains, 'transform', xform
print ' fd', fd
diff --git a/apps/CameraITS/tests/scene3/test_edge_enhancement.py b/apps/CameraITS/tests/scene3/test_edge_enhancement.py
index 37e1d63..76093ef 100644
--- a/apps/CameraITS/tests/scene3/test_edge_enhancement.py
+++ b/apps/CameraITS/tests/scene3/test_edge_enhancement.py
@@ -88,12 +88,13 @@
its.caps.per_frame_control(props) and
its.caps.edge_mode(props, 0))
+ mono_camera = its.caps.mono_camera(props)
test_fmt = "yuv"
size = its.objects.get_available_output_sizes(test_fmt, props)[0]
out_surface = {"width":size[0], "height":size[1], "format":test_fmt}
# Get proper sensitivity, exposure time, and focus distance.
- s,e,_,_,fd = cam.do_3a(get_results=True)
+ s,e,_,_,fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
# Get the sharpness for each edge mode for regular requests
sharpness_regular = []
diff --git a/apps/CameraITS/tests/scene3/test_flip_mirror.py b/apps/CameraITS/tests/scene3/test_flip_mirror.py
index f0d17ec..28e6420 100644
--- a/apps/CameraITS/tests/scene3/test_flip_mirror.py
+++ b/apps/CameraITS/tests/scene3/test_flip_mirror.py
@@ -49,6 +49,9 @@
boolean: True if flipped, False if not
"""
+ # determine if monochrome camera
+ mono_camera = its.caps.mono_camera(props)
+
# determine if in debug mode
debug = its.caps.debug_mode()
@@ -56,7 +59,7 @@
template = cv2.imread(CHART_FILE, cv2.IMREAD_ANYDEPTH)
# take img, crop chart, scale and prep for cv2 template match
- s, e, _, _, fd = cam.do_3a(get_results=True)
+ s, e, _, _, fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
req = its.objects.manual_capture_request(s, e, fd)
cap = cam.do_capture(req, fmt)
y, _, _ = its.image.convert_capture_to_planes(cap, props)
diff --git a/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py b/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
index aaa8484..030063b 100644
--- a/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
+++ b/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
@@ -112,11 +112,12 @@
its.caps.skip_unless(not its.caps.fixed_focus(props))
its.caps.skip_unless(its.caps.read_3a(props) and
its.caps.lens_approx_calibrated(props))
+ mono_camera = its.caps.mono_camera(props)
min_fd = props['android.lens.info.minimumFocusDistance']
fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
# Get proper sensitivity, exposure time, and focus distance with 3A.
- s, e, _, _, fd = cam.do_3a(get_results=True)
+ s, e, _, _, fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
# Get sharpness for each focal distance
d = test_lens_movement_reporting(cam, props, fmt, s, e, fd, chart)
diff --git a/apps/CameraITS/tests/scene3/test_lens_position.py b/apps/CameraITS/tests/scene3/test_lens_position.py
index feb28a8..3839566 100644
--- a/apps/CameraITS/tests/scene3/test_lens_position.py
+++ b/apps/CameraITS/tests/scene3/test_lens_position.py
@@ -129,10 +129,11 @@
its.caps.skip_unless(not its.caps.fixed_focus(props))
its.caps.skip_unless(its.caps.read_3a(props) and
its.caps.lens_calibrated(props))
+ mono_camera = its.caps.mono_camera(props)
fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
# Get proper sensitivity and exposure time with 3A
- s, e, _, _, _ = cam.do_3a(get_results=True)
+ s, e, _, _, _ = cam.do_3a(get_results=True, mono_camera=mono_camera)
# Get sharpness for each focal distance
d_stat, d_move = test_lens_position(cam, props, fmt, s, e, chart)
diff --git a/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py b/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
index 95fc636..049426a 100644
--- a/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
+++ b/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
@@ -101,6 +101,7 @@
(its.caps.yuv_reprocess(props) or
its.caps.private_reprocess(props)))
+ mono_camera = its.caps.mono_camera(props)
# If reprocessing is supported, ZSL EE mode must be available.
assert(its.caps.edge_mode(props, 3))
@@ -114,7 +115,7 @@
out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
# Get proper sensitivity, exposure time, and focus distance.
- s,e,_,_,fd = cam.do_3a(get_results=True)
+ s,e,_,_,fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
# Get the sharpness for each edge mode for regular requests
sharpness_regular = []
diff --git a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
index 2d2ccc6..18dd05e 100644
--- a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
+++ b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
@@ -92,13 +92,15 @@
its.caps.skip_unless(full_device or limited_device)
level3_device = its.caps.level3(props)
raw_avlb = its.caps.raw16(props)
+ mono_camera = its.caps.mono_camera(props)
run_crop_test = (level3_device or full_device) and raw_avlb
if not run_crop_test:
print "Crop test skipped"
debug = its.caps.debug_mode()
# Converge 3A and get the estimates.
sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
- lock_ae=True, lock_awb=True)
+ lock_ae=True, lock_awb=True,
+ mono_camera=mono_camera)
print "AE sensitivity %d, exposure %dms" % (sens, exp / 1000000.0)
print "AWB gains", gains
print "AWB transform", xform