1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.hardware.camera2.params;
18
19import android.graphics.ImageFormat;
20import android.graphics.PixelFormat;
21import android.hardware.camera2.CameraCharacteristics;
22import android.hardware.camera2.CameraDevice;
23import android.hardware.camera2.CaptureRequest;
24import android.hardware.camera2.utils.HashCodeHelpers;
25import android.hardware.camera2.legacy.LegacyCameraDevice;
26import android.hardware.camera2.legacy.LegacyMetadataMapper;
27import android.hardware.camera2.legacy.LegacyExceptionUtils.BufferQueueAbandonedException;
28import android.view.Surface;
29import android.util.Log;
30import android.util.Range;
31import android.util.Size;
32
33import java.util.Arrays;
34import java.util.HashMap;
35import java.util.Objects;
36import java.util.Set;
37
38import static com.android.internal.util.Preconditions.*;
39
40/**
41 * Immutable class to store the available stream
42 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up
43 * {@link android.view.Surface Surfaces} for creating a
44 * {@link android.hardware.camera2.CameraCaptureSession capture session} with
45 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}.
46 * <!-- TODO: link to input stream configuration -->
47 *
48 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively
49 * for that format) that are supported by a camera device.</p>
50 *
51 * <p>This also contains the minimum frame durations and stall durations for each format/size
52 * combination that can be used to calculate effective frame rate when submitting multiple captures.
53 * </p>
54 *
55 * <p>An instance of this object is available from {@link CameraCharacteristics} using
56 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the
57 * {@link CameraCharacteristics#get} method.</p>
58 *
59 * <pre><code>{@code
60 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
61 * StreamConfigurationMap configs = characteristics.get(
62 * CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
63 * }</code></pre>
64 *
65 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
66 * @see CameraDevice#createCaptureSession
67 */
68public final class StreamConfigurationMap {
69
70 private static final String TAG = "StreamConfigurationMap";
71
72 /**
73     * Create a new {@link StreamConfigurationMap}.
74 *
75     * <p>Ownership of the array parameters is passed to this object after creation; do not
76 * write to them after this constructor is invoked.</p>
77 *
78 * @param configurations a non-{@code null} array of {@link StreamConfiguration}
79 * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
80 * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
81     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
82 * camera device does not support high speed video recording
83     *
84     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
85 * were {@code null} or any subelements were {@code null}
86     *
87 * @hide
88 */
89 public StreamConfigurationMap(
90 StreamConfiguration[] configurations,
91 StreamConfigurationDuration[] minFrameDurations,
92            StreamConfigurationDuration[] stallDurations,
93            StreamConfiguration[] depthConfigurations,
94 StreamConfigurationDuration[] depthMinFrameDurations,
95 StreamConfigurationDuration[] depthStallDurations,
96            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations) {
97
98 mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
99 mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
100 mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");
101
102 if (depthConfigurations == null) {
103 mDepthConfigurations = new StreamConfiguration[0];
104 mDepthMinFrameDurations = new StreamConfigurationDuration[0];
105 mDepthStallDurations = new StreamConfigurationDuration[0];
106 } else {
107 mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
108 "depthConfigurations");
109 mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
110 "depthMinFrameDurations");
111 mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
112 "depthStallDurations");
113 }
114
115        if (highSpeedVideoConfigurations == null) {
116 mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
117 } else {
118 mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
119 highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
120 }
121
122 // For each format, track how many sizes there are available to configure
123 for (StreamConfiguration config : configurations) {
124 HashMap<Integer, Integer> map = config.isOutput() ? mOutputFormats : mInputFormats;
125
126 Integer count = map.get(config.getFormat());
127
128 if (count == null) {
129 count = 0;
130 }
131
132            map.put(config.getFormat(), count + 1);
133 }
134
135 // For each depth format, track how many sizes there are available to configure
136 for (StreamConfiguration config : mDepthConfigurations) {
137 if (!config.isOutput()) {
138 // Ignoring input depth configs
139 continue;
140 }
141
142 Integer count = mDepthOutputFormats.get(config.getFormat());
143
144 if (count == null) {
145 count = 0;
146 }
147
148 mDepthOutputFormats.put(config.getFormat(), count + 1);
149        }
150
151 if (!mOutputFormats.containsKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
152 throw new AssertionError(
153 "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
154 }
155
156 // For each Size/FPS range, track how many FPS range/Size there are available
157 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
158 Size size = config.getSize();
159 Range<Integer> fpsRange = config.getFpsRange();
160 Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
161 if (fpsRangeCount == null) {
162 fpsRangeCount = 0;
163 }
164 mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
165 Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
166 if (sizeCount == null) {
167 sizeCount = 0;
168 }
169 mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
170 }
171    }
172
173 /**
174 * Get the image {@code format} output formats in this stream configuration.
175 *
176 * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
177 * or in {@link PixelFormat} (and there is no possibility of collision).</p>
178 *
179 * <p>Formats listed in this array are guaranteed to return true if queried with
180     * {@link #isOutputSupportedFor(int)}.</p>
181     *
182     * @return an array of integer formats
183 *
184 * @see ImageFormat
185 * @see PixelFormat
186 */
187 public final int[] getOutputFormats() {
188 return getPublicFormats(/*output*/true);
189 }
190
191 /**
192 * Get the image {@code format} input formats in this stream configuration.
193 *
194 * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
195 * or in {@link PixelFormat} (and there is no possibility of collision).</p>
196 *
197     * @return an array of integer formats
198 *
199 * @see ImageFormat
200 * @see PixelFormat
201 *
202 * @hide
203 */
204 public final int[] getInputFormats() {
205 return getPublicFormats(/*output*/false);
206 }
207
208 /**
209 * Get the supported input sizes for this input format.
210 *
211 * <p>The format must have come from {@link #getInputFormats}; otherwise
212 * {@code null} is returned.</p>
213 *
214 * @param format a format from {@link #getInputFormats}
215 * @return a non-empty array of sizes, or {@code null} if the format was not available.
216 *
217 * @hide
218 */
219 public Size[] getInputSizes(final int format) {
220 return getPublicFormatSizes(format, /*output*/false);
221 }
222
223 /**
224     * Determine whether or not output surfaces with a particular user-defined format can be passed to
225     * {@link CameraDevice#createCaptureSession createCaptureSession}.
226     *
227     * <p>This method determines whether the output {@code format} is supported by the camera device;
228 * each output {@code surface} target may or may not itself support that {@code format}.
229 * Refer to the class which provides the surface for additional documentation.</p>
230 *
231 * <p>Formats for which this returns {@code true} are guaranteed to exist in the result
232 * returned by {@link #getOutputSizes}.</p>
233 *
234 * @param format an image format from either {@link ImageFormat} or {@link PixelFormat}
235 * @return
236 * {@code true} iff using a {@code surface} with this {@code format} will be
237     *          supported with {@link CameraDevice#createCaptureSession}
238     *
239 * @throws IllegalArgumentException
240 * if the image format was not a defined named constant
241 * from either {@link ImageFormat} or {@link PixelFormat}
242 *
243 * @see ImageFormat
244 * @see PixelFormat
245     * @see CameraDevice#createCaptureSession
246     */
247 public boolean isOutputSupportedFor(int format) {
248 checkArgumentFormat(format);
249
250        int internalFormat = imageFormatToInternal(format);
251 int dataspace = imageFormatToDataspace(format);
252 if (dataspace == HAL_DATASPACE_DEPTH) {
253 return mDepthOutputFormats.containsKey(internalFormat);
254 } else {
255 return getFormatsMap(/*output*/true).containsKey(internalFormat);
256 }
257    }
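    // Illustrative usage sketch (not part of the original class): check that a public
    // format is supported before creating an ImageReader output for it. "configs" is
    // assumed to be the map obtained from CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
    // as shown in the class documentation above.
    //
    //     if (configs.isOutputSupportedFor(ImageFormat.JPEG)) {
    //         Size[] jpegSizes = configs.getOutputSizes(ImageFormat.JPEG);
    //         ImageReader reader = ImageReader.newInstance(
    //                 jpegSizes[0].getWidth(), jpegSizes[0].getHeight(),
    //                 ImageFormat.JPEG, /*maxImages*/ 2);
    //     }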
258
259 /**
260 * Determine whether or not output streams can be configured with a particular class
261 * as a consumer.
262 *
263 * <p>The following list is generally usable for outputs:
264 * <ul>
265 * <li>{@link android.media.ImageReader} -
266 * Recommended for image processing or streaming to external resources (such as a file or
267 * network)
268 * <li>{@link android.media.MediaRecorder} -
269 * Recommended for recording video (simple to use)
270 * <li>{@link android.media.MediaCodec} -
271 * Recommended for recording video (more complicated to use, with more flexibility)
272 * <li>{@link android.renderscript.Allocation} -
273 * Recommended for image processing with {@link android.renderscript RenderScript}
274 * <li>{@link android.view.SurfaceHolder} -
275 * Recommended for low-power camera preview with {@link android.view.SurfaceView}
276 * <li>{@link android.graphics.SurfaceTexture} -
277 * Recommended for OpenGL-accelerated preview processing or compositing with
278 * {@link android.view.TextureView}
279 * </ul>
280 * </p>
281 *
282 * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i>
283 * provide a producer endpoint that is suitable to be used with
284     * {@link CameraDevice#createCaptureSession}.</p>
285     *
286 * <p>Since not all of the above classes support output of all format and size combinations,
287 * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p>
288 *
289 * @param klass a non-{@code null} {@link Class} object reference
290 * @return {@code true} if this class is supported as an output, {@code false} otherwise
291 *
292 * @throws NullPointerException if {@code klass} was {@code null}
293 *
294     * @see CameraDevice#createCaptureSession
295     * @see #isOutputSupportedFor(Surface)
296 */
297 public static <T> boolean isOutputSupportedFor(Class<T> klass) {
298 checkNotNull(klass, "klass must not be null");
299
300 if (klass == android.media.ImageReader.class) {
301 return true;
302 } else if (klass == android.media.MediaRecorder.class) {
303 return true;
304 } else if (klass == android.media.MediaCodec.class) {
305 return true;
306 } else if (klass == android.renderscript.Allocation.class) {
307 return true;
308 } else if (klass == android.view.SurfaceHolder.class) {
309 return true;
310 } else if (klass == android.graphics.SurfaceTexture.class) {
311 return true;
312 }
313
314 return false;
315 }
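    // Illustrative sketch: the class-level check is static, so it can be done before a
    // map instance is even available; per-size support still has to be queried on the
    // instance. "configs" is an assumed StreamConfigurationMap as in the class docs.
    //
    //     if (StreamConfigurationMap.isOutputSupportedFor(SurfaceTexture.class)) {
    //         Size[] previewSizes = configs.getOutputSizes(SurfaceTexture.class);
    //         // pick one of previewSizes and apply it with
    //         // SurfaceTexture#setDefaultBufferSize(width, height)
    //     }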
316
317 /**
318     * Determine whether or not the {@code surface} in its current state is suitable to be included
319 * in a {@link CameraDevice#createCaptureSession capture session} as an output.
320     *
321 * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations
322 * of that {@code surface} are compatible. Some classes that provide the {@code surface} are
323 * compatible with the {@link CameraDevice} in general
324     * (see {@link #isOutputSupportedFor(Class)}), but it is the caller's responsibility to put the
325 * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p>
326 *
327 * <p>Reasons for a {@code surface} being specifically incompatible might be:
328 * <ul>
329 * <li>Using a format that's not listed by {@link #getOutputFormats}
330 * <li>Using a format/size combination that's not listed by {@link #getOutputSizes}
331     * <li>The {@code surface} itself is not in a state where it can service a new producer.
332 * </li>
333 * </ul>
334 *
335     * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does
336 * not match a camera-supported size, as long as the format (or class) is supported and the
337     * camera device supports a size that is equal to or less than 1080p in that format. If such a
338 * Surface is used to create a capture session, it will have its size rounded to the nearest
339 * supported size, below or equal to 1080p. Flexible sources include SurfaceView, SurfaceTexture,
340 * and ImageReader.</p>
341 *
342 * <p>This is not an exhaustive list; see the particular class's documentation for further
343     * possible reasons of incompatibility.</p>
344 *
345 * @param surface a non-{@code null} {@link Surface} object reference
346 * @return {@code true} if this is supported, {@code false} otherwise
347 *
348 * @throws NullPointerException if {@code surface} was {@code null}
349     * @throws IllegalArgumentException if the Surface endpoint is no longer valid
350     *
351     * @see CameraDevice#createCaptureSession
352     * @see #isOutputSupportedFor(Class)
353 */
354 public boolean isOutputSupportedFor(Surface surface) {
355 checkNotNull(surface, "surface must not be null");
356
357        Size surfaceSize;
358 int surfaceFormat = -1;
359 try {
360 surfaceSize = LegacyCameraDevice.getSurfaceSize(surface);
361 surfaceFormat = LegacyCameraDevice.detectSurfaceType(surface);
362 } catch(BufferQueueAbandonedException e) {
363 throw new IllegalArgumentException("Abandoned surface", e);
364 }
365
366        // See if consumer is flexible.
367 boolean isFlexible = LegacyCameraDevice.isFlexibleConsumer(surface);
368
369 // Override RGB formats to IMPLEMENTATION_DEFINED, b/9487482
370 if ((surfaceFormat >= LegacyMetadataMapper.HAL_PIXEL_FORMAT_RGBA_8888 &&
371 surfaceFormat <= LegacyMetadataMapper.HAL_PIXEL_FORMAT_BGRA_8888)) {
372 surfaceFormat = LegacyMetadataMapper.HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
373 }
374
375 for (StreamConfiguration config : mConfigurations) {
376 if (config.getFormat() == surfaceFormat && config.isOutput()) {
377                // Matching format: either need an exact size match, or a flexible consumer
378 // and a size no bigger than MAX_DIMEN_FOR_ROUNDING
379 if (config.getSize().equals(surfaceSize)) {
380 return true;
381 } else if (isFlexible &&
382 (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) {
383 return true;
384 }
385 }
386 }
387 return false;
388    }
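    // Illustrative sketch: validate a SurfaceView-provided Surface before using it as a
    // session output. "holder" is an assumed SurfaceHolder whose buffer size has already
    // been set to one of getOutputSizes(SurfaceHolder.class).
    //
    //     Surface previewSurface = holder.getSurface();
    //     if (!configs.isOutputSupportedFor(previewSurface)) {
    //         // e.g. call holder.setFixedSize(...) with a supported size and retry
    //     }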
389
390 /**
391 * Get a list of sizes compatible with {@code klass} to use as an output.
392 *
393 * <p>Since some of the supported classes may support additional formats beyond
394 * an opaque/implementation-defined (under-the-hood) format; this function only returns
395 * sizes for the implementation-defined format.</p>
396 *
397 * <p>Some classes such as {@link android.media.ImageReader} may only support user-defined
398 * formats; in particular {@link #isOutputSupportedFor(Class)} will return {@code true} for
399 * that class and this method will return an empty array (but not {@code null}).</p>
400 *
401 * <p>If a well-defined format such as {@code NV21} is required, use
402 * {@link #getOutputSizes(int)} instead.</p>
403 *
404     * <p>The {@code klass} should be a supported output, such that querying
405     * {@link #isOutputSupportedFor(Class)} returns {@code true}.</p>
406 *
407 * @param klass
408 * a non-{@code null} {@link Class} object reference
409 * @return
410 * an array of supported sizes for implementation-defined formats,
411 * or {@code null} iff the {@code klass} is not a supported output
412 *
413 * @throws NullPointerException if {@code klass} was {@code null}
414 *
415 * @see #isOutputSupportedFor(Class)
416 */
417 public <T> Size[] getOutputSizes(Class<T> klass) {
418        // Image reader is "supported", but never for implementation-defined formats; return empty
419 if (android.media.ImageReader.class.isAssignableFrom(klass)) {
420 return new Size[0];
421 }
422
423        if (isOutputSupportedFor(klass) == false) {
424 return null;
425 }
426
427        return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
428 HAL_DATASPACE_UNKNOWN,/*output*/true);
429    }
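    // Illustrative sketch: pick the largest SurfaceTexture-compatible size by pixel area
    // for an implementation-defined preview stream. Assumes "configs" as above.
    //
    //     Size[] texSizes = configs.getOutputSizes(SurfaceTexture.class);
    //     Size largest = texSizes[0];
    //     for (Size s : texSizes) {
    //         if ((long) s.getWidth() * s.getHeight()
    //                 > (long) largest.getWidth() * largest.getHeight()) {
    //             largest = s;
    //         }
    //     }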
430
431 /**
432 * Get a list of sizes compatible with the requested image {@code format}.
433 *
434 * <p>The {@code format} should be a supported format (one of the formats returned by
435 * {@link #getOutputFormats}).</p>
436 *
437 * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
438 * @return
439 * an array of supported sizes,
440 * or {@code null} if the {@code format} is not a supported output
441 *
442 * @see ImageFormat
443 * @see PixelFormat
444 * @see #getOutputFormats
445 */
446 public Size[] getOutputSizes(int format) {
447 return getPublicFormatSizes(format, /*output*/true);
448 }
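    // Illustrative sketch: confirm that a specific resolution is available for a given
    // public format before requesting it. Assumes "configs" as above.
    //
    //     Size requested = new Size(1920, 1080);
    //     boolean supported = Arrays.asList(configs.getOutputSizes(ImageFormat.YUV_420_888))
    //             .contains(requested);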
449
450 /**
451     * Get a list of supported high speed video recording sizes.
452 *
453 * <p> When HIGH_SPEED_VIDEO is supported in
454 * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
455 * method will list the supported high speed video size configurations. All the sizes listed
456 * will be a subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling
457     * formats (typically ImageFormat#YUV_420_888, ImageFormat#NV21, ImageFormat#YV12).</p>
458 *
459 * <p> To enable high speed video recording, application must set
460 * {@link CaptureRequest#CONTROL_SCENE_MODE} to
461 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
462 * requests and select the video size from this method and
463 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
464 * {@link #getHighSpeedVideoFpsRangesFor} to configure the recording and preview streams and
465 * setup the recording requests. For example, if the application intends to do high speed
466 * recording, it can select the maximum size reported by this method to configure output
467 * streams. Note that for the use case of multiple output streams, application must select one
468 * unique size from this method to use. Otherwise a request error might occur. Once the size is
469 * selected, application can get the supported FPS ranges by
470 * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording
471 * requests.</p>
472 *
473 * @return
474 * an array of supported high speed video recording sizes
475 *
476 * @see #getHighSpeedVideoFpsRangesFor(Size)
477 */
478 public Size[] getHighSpeedVideoSizes() {
479        Set<Size> keySet = mHighSpeedVideoSizeMap.keySet();
480 return keySet.toArray(new Size[keySet.size()]);
481    }
482
483 /**
484     * Get the frames per second ranges (fpsMin, fpsMax) for the input high speed video size.
485 *
486 * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p>
487 *
488     * <p> For the normal video recording use case, where the application will NOT set
489 * {@link CaptureRequest#CONTROL_SCENE_MODE} to
490 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
491 * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
492     * this method must not be used to set up capture requests, or it will cause a request error.</p>
493 *
494 * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()}
495 * @return
496 * An array of FPS range to use with
497 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE TARGET_FPS_RANGE} when using
498 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
499 * mode.
500     *          The upper bound of returned ranges is guaranteed to be larger than or equal to 60.
501 *
502 * @throws IllegalArgumentException if input size does not exist in the return value of
503 * getHighSpeedVideoSizes
504 * @see #getHighSpeedVideoSizes()
505 */
506 public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) {
507 Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
508 if (fpsRangeCount == null || fpsRangeCount == 0) {
509 throw new IllegalArgumentException(String.format(
510 "Size %s does not support high speed video recording", size));
511 }
512
513 @SuppressWarnings("unchecked")
514 Range<Integer>[] fpsRanges = new Range[fpsRangeCount];
515 int i = 0;
516 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
517 if (size.equals(config.getSize())) {
518 fpsRanges[i++] = config.getFpsRange();
519 }
520 }
521 return fpsRanges;
522 }
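    // Illustrative sketch of the size-first high speed flow described above: pick one
    // supported size, look up its FPS ranges, and apply both to the recording requests.
    // "builder" is an assumed CaptureRequest.Builder for the recording template.
    //
    //     Size[] hsSizes = configs.getHighSpeedVideoSizes();
    //     Size videoSize = hsSizes[0]; // or the largest supported size, as preferred
    //     Range<Integer>[] fpsRanges = configs.getHighSpeedVideoFpsRangesFor(videoSize);
    //     builder.set(CaptureRequest.CONTROL_SCENE_MODE,
    //             CaptureRequest.CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO);
    //     builder.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, fpsRanges[0]);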
523
524 /**
525 * Get a list of supported high speed video recording FPS ranges.
526 *
527 * <p> When HIGH_SPEED_VIDEO is supported in
528 * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
529 * method will list the supported high speed video FPS range configurations. Application can
530 * then use {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned
531 * FPS range.</p>
532 *
533 * <p> To enable high speed video recording, application must set
534 * {@link CaptureRequest#CONTROL_SCENE_MODE} to
535 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
536 * requests and select the video size from {@link #getHighSpeedVideoSizesFor} and
537 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
538 * this method to configure the recording and preview streams and setup the recording requests.
539 * For example, if the application intends to do high speed recording, it can select one FPS
540 * range reported by this method, query the video sizes corresponding to this FPS range by
541 * {@link #getHighSpeedVideoSizesFor} and select one of reported sizes to configure output
542 * streams. Note that for the use case of multiple output streams, application must select one
543 * unique size from {@link #getHighSpeedVideoSizesFor}, and use it for all output streams.
544 * Otherwise a request error might occur when attempting to enable
545 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO}.
546 * Once the stream is configured, application can set the FPS range in the recording requests.
547 * </p>
548 *
549 * @return
550 * an array of supported high speed video recording FPS ranges
551     *          The upper bound of returned ranges is guaranteed to be larger than or equal to 60.
552 *
553 * @see #getHighSpeedVideoSizesFor
554 */
555 @SuppressWarnings("unchecked")
556 public Range<Integer>[] getHighSpeedVideoFpsRanges() {
557        Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet();
558 return keySet.toArray(new Range[keySet.size()]);
559    }
560
561 /**
562 * Get the supported video sizes for input FPS range.
563 *
564 * <p> See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording.</p>
565 *
566 * <p> For normal video recording use case, where the application will NOT set
567 * {@link CaptureRequest#CONTROL_SCENE_MODE} to
568 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
569 * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
570     * this method must not be used to set up capture requests, or it will cause a request error.</p>
571 *
572 * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()}
573 * @return
574 * An array of video sizes to configure output stream when using
575 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
576 * mode.
577 *
578 * @throws IllegalArgumentException if input FPS range does not exist in the return value of
579 * getHighSpeedVideoFpsRanges
580 * @see #getHighSpeedVideoFpsRanges()
581 */
582 public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) {
583 Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
584 if (sizeCount == null || sizeCount == 0) {
585 throw new IllegalArgumentException(String.format(
586 "FpsRange %s does not support high speed video recording", fpsRange));
587 }
588
589 Size[] sizes = new Size[sizeCount];
590 int i = 0;
591 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
592 if (fpsRange.equals(config.getFpsRange())) {
593 sizes[i++] = config.getSize();
594 }
595 }
596 return sizes;
597 }
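    // Illustrative sketch of the FPS-range-first flow: start from a desired range (here,
    // any range whose upper bound is 120) and look up the sizes it supports. Assumes
    // "configs" as above.
    //
    //     for (Range<Integer> r : configs.getHighSpeedVideoFpsRanges()) {
    //         if (r.getUpper() == 120) {
    //             Size[] sizesFor120 = configs.getHighSpeedVideoSizesFor(r);
    //             // configure the recording stream with one of sizesFor120
    //             break;
    //         }
    //     }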
598
599 /**
600     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
601 * for the format/size combination (in nanoseconds).
602 *
603 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
604 * <p>{@code size} should be one of the ones returned by
605 * {@link #getOutputSizes(int)}.</p>
606 *
607 * <p>This should correspond to the frame duration when only that stream is active, with all
608 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
609 * </p>
610 *
611 * <p>When multiple streams are used in a request, the minimum frame duration will be
612 * {@code max(individual stream min durations)}.</p>
613 *
614     * <p>For devices that do not support manual sensor control
615 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
616     * this function may return 0.</p>
617     *
618     * <!--
619 * TODO: uncomment after adding input stream support
620 * <p>The minimum frame duration of a stream (of a particular format, size) is the same
621 * regardless of whether the stream is input or output.</p>
622 * -->
623 *
624 * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
625 * @param size an output-compatible size
626     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
627     *          0 if the minimum frame duration is not available.
628     *
629 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
630 * @throws NullPointerException if {@code size} was {@code null}
631 *
632 * @see CaptureRequest#SENSOR_FRAME_DURATION
633 * @see #getOutputStallDuration(int, Size)
634 * @see ImageFormat
635 * @see PixelFormat
636 */
637 public long getOutputMinFrameDuration(int format, Size size) {
638 checkNotNull(size, "size must not be null");
639 checkArgumentFormatSupported(format, /*output*/true);
640
641        return getInternalFormatDuration(imageFormatToInternal(format),
642 imageFormatToDataspace(format),
643 size,
644 DURATION_MIN_FRAME);
645    }
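    // Illustrative sketch: convert a minimum frame duration into an approximate maximum
    // frame rate for a single YUV stream. A return value of 0 means the duration is not
    // available on this device. "size" is an assumed element of getOutputSizes(...).
    //
    //     long minFrameNs = configs.getOutputMinFrameDuration(ImageFormat.YUV_420_888, size);
    //     if (minFrameNs > 0) {
    //         double maxFps = 1e9 / minFrameNs; // e.g. 33333333 ns -> ~30 fps
    //     }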
646
647 /**
648 * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
649 * for the class/size combination (in nanoseconds).
650 *
651     * <p>This assumes that the {@code klass} is set up to use an implementation-defined format.
652 * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p>
653 *
654 * <p>{@code klass} should be one of the ones which is supported by
655 * {@link #isOutputSupportedFor(Class)}.</p>
656 *
657 * <p>{@code size} should be one of the ones returned by
658 * {@link #getOutputSizes(int)}.</p>
659 *
660 * <p>This should correspond to the frame duration when only that stream is active, with all
661 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
662 * </p>
663 *
664 * <p>When multiple streams are used in a request, the minimum frame duration will be
665 * {@code max(individual stream min durations)}.</p>
666 *
667     * <p>For devices that do not support manual sensor control
668 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
669     * this function may return 0.</p>
670     *
671     * <!--
672 * TODO: uncomment after adding input stream support
673 * <p>The minimum frame duration of a stream (of a particular format, size) is the same
674 * regardless of whether the stream is input or output.</p>
675 * -->
676 *
677 * @param klass
678 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
679 * non-empty array returned by {@link #getOutputSizes(Class)}
680 * @param size an output-compatible size
681     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
682     *          0 if the minimum frame duration is not available.
683     *
684 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
685 * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
686 *
687 * @see CaptureRequest#SENSOR_FRAME_DURATION
688 * @see ImageFormat
689 * @see PixelFormat
690 */
691 public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) {
692 if (!isOutputSupportedFor(klass)) {
693 throw new IllegalArgumentException("klass was not supported");
694 }
695
696 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
697                HAL_DATASPACE_UNKNOWN,
698                size, DURATION_MIN_FRAME);
699 }
700
701 /**
702 * Get the stall duration for the format/size combination (in nanoseconds).
703 *
704 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
705 * <p>{@code size} should be one of the ones returned by
706 * {@link #getOutputSizes(int)}.</p>
707 *
708 * <p>
709 * A stall duration is how much extra time would get added to the normal minimum frame duration
710 * for a repeating request that has streams with non-zero stall.
711 *
712 * <p>For example, consider JPEG captures which have the following characteristics:
713 *
714 * <ul>
715 * <li>JPEG streams act like processed YUV streams in requests for which they are not included;
716 * in requests in which they are directly referenced, they act as JPEG streams.
717 * This is because supporting a JPEG stream requires the underlying YUV data to always be ready
718 * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on
719 * requests that actually reference a JPEG stream.
720 * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot
721 * process more than 1 capture at a time.
722 * </ul>
723 *
724 * <p>In other words, using a repeating YUV request would result in a steady frame rate
725 * (let's say it's 30 FPS). If a single JPEG request is submitted periodically,
726 * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each
727 * time). If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from
728 * 30 FPS.</p>
729 *
730 * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a
731 * frame rate drop unless there are still outstanding buffers for that stream from previous
732 * requests.</p>
733 *
734     * <p>Submitting a repeating request with a set of streams (call it {@code S}) is effectively the
735     * same as setting the minimum frame duration to the normal minimum frame duration corresponding
736     * to {@code S}, plus the maximum stall duration for {@code S}.</p>
737 *
738 * <p>If interleaving requests with and without a stall duration, a request will stall by the
739 * maximum of the remaining times for each can-stall stream with outstanding buffers.</p>
740 *
741 * <p>This means that a stalling request will not have an exposure start until the stall has
742 * completed.</p>
743 *
744 * <p>This should correspond to the stall duration when only that stream is active, with all
745 * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}.
746 * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an
747 * indeterminate stall duration for all streams in a request (the regular stall calculation
748 * rules are ignored).</p>
749 *
750 * <p>The following formats may always have a stall duration:
751 * <ul>
752 * <li>{@link ImageFormat#JPEG JPEG}
753 * <li>{@link ImageFormat#RAW_SENSOR RAW16}
754 * </ul>
755 * </p>
756 *
757 * <p>The following formats will never have a stall duration:
758 * <ul>
759 * <li>{@link ImageFormat#YUV_420_888 YUV_420_888}
760 * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined}
761 * </ul></p>
762 *
763 * <p>
764 * All other formats may or may not have an allowed stall duration on a per-capability basis;
765 * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES
766 * android.request.availableCapabilities} for more details.</p>
767     *
768 *
769 * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration}
770 * for more information about calculating the max frame rate (absent stalls).</p>
771 *
772 * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
773 * @param size an output-compatible size
774 * @return a stall duration {@code >=} 0 in nanoseconds
775 *
776 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
777 * @throws NullPointerException if {@code size} was {@code null}
778 *
779 * @see CaptureRequest#SENSOR_FRAME_DURATION
780 * @see ImageFormat
781 * @see PixelFormat
782 */
783 public long getOutputStallDuration(int format, Size size) {
784 checkArgumentFormatSupported(format, /*output*/true);
785
786 return getInternalFormatDuration(imageFormatToInternal(format),
787                imageFormatToDataspace(format),
788 size,
789 DURATION_STALL);
790    }
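    // Illustrative sketch, following the JPEG discussion above: a rough lower bound on the
    // frame duration of a repeating YUV + JPEG request is the larger of the two per-stream
    // minimum frame durations plus the JPEG stall duration.
    //
    //     long yuvMin = configs.getOutputMinFrameDuration(ImageFormat.YUV_420_888, size);
    //     long jpegMin = configs.getOutputMinFrameDuration(ImageFormat.JPEG, size);
    //     long jpegStall = configs.getOutputStallDuration(ImageFormat.JPEG, size);
    //     long repeatingFrameNs = Math.max(yuvMin, jpegMin) + jpegStall;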
791
792 /**
793 * Get the stall duration for the class/size combination (in nanoseconds).
794 *
795     * <p>This assumes that the {@code klass} is set up to use an implementation-defined format.
796     * For user-defined formats, use {@link #getOutputStallDuration(int, Size)}.</p>
797 *
798 * <p>{@code klass} should be one of the ones with a non-empty array returned by
799 * {@link #getOutputSizes(Class)}.</p>
800 *
801 * <p>{@code size} should be one of the ones returned by
802 * {@link #getOutputSizes(Class)}.</p>
803 *
804 * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a
805 * <em>stall duration</em>.</p>
806 *
807 * @param klass
808 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
809 * non-empty array returned by {@link #getOutputSizes(Class)}
810 * @param size an output-compatible size
811     * @return a stall duration {@code >=} 0 in nanoseconds
812 *
813 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
814 * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
815 *
816 * @see CaptureRequest#SENSOR_FRAME_DURATION
817 * @see ImageFormat
818 * @see PixelFormat
819 */
820 public <T> long getOutputStallDuration(final Class<T> klass, final Size size) {
821 if (!isOutputSupportedFor(klass)) {
822 throw new IllegalArgumentException("klass was not supported");
823 }
824
825 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
826                HAL_DATASPACE_UNKNOWN, size, DURATION_STALL);
827    }
828
829 /**
830 * Check if this {@link StreamConfigurationMap} is equal to another
831 * {@link StreamConfigurationMap}.
832 *
833     * <p>Two maps are equal if and only if each of the respective elements is equal.</p>
834 *
835 * @return {@code true} if the objects were equal, {@code false} otherwise
836 */
837 @Override
838 public boolean equals(final Object obj) {
839 if (obj == null) {
840 return false;
841 }
842 if (this == obj) {
843 return true;
844 }
845 if (obj instanceof StreamConfigurationMap) {
846 final StreamConfigurationMap other = (StreamConfigurationMap) obj;
847 // XX: do we care about order?
848 return Arrays.equals(mConfigurations, other.mConfigurations) &&
849 Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) &&
850                    Arrays.equals(mStallDurations, other.mStallDurations) &&
851 Arrays.equals(mHighSpeedVideoConfigurations,
852 other.mHighSpeedVideoConfigurations);
853        }
854 return false;
855 }
856
857 /**
858 * {@inheritDoc}
859 */
860 @Override
861 public int hashCode() {
862 // XX: do we care about order?
863        return HashCodeHelpers.hashCode(
864 mConfigurations, mMinFrameDurations,
865 mStallDurations, mHighSpeedVideoConfigurations);
866    }
867
868 // Check that the argument is supported by #getOutputFormats or #getInputFormats
869 private int checkArgumentFormatSupported(int format, boolean output) {
870 checkArgumentFormat(format);
871
872 int[] formats = output ? getOutputFormats() : getInputFormats();
873 for (int i = 0; i < formats.length; ++i) {
874 if (format == formats[i]) {
875 return format;
876 }
877 }
878
879 throw new IllegalArgumentException(String.format(
880 "format %x is not supported by this stream configuration map", format));
881 }
882
883 /**
884 * Ensures that the format is either user-defined or implementation defined.
885 *
886 * <p>If a format has a different internal representation than the public representation,
887 * passing in the public representation here will fail.</p>
888 *
889 * <p>For example if trying to use {@link ImageFormat#JPEG}:
890 * it has a different public representation than the internal representation
891 * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p>
892 *
893 * <p>Any invalid/undefined formats will raise an exception.</p>
894 *
895 * @param format image format
896 * @return the format
897 *
898 * @throws IllegalArgumentException if the format was invalid
899 */
900 static int checkArgumentFormatInternal(int format) {
901 switch (format) {
902 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
903 case HAL_PIXEL_FORMAT_BLOB:
904            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
905            case HAL_PIXEL_FORMAT_Y16:
906                return format;
907 case ImageFormat.JPEG:
908 throw new IllegalArgumentException(
909 "ImageFormat.JPEG is an unknown internal format");
910 default:
911 return checkArgumentFormat(format);
912 }
913 }
914
915 /**
916 * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat.
917 *
918 * <p>If a format has a different public representation than the internal representation,
919 * passing in the internal representation here will fail.</p>
920 *
921 * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}:
922 * it has a different internal representation than the public representation
923 * {@link ImageFormat#JPEG}, this check will fail.</p>
924 *
925 * <p>Any invalid/undefined formats will raise an exception, including implementation-defined.
926 * </p>
927 *
928 * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p>
929 *
930 * @param format image format
931 * @return the format
932 *
933 * @throws IllegalArgumentException if the format was not user-defined
934 */
935 static int checkArgumentFormat(int format) {
936        if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) {
937 throw new IllegalArgumentException(String.format(
938 "format 0x%x was not defined in either ImageFormat or PixelFormat", format));
939 }
940
941 return format;
942 }
943
944 /**
945     * Convert an internal format compatible with {@code graphics.h} into public-visible
946 * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH.
947     *
948 * <p>In particular these formats are converted:
949 * <ul>
950 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG
951 * </ul>
952 * </p>
953 *
954 * <p>Passing in an implementation-defined format which has no public equivalent will fail;
955 * as will passing in a public format which has a different internal format equivalent.
956 * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
957 *
958 * <p>All other formats are returned as-is, no further invalid check is performed.</p>
959 *
960     * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than
961 * HAL_DATASPACE_DEPTH.</p>
962     *
963 * @param format image format from {@link ImageFormat} or {@link PixelFormat}
964 * @return the converted image formats
965 *
966 * @throws IllegalArgumentException
967 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
968 * {@link ImageFormat#JPEG}
969 *
970 * @see ImageFormat
971 * @see PixelFormat
972 * @see #checkArgumentFormat
973 */
974 static int imageFormatToPublic(int format) {
975 switch (format) {
976 case HAL_PIXEL_FORMAT_BLOB:
977 return ImageFormat.JPEG;
978 case ImageFormat.JPEG:
979 throw new IllegalArgumentException(
980 "ImageFormat.JPEG is an unknown internal format");
981 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
982 throw new IllegalArgumentException(
983 "IMPLEMENTATION_DEFINED must not leak to public API");
984 default:
985 return format;
986 }
987 }
988
989 /**
990     * Convert an internal format compatible with {@code graphics.h} into public-visible
991 * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH.
992 *
993 * <p>In particular these formats are converted:
994 * <ul>
995 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD
996 * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16
997 * </ul>
998 * </p>
999 *
1000 * <p>Passing in an implementation-defined format which has no public equivalent will fail;
1001 * as will passing in a public format which has a different internal format equivalent.
1002 * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1003 *
1004 * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1005 *
1006 * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with
1007 * HAL_DATASPACE_DEPTH.</p>
1008 *
1009 * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1010 * @return the converted image formats
1011 *
1012 * @throws IllegalArgumentException
1013 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1014 * {@link ImageFormat#JPEG}
1015 *
1016 * @see ImageFormat
1017 * @see PixelFormat
1018 * @see #checkArgumentFormat
1019 */
1020 static int depthFormatToPublic(int format) {
1021 switch (format) {
1022 case HAL_PIXEL_FORMAT_BLOB:
1023 return ImageFormat.DEPTH_POINT_CLOUD;
1024 case HAL_PIXEL_FORMAT_Y16:
1025 return ImageFormat.DEPTH16;
1026 case ImageFormat.JPEG:
1027 throw new IllegalArgumentException(
1028 "ImageFormat.JPEG is an unknown internal format");
1029 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1030 throw new IllegalArgumentException(
1031 "IMPLEMENTATION_DEFINED must not leak to public API");
1032 default:
1033 throw new IllegalArgumentException(
1034 "Unknown DATASPACE_DEPTH format " + format);
1035 }
1036 }
1037
1038 /**
1039     * Convert image formats from internal to public formats (in-place).
1040 *
1041 * @param formats an array of image formats
1042 * @return {@code formats}
1043 *
1044 * @see #imageFormatToPublic
1045 */
1046 static int[] imageFormatToPublic(int[] formats) {
1047 if (formats == null) {
1048 return null;
1049 }
1050
1051 for (int i = 0; i < formats.length; ++i) {
1052 formats[i] = imageFormatToPublic(formats[i]);
1053 }
1054
1055 return formats;
1056 }
1057
1058 /**
1059 * Convert a public format compatible with {@code ImageFormat} to an internal format
1060 * from {@code graphics.h}.
1061 *
1062 * <p>In particular these formats are converted:
1063 * <ul>
1064 * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB
1065     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB
1066 * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16
1067     * </ul>
1068 * </p>
1069 *
1070 * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1071 * as will passing in an internal format which has a different public format equivalent.
1072 * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1073 *
1074 * <p>All other formats are returned as-is, no invalid check is performed.</p>
1075 *
1076 * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1077 *
1078 * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1079 * @return the converted image formats
1080 *
1081 * @see ImageFormat
1082 * @see PixelFormat
1083 *
1084 * @throws IllegalArgumentException
1085 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1086 */
1087 static int imageFormatToInternal(int format) {
1088 switch (format) {
1089 case ImageFormat.JPEG:
1090            case ImageFormat.DEPTH_POINT_CLOUD:
1091                return HAL_PIXEL_FORMAT_BLOB;
1092            case ImageFormat.DEPTH16:
1093 return HAL_PIXEL_FORMAT_Y16;
1094            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1095 throw new IllegalArgumentException(
1096 "IMPLEMENTATION_DEFINED is not allowed via public API");
1097 default:
1098 return format;
1099 }
1100 }
1101
1102 /**
1103     * Convert a public format compatible with {@code ImageFormat} to an internal dataspace
1104 * from {@code graphics.h}.
1105 *
1106 * <p>In particular these formats are converted:
1107 * <ul>
1108 * <li>ImageFormat.JPEG => HAL_DATASPACE_JFIF
1109 * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH
1110 * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH
1111 * <li>others => HAL_DATASPACE_UNKNOWN
1112 * </ul>
1113 * </p>
1114 *
1115 * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1116 * as will passing in an internal format which has a different public format equivalent.
1117 * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1118 *
1119 * <p>All other formats are returned as-is, no invalid check is performed.</p>
1120 *
1121 * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1122 *
1123 * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1124 * @return the converted image formats
1125 *
1126 * @see ImageFormat
1127 * @see PixelFormat
1128 *
1129 * @throws IllegalArgumentException
1130 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1131 */
1132 static int imageFormatToDataspace(int format) {
1133 switch (format) {
1134 case ImageFormat.JPEG:
1135 return HAL_DATASPACE_JFIF;
1136 case ImageFormat.DEPTH_POINT_CLOUD:
1137 case ImageFormat.DEPTH16:
1138 return HAL_DATASPACE_DEPTH;
1139 default:
1140 return HAL_DATASPACE_UNKNOWN;
1141 }
1142 }
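    // For illustration, the combined (format, dataspace) mapping implied by
    // imageFormatToInternal and imageFormatToDataspace:
    //
    //     ImageFormat.JPEG              -> (HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_JFIF)
    //     ImageFormat.DEPTH_POINT_CLOUD -> (HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_DEPTH)
    //     ImageFormat.DEPTH16           -> (HAL_PIXEL_FORMAT_Y16,  HAL_DATASPACE_DEPTH)
    //     other public formats          -> (unchanged format,      HAL_DATASPACE_UNKNOWN)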
1143
1144 /**
1145     * Convert image formats from public to internal formats (in-place).
1146 *
1147 * @param formats an array of image formats
1148 * @return {@code formats}
1149 *
1150 * @see #imageFormatToInternal
1151 *
1152 * @hide
1153 */
1154 public static int[] imageFormatToInternal(int[] formats) {
1155 if (formats == null) {
1156 return null;
1157 }
1158
1159 for (int i = 0; i < formats.length; ++i) {
1160 formats[i] = imageFormatToInternal(formats[i]);
1161 }
1162
1163 return formats;
1164 }
1165
1166 private Size[] getPublicFormatSizes(int format, boolean output) {
1167 try {
1168 checkArgumentFormatSupported(format, output);
1169 } catch (IllegalArgumentException e) {
1170 return null;
1171 }
1172
1173        int internalFormat = imageFormatToInternal(format);
1174 int dataspace = imageFormatToDataspace(format);
1175
1176        return getInternalFormatSizes(internalFormat, dataspace, output);
1177    }
1178
1179    private Size[] getInternalFormatSizes(int format, int dataspace, boolean output) {
1180
1181 HashMap<Integer, Integer> formatsMap =
1182 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthOutputFormats : getFormatsMap(output);
1183
1184 Integer sizesCount = formatsMap.get(format);
1185 if (sizesCount == null) {
1186 throw new IllegalArgumentException("format not available");
1187 }
1188
1189 int len = sizesCount;
1190 Size[] sizes = new Size[len];
1191 int sizeIndex = 0;
1192
1193        StreamConfiguration[] configurations =
1194 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;
1195
1196
1197 for (StreamConfiguration config : configurations) {
1198            if (config.getFormat() == format && config.isOutput() == output) {
1199 sizes[sizeIndex++] = config.getSize();
1200 }
1201 }
1202
1203 if (sizeIndex != len) {
1204 throw new AssertionError(
1205 "Too few sizes (expected " + len + ", actual " + sizeIndex + ")");
1206 }
1207
1208 return sizes;
1209 }
1210
1211    /** Get the list of publicly visible output formats; does not include IMPL_DEFINED */
1212 private int[] getPublicFormats(boolean output) {
1213 int[] formats = new int[getPublicFormatCount(output)];
1214
1215 int i = 0;
1216
1217 for (int format : getFormatsMap(output).keySet()) {
1218            if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
1219 format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
1220                formats[i++] = imageFormatToPublic(format);
1221            }
1222 }
1223        if (output) {
1224 for (int format : mDepthOutputFormats.keySet()) {
1225 formats[i++] = depthFormatToPublic(format);
1226 }
1227 }
1228        if (formats.length != i) {
1229 throw new AssertionError("Too few formats " + i + ", expected " + formats.length);
1230 }
1231
1232        return formats;
1233    }
1234
1235 /** Get the format -> size count map for either output or input formats */
1236 private HashMap<Integer, Integer> getFormatsMap(boolean output) {
1237 return output ? mOutputFormats : mInputFormats;
1238 }
1239
1240    private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) {
1241        // assume format is already checked, since it's internal
1242
1243        if (!arrayContains(getInternalFormatSizes(format, dataspace, /*output*/true), size)) {
1244            throw new IllegalArgumentException("size was not supported");
1245 }
1246
1247        StreamConfigurationDuration[] durations = getDurations(duration, dataspace);
1248
1249 for (StreamConfigurationDuration configurationDuration : durations) {
1250 if (configurationDuration.getFormat() == format &&
1251 configurationDuration.getWidth() == size.getWidth() &&
1252 configurationDuration.getHeight() == size.getHeight()) {
1253 return configurationDuration.getDuration();
1254 }
1255 }
1256        // Default duration is '0' (unsupported/no extra stall)
1257 return 0;
1258    }
1259
1260 /**
1261 * Get the durations array for the kind of duration
1262 *
1263 * @see #DURATION_MIN_FRAME
1264 * @see #DURATION_STALL
1265 * */
1266    private StreamConfigurationDuration[] getDurations(int duration, int dataspace) {
1267        switch (duration) {
1268 case DURATION_MIN_FRAME:
1269                return (dataspace == HAL_DATASPACE_DEPTH) ?
1270 mDepthMinFrameDurations : mMinFrameDurations;
1271            case DURATION_STALL:
1272                return (dataspace == HAL_DATASPACE_DEPTH) ?
1273 mDepthStallDurations : mStallDurations;
1274            default:
1275 throw new IllegalArgumentException("duration was invalid");
1276 }
1277 }
1278
1279    /** Count the number of publicly-visible output formats */
1280 private int getPublicFormatCount(boolean output) {
1281 HashMap<Integer, Integer> formatsMap = getFormatsMap(output);
1282
1283 int size = formatsMap.size();
1284 if (formatsMap.containsKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
1285 size -= 1;
1286 }
1287        if (formatsMap.containsKey(HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
1288 size -= 1;
1289 }
1290        if (output) {
1291 size += mDepthOutputFormats.size();
1292 }
1293
1294        return size;
1295 }
1296
1297 private static <T> boolean arrayContains(T[] array, T element) {
1298 if (array == null) {
1299 return false;
1300 }
1301
1302 for (T el : array) {
1303 if (Objects.equals(el, element)) {
1304 return true;
1305 }
1306 }
1307
1308 return false;
1309 }
1310
1311 // from system/core/include/system/graphics.h
1312 private static final int HAL_PIXEL_FORMAT_BLOB = 0x21;
1313 private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22;
1314 private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24;
1315    private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159;
1316
1317 private static final int HAL_DATASPACE_UNKNOWN = 0x0;
1318 private static final int HAL_DATASPACE_JFIF = 0x101;
1319 private static final int HAL_DATASPACE_DEPTH = 0x1000;
1320
1321 /**
1322     * @see #getDurations(int, int)
1323     */
1324 private static final int DURATION_MIN_FRAME = 0;
1325 private static final int DURATION_STALL = 1;
1326
1327 private final StreamConfiguration[] mConfigurations;
1328 private final StreamConfigurationDuration[] mMinFrameDurations;
1329 private final StreamConfigurationDuration[] mStallDurations;
1330
1331 private final StreamConfiguration[] mDepthConfigurations;
1332 private final StreamConfigurationDuration[] mDepthMinFrameDurations;
1333 private final StreamConfigurationDuration[] mDepthStallDurations;
1334
1335    private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations;
1336
1337 /** ImageFormat -> num output sizes mapping */
1338 private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mOutputFormats =
1339 new HashMap<Integer, Integer>();
1340 /** ImageFormat -> num input sizes mapping */
1341 private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mInputFormats =
1342 new HashMap<Integer, Integer>();
1343    /** ImageFormat -> num depth output sizes mapping */
1344 private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mDepthOutputFormats =
1345 new HashMap<Integer, Integer>();
1346    /** High speed video Size -> FPS range count mapping*/
1347 private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap =
1348 new HashMap<Size, Integer>();
1349 /** High speed video FPS range -> Size count mapping*/
1350 private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer>
1351 mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>();
1352
1353}