Merge "Camera3: SHUTTER event for reprocessing" into mnc-dev
diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h
index 73726cf..d23bdf2 100644
--- a/include/hardware/camera3.h
+++ b/include/hardware/camera3.h
@@ -135,6 +135,8 @@
  *
  *   - Addition of rotation field to camera3_stream_t.
  *
+ *   - Addition of camera3 stream configuration operation mode to camera3_stream_configuration_t.
+ *
  */
 
 /**
@@ -1397,6 +1399,83 @@
 } camera3_stream_rotation_t;
 
 /**
+ * camera3_stream_configuration_mode_t:
+ *
+ * This defines the general operation mode for the HAL (for a given stream configuration), where
+ * modes besides NORMAL have different semantics, and usually limit the generality of the API in
+ * exchange for higher performance in some particular area.
+ */
+typedef enum camera3_stream_configuration_mode {
+    /**
+     * Normal stream configuration operation mode. This is the default camera operation mode,
+     * where all semantics of HAL APIs and metadata controls apply.
+     */
+    CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE = 0,
+
+    /**
+     * Special constrained high speed operation mode for devices that cannot support high
+     * speed output in NORMAL mode. All streams in this configuration operate in high speed
+     * mode and have different characteristics and limitations in order to achieve high speed
+     * output. NORMAL mode can still be used for high speed output if the HAL can support it
+     * while satisfying all the semantics of the HAL APIs and metadata controls. It is
+     * recommended that the HAL support high speed output in NORMAL mode (by advertising the
+     * high speed FPS ranges in android.control.aeAvailableTargetFpsRanges) whenever possible.
+     *
+     * This mode has the following limitations/requirements:
+     *
+     *   1. The HAL must support up to 2 streams with sizes reported by
+     *      android.control.availableHighSpeedVideoConfigurations.
+     *   2. In this mode, the HAL is expected to support output frame rates of 120fps or
+     *      higher. This mode must support the targeted FPS range and size configurations
+     *      reported by android.control.availableHighSpeedVideoConfigurations.
+     *   3. The HAL must support HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED output stream format.
+     *   4. To achieve efficient high speed streaming, the HAL may have to aggregate
+     *      multiple frames together and send them to the camera device for processing, where
+     *      the request controls are the same for all frames in the batch (batch mode). The HAL
+     *      must support the maximum batch size, and the batch size requirements are defined by
+     *      android.control.availableHighSpeedVideoConfigurations.
+     *   5. In this mode, the HAL must override aeMode, awbMode, and afMode to ON, ON, and
+     *      CONTINUOUS_VIDEO, respectively. All post-processing block mode controls must be
+     *      overridden to be FAST. Therefore, no manual control of capture and post-processing
+     *      parameters is possible. All other controls operate the same as when
+     *      android.control.mode == AUTO. This means that all other android.control.* fields
+     *      must continue to work, such as:
+     *
+     *      android.control.aeTargetFpsRange
+     *      android.control.aeExposureCompensation
+     *      android.control.aeLock
+     *      android.control.awbLock
+     *      android.control.effectMode
+     *      android.control.aeRegions
+     *      android.control.afRegions
+     *      android.control.awbRegions
+     *      android.control.afTrigger
+     *      android.control.aePrecaptureTrigger
+     *
+     *      Outside of android.control.*, the following controls must work:
+     *
+     *      android.flash.mode (TORCH mode only, automatic flash for still capture will not work
+     *      since aeMode is ON)
+     *      android.lens.opticalStabilizationMode (if it is supported)
+     *      android.scaler.cropRegion
+     *      android.statistics.faceDetectMode (if it is supported)
+     *
+     * For more details about high speed stream requirements, see
+     * android.control.availableHighSpeedVideoConfigurations and CONSTRAINED_HIGH_SPEED_VIDEO
+     * capability defined in android.request.availableCapabilities.
+     *
+     * This mode only needs to be supported by HALs that include CONSTRAINED_HIGH_SPEED_VIDEO in
+     * the android.request.availableCapabilities static metadata.
+     */
+    CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE = 1,
+
+    /**
+     * First value for vendor-defined stream configuration modes.
+     */
+    CAMERA3_VENDOR_STREAM_CONFIGURATION_MODE_START = 0x8000
+} camera3_stream_configuration_mode_t;
+
+/**
  * camera3_stream_t:
  *
  * A handle to a single camera input or output stream. A stream is defined by
@@ -1598,6 +1677,19 @@
      */
     camera3_stream_t **streams;
 
+    /**
+     * >= CAMERA_DEVICE_API_VERSION_3_3:
+     *
+     * The operation mode of the streams in this configuration, one of the values defined in
+     * camera3_stream_configuration_mode_t.
+     *
+     * The HAL can use this mode as an indicator for setting stream properties (e.g.,
+     * camera3_stream->max_buffers) appropriately. For example, if the configuration is
+     * CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE, the HAL may want to set aside
+     * more buffers for batch mode operation (see
+     * android.control.availableHighSpeedVideoConfigurations for the batch mode definition).
+     */
+    uint32_t operation_mode;
 } camera3_stream_configuration_t;
 
 /**
@@ -2628,6 +2720,11 @@
      *          - Unsupported rotation configuration (only applies to
      *            devices with version >= CAMERA_DEVICE_API_VERSION_3_3)
      *
+     *          - Stream sizes/formats don't satisfy the
+     *            camera3_stream_configuration_t->operation_mode requirements for non-NORMAL mode,
+     *            or the requested operation_mode is not supported by the HAL.
+     *            (only applies to devices with version >= CAMERA_DEVICE_API_VERSION_3_3)
+     *
      *          Note that the framework submitting an invalid stream
      *          configuration is not normal operation, since stream
      *          configurations are checked before configure. An invalid
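
A minimal sketch of how a HAL's configure_streams() implementation might act on the new
operation_mode field and reject unsupported modes with -EINVAL, per the documentation above.
hal_supports_high_speed() and the surrounding wrapper are hypothetical placeholders, not part
of this change:

    #include <errno.h>
    #include <hardware/camera3.h>

    // Hypothetical capability check -- a real HAL would validate the stream
    // sizes/formats against android.control.availableHighSpeedVideoConfigurations.
    static bool hal_supports_high_speed(const camera3_stream_configuration_t *list) {
        return list->num_streams <= 2;  // illustrative placeholder only
    }

    static int sketch_configure_streams(const struct camera3_device *dev,
                                        camera3_stream_configuration_t *stream_list) {
        (void) dev;
        switch (stream_list->operation_mode) {
        case CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE:
            break;  // default semantics, nothing extra to do
        case CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE:
            if (!hal_supports_high_speed(stream_list)) {
                return -EINVAL;  // sizes/formats don't satisfy the high speed requirements
            }
            // A real HAL would also raise camera3_stream->max_buffers here so that
            // enough buffers are set aside for batch mode operation.
            break;
        default:
            return -EINVAL;  // unrecognized or unsupported operation mode
        }
        /* ... the usual per-stream configuration would follow ... */
        return 0;
    }
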
diff --git a/include/hardware/gatekeeper.h b/include/hardware/gatekeeper.h
index ebe79b8..6d2fb0b 100644
--- a/include/hardware/gatekeeper.h
+++ b/include/hardware/gatekeeper.h
@@ -76,7 +76,7 @@
      * - enrolled_password_handle: on success, a buffer will be allocated with the
      *   new password handle referencing the password provided in desired_password.
      *   This buffer can be used on subsequent calls to enroll or verify.
-     *   The caller is responsible for deallocating this buffer via a call to free()
+     *   The caller is responsible for deallocating this buffer via a call to delete[]
      * - enrolled_password_handle_length: pointer to the length in bytes of the buffer allocated
      *   by this function and pointed to by *enrolled_password_handle_length.
      *
@@ -124,10 +124,13 @@
      *
      * - auth_token: on success, a buffer containing the authentication token
      *   resulting from this verification is assigned to *auth_token. The caller
-     *   is responsible for deallocating this memory via a call to free()
+     *   is responsible for deallocating this memory via a call to delete[]
      * - auth_token_length: on success, the length in bytes of the authentication
      *   token assigned to *auth_token will be assigned to *auth_token_length
      *
+     * - request_reenroll: a request to the upper layers to re-enroll the verified
+     *   password due to a version change. Not set if verification fails.
+     *
      * Returns:
      * - 0 on success
      * - An error code < 0 on failure, or
@@ -138,7 +141,7 @@
     int (*verify)(const struct gatekeeper_device *dev, uint32_t uid, uint64_t challenge,
             const uint8_t *enrolled_password_handle, uint32_t enrolled_password_handle_length,
             const uint8_t *provided_password, uint32_t provided_password_length,
-            uint8_t **auth_token, uint32_t *auth_token_length);
+            uint8_t **auth_token, uint32_t *auth_token_length, bool *request_reenroll);
 
 };
 typedef struct gatekeeper_device gatekeeper_device_t;
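
A sketch of a caller using the revised verify() signature, assuming the gatekeeper_device_t
declared above; the re-enroll handling described in the comment is an upper-layer assumption,
not something defined by this header:

    #include <stdint.h>
    #include <hardware/gatekeeper.h>

    int verify_and_release(const gatekeeper_device_t *dev, uint32_t uid, uint64_t challenge,
                           const uint8_t *handle, uint32_t handle_len,
                           const uint8_t *password, uint32_t password_len) {
        uint8_t *auth_token = nullptr;
        uint32_t auth_token_length = 0;
        bool request_reenroll = false;
        int ret = dev->verify(dev, uid, challenge,
                              handle, handle_len,
                              password, password_len,
                              &auth_token, &auth_token_length, &request_reenroll);
        if (ret == 0) {
            if (request_reenroll) {
                // The HAL's handle version changed; upper layers should re-enroll
                // the password so future handles use the new version.
            }
            // Per the updated documentation, the token is released with delete[],
            // not free().
            delete[] auth_token;
        }
        return ret;
    }
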
diff --git a/include/hardware/gralloc.h b/include/hardware/gralloc.h
index 7d00114..7aeb84e 100644
--- a/include/hardware/gralloc.h
+++ b/include/hardware/gralloc.h
@@ -127,6 +127,17 @@
     /* buffer may be used as a cursor */
     GRALLOC_USAGE_CURSOR                = 0x00008000,
 
+    /* Set by the consumer to indicate to the producer that they may attach a
+     * buffer that they did not detach from the BufferQueue. Will be filtered
+     * out by GRALLOC_USAGE_ALLOC_MASK, so gralloc modules will not need to
+     * handle this flag. */
+    GRALLOC_USAGE_FOREIGN_BUFFERS       = 0x00010000,
+
+    /* Mask of all flags which could be passed to a gralloc module for buffer
+     * allocation. Any flags not in this mask do not need to be handled by
+     * gralloc modules. */
+    GRALLOC_USAGE_ALLOC_MASK            = ~(GRALLOC_USAGE_FOREIGN_BUFFERS),
+
     /* implementation-specific private usage flags */
     GRALLOC_USAGE_PRIVATE_0             = 0x10000000,
     GRALLOC_USAGE_PRIVATE_1             = 0x20000000,
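
A sketch of how an allocator front end might apply GRALLOC_USAGE_ALLOC_MASK before calling
into a gralloc module; the wrapper function is hypothetical, only the mask and the
GRALLOC_USAGE_FOREIGN_BUFFERS flag come from this change:

    #include <hardware/gralloc.h>

    static int alloc_filtered(alloc_device_t *dev, int w, int h, int format, int usage,
                              buffer_handle_t *handle, int *stride) {
        // GRALLOC_USAGE_FOREIGN_BUFFERS is only meaningful between producer and
        // consumer, so it is masked off and never reaches the gralloc module.
        const int alloc_usage = static_cast<int>(usage & GRALLOC_USAGE_ALLOC_MASK);
        return dev->alloc(dev, w, h, format, alloc_usage, handle, stride);
    }
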
diff --git a/modules/audio_remote_submix/audio_hw.cpp b/modules/audio_remote_submix/audio_hw.cpp
index b9dcf7a..20c0fab 100644
--- a/modules/audio_remote_submix/audio_hw.cpp
+++ b/modules/audio_remote_submix/audio_hw.cpp
@@ -25,6 +25,7 @@
 #include <sys/time.h>
 #include <sys/limits.h>
 
+#include <cutils/compiler.h>
 #include <cutils/log.h>
 #include <cutils/properties.h>
 #include <cutils/str_parms.h>
@@ -62,7 +63,7 @@
 #endif // SUBMIX_VERBOSE_LOGGING
 
 // NOTE: This value will be rounded up to the nearest power of 2 by MonoPipe().
-#define DEFAULT_PIPE_SIZE_IN_FRAMES  (1024*8)
+#define DEFAULT_PIPE_SIZE_IN_FRAMES  (1024*4)
 // Value used to divide the MonoPipe() buffer into segments that are written to the source and
 // read from the sink.  The maximum latency of the device is the size of the MonoPipe's buffer
 // the minimum latency is the MonoPipe buffer size divided by this value.
@@ -178,6 +179,7 @@
     struct submix_audio_device *dev;
     int route_handle;
     bool output_standby;
+    uint64_t write_counter_frames;
 #if LOG_STREAMS_TO_FILES
     int log_fd;
 #endif // LOG_STREAMS_TO_FILES
@@ -189,11 +191,10 @@
     int route_handle;
     bool input_standby;
     bool output_standby_rec_thr; // output standby state as seen from record thread
-
     // wall clock when recording starts
     struct timespec record_start_time;
     // how many frames have been requested to be read
-    int64_t read_counter_frames;
+    uint64_t read_counter_frames;
 
 #if ENABLE_LEGACY_INPUT_OPEN
     // Number of references to this input stream.
@@ -699,6 +700,7 @@
     pthread_mutex_lock(&rsxadev->lock);
 
     out->output_standby = true;
+    out->write_counter_frames = 0;
 
     pthread_mutex_unlock(&rsxadev->lock);
 
@@ -852,6 +854,9 @@
 
     pthread_mutex_lock(&rsxadev->lock);
     sink.clear();
+    if (written_frames > 0) {
+        out->write_counter_frames += written_frames;
+    }
     pthread_mutex_unlock(&rsxadev->lock);
 
     if (written_frames < 0) {
@@ -863,12 +868,51 @@
     return written_bytes;
 }
 
+static int out_get_presentation_position(const struct audio_stream_out *stream,
+                                   uint64_t *frames, struct timespec *timestamp)
+{
+    const submix_stream_out * out = reinterpret_cast<const struct submix_stream_out *>
+            (reinterpret_cast<const uint8_t *>(stream) -
+                    offsetof(struct submix_stream_out, stream));
+    struct submix_audio_device * const rsxadev = out->dev;
+    int ret = 0;
+
+    pthread_mutex_lock(&rsxadev->lock);
+
+    if (frames) {
+        const ssize_t frames_in_pipe =
+                rsxadev->routes[out->route_handle].rsxSource->availableToRead();
+        if (CC_UNLIKELY(frames_in_pipe < 0)) {
+            *frames = out->write_counter_frames;
+        } else {
+            *frames = out->write_counter_frames > (uint64_t) frames_in_pipe ?
+                    out->write_counter_frames - frames_in_pipe : 0;
+        }
+    }
+    if (timestamp) {
+        clock_gettime(CLOCK_MONOTONIC, timestamp);
+    }
+
+    pthread_mutex_unlock(&rsxadev->lock);
+
+    SUBMIX_ALOGV("out_get_presentation_position() got frames=%lld timestamp sec=%lld",
+            frames ? (long long) *frames : -1LL, timestamp ? (long long) timestamp->tv_sec : -1LL);
+
+    return ret;
+}
+
 static int out_get_render_position(const struct audio_stream_out *stream,
                                    uint32_t *dsp_frames)
 {
-    (void)stream;
-    (void)dsp_frames;
-    return -EINVAL;
+    if (!dsp_frames) {
+        return -EINVAL;
+    }
+    uint64_t frames = 0;
+    int ret = out_get_presentation_position(stream, &frames, NULL);
+    if (ret == 0) {
+        *dsp_frames = (uint32_t) frames;
+    }
+    return ret;
 }
 
 static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
@@ -1335,6 +1379,9 @@
     out->stream.write = out_write;
     out->stream.get_render_position = out_get_render_position;
     out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
+    out->stream.get_presentation_position = out_get_presentation_position;
+
+    out->write_counter_frames = 0;
 
 #if ENABLE_RESAMPLING
     // Recreate the pipe with the correct sample rate so that MonoPipe.write() rate limits
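
For reference, a sketch of how a client holding an audio_stream_out (as defined in
hardware/audio.h) might query the presentation position entry point wired up above; the
logging helper is illustrative only:

    #include <stdio.h>
    #include <time.h>
    #include <hardware/audio.h>

    static void log_presentation_position(struct audio_stream_out *out) {
        uint64_t frames = 0;
        struct timespec ts = {0, 0};
        if (out->get_presentation_position != NULL &&
                out->get_presentation_position(out, &frames, &ts) == 0) {
            // frames is write_counter_frames minus what is still queued in the
            // MonoPipe, i.e. an estimate of how much the far end has consumed.
            printf("presented %llu frames at %ld.%09ld\n",
                   (unsigned long long) frames, (long) ts.tv_sec, ts.tv_nsec);
        }
    }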