Merge "Add uniqueId to Virtual Display and pass through to inputflinger (2/2)" into oc-dev
diff --git a/cmds/atrace/Android.bp b/cmds/atrace/Android.bp
index 4f1065f..6c5869a 100644
--- a/cmds/atrace/Android.bp
+++ b/cmds/atrace/Android.bp
@@ -16,9 +16,6 @@
"libz",
"libbase",
],
- static_libs: [
- "libpdx_default_transport",
- ],
init_rc: ["atrace.rc"],
diff --git a/cmds/atrace/atrace.cpp b/cmds/atrace/atrace.cpp
index f8e0ad5..6cfbed9 100644
--- a/cmds/atrace/atrace.cpp
+++ b/cmds/atrace/atrace.cpp
@@ -41,7 +41,6 @@
#include <hidl/ServiceManagement.h>
#include <cutils/properties.h>
-#include <pdx/default_transport/service_utility.h>
#include <utils/String8.h>
#include <utils/Timers.h>
#include <utils/Tokenizer.h>
@@ -49,7 +48,6 @@
#include <android-base/file.h>
using namespace android;
-using pdx::default_transport::ServiceUtility;
using std::string;
#define NELEM(x) ((int) (sizeof(x) / sizeof((x)[0])))
@@ -816,7 +814,6 @@
ok &= setAppCmdlineProperty(&packageList[0]);
ok &= pokeBinderServices();
pokeHalServices();
- ok &= ServiceUtility::PokeServices();
// Disable all the sysfs enables. This is done as a separate loop from
// the enables to allow the same enable to exist in multiple categories.
@@ -854,7 +851,6 @@
setTagsProperty(0);
clearAppProperties();
pokeBinderServices();
- ServiceUtility::PokeServices();
// Set the options back to their defaults.
setTraceOverwriteEnable(true);
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 19dfb87..a0d987d 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -1100,10 +1100,13 @@
ALOGV("unlink %s\n", dex_path);
if (unlink(dex_path) < 0) {
- return error(StringPrintf("Failed to unlink %s", dex_path));
- } else {
- return ok();
+ // It's ok if we don't have a dalvik cache path. Report error only when the path exists
+ // but could not be unlinked.
+ if (errno != ENOENT) {
+ return error(StringPrintf("Failed to unlink %s", dex_path));
+ }
}
+ return ok();
}
struct stats {
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 3710e6b..6472a0f 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -1198,7 +1198,11 @@
if (dexopt_action == DEX2OAT_FOR_BOOT_IMAGE &&
in_vdex_wrapper_fd->get() != -1 &&
in_vdex_path_str == out_vdex_path_str) {
- out_vdex_wrapper_fd->reset(in_vdex_wrapper_fd->get());
+ // We unlink the file in case the invocation of dex2oat fails, to ensure we don't
+ // have bogus stale vdex files.
+ out_vdex_wrapper_fd->reset(
+ in_vdex_wrapper_fd->get(),
+ [out_vdex_path_str]() { unlink(out_vdex_path_str.c_str()); });
// Disable auto close for the in wrapper fd (it will be done when destructing the out
// wrapper).
in_vdex_wrapper_fd->DisableAutoClose();
diff --git a/cmds/installd/otapreopt.cpp b/cmds/installd/otapreopt.cpp
index ff838ce..9a3fb70 100644
--- a/cmds/installd/otapreopt.cpp
+++ b/cmds/installd/otapreopt.cpp
@@ -734,6 +734,10 @@
}
static const char* ParseNull(const char* arg) {
+ // b/38186355. Revert soon.
+ if (strcmp(arg, "!null") == 0) {
+ return nullptr;
+ }
return (strcmp(arg, "!") == 0) ? nullptr : arg;
}
diff --git a/cmds/installd/tests/installd_service_test.cpp b/cmds/installd/tests/installd_service_test.cpp
index 4a1f333..34818f6 100644
--- a/cmds/installd/tests/installd_service_test.cpp
+++ b/cmds/installd/tests/installd_service_test.cpp
@@ -54,10 +54,12 @@
return false;
}
-bool create_cache_path(char path[PKG_PATH_MAX] ATTRIBUTE_UNUSED,
- const char *src ATTRIBUTE_UNUSED,
- const char *instruction_set ATTRIBUTE_UNUSED) {
- return false;
+bool create_cache_path(char path[PKG_PATH_MAX],
+ const char *src,
+ const char *instruction_set) {
+ // Not really a valid path but it's good enough for testing.
+ sprintf(path,"/data/dalvik-cache/%s/%s", instruction_set, src);
+ return true;
}
static void mkdir(const char* path, uid_t owner, gid_t group, mode_t mode) {
@@ -151,5 +153,13 @@
EXPECT_EQ(10000, stat_gid("com.example/bar/file"));
}
+TEST_F(ServiceTest, RmDexNoDalvikCache) {
+ LOG(INFO) << "RmDexNoDalvikCache";
+
+ // Try to remove a non existing dalvik cache dex. The call should be
+ // successful because there's nothing to remove.
+ EXPECT_TRUE(service->rmdex("com.example", "arm").isOk());
+}
+
} // namespace installd
} // namespace android
diff --git a/cmds/vr/pose/Android.mk b/cmds/vr/pose/Android.mk
deleted file mode 100644
index 8be3214..0000000
--- a/cmds/vr/pose/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-sourceFiles := \
- pose.cpp
-
-staticLibraries := \
- libdvrcommon \
- libvrsensor \
- libpdx_default_transport \
-
-sharedLibraries := \
- libcutils \
- liblog
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(sourceFiles)
-LOCAL_STATIC_LIBRARIES := $(staticLibraries)
-LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
-LOCAL_MODULE := pose
-LOCAL_MODULE_TAGS := optional
-include $(BUILD_EXECUTABLE)
diff --git a/cmds/vr/pose/pose.cpp b/cmds/vr/pose/pose.cpp
deleted file mode 100644
index 2288a86..0000000
--- a/cmds/vr/pose/pose.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
-// pose is a utility to query and manipulate the current pose via the pose
-// service.
-
-#include <cmath>
-#include <cstdio>
-#include <iomanip>
-#include <iostream>
-#include <regex>
-#include <vector>
-
-#include <private/dvr/types.h>
-#include <dvr/pose_client.h>
-
-using android::dvr::vec3;
-using android::dvr::quat;
-
-namespace {
-
-// Prints usage information to stderr.
-void PrintUsage(const char* executable_name) {
- std::cerr << "Usage: " << executable_name
- << " [--identity|--set=...|--unfreeze]\n"
- << "\n"
- << " no arguments: display the current pose.\n"
- << " --identity: freeze the pose to the identity pose.\n"
- << " --set=rx,ry,rz,rw[,px,py,pz]: freeze the pose to the given "
- "state. rx,ry,rz,rw are interpreted as rotation quaternion. "
- " px, py, pz as position (0,0,0 if omitted).\n"
- << " --mode=mode: sets mode to one of normal, head_turn:slow, "
- "head_turn:fast, rotate:slow, rotate:medium, rotate:fast, "
- "circle_strafe.\n"
- << " --unfreeze: sets the mode to normal.\n"
- << " --log_controller=[true|false]: starts and stops controller"
- " logs\n"
- << std::endl;
-}
-
-// If return_code is negative, print out its corresponding string description
-// and exit the program with a non-zero exit code.
-void ExitIfNegative(int return_code) {
- if (return_code < 0) {
- std::cerr << "Error: " << strerror(-return_code) << std::endl;
- std::exit(1);
- }
-}
-
-// Parses the following command line flags:
-// --identity
-// --set=rx,ry,rz,rw[,px,py,pz]
-// Returns false if parsing fails.
-bool ParseState(const std::string& arg, DvrPoseState* out_state) {
- if (arg == "--identity") {
- *out_state = {.head_from_start_rotation = {0.f, 0.f, 0.f, 1.f},
- .head_from_start_translation = {0.f, 0.f, 0.f},
- .timestamp_ns = 0,
- .sensor_from_start_rotation_velocity = {0.f, 0.f, 0.f}};
- return true;
- }
-
- const std::string prefix("--set=");
- if (arg.size() < 6 || arg.compare(0, prefix.size(), prefix) != 0) {
- return false;
- }
-
- // Tokenize by ','.
- std::regex split_by_comma("[,]+");
- std::sregex_token_iterator token_it(arg.begin() + prefix.size(), arg.end(),
- split_by_comma,
- -1 /* return inbetween parts */);
- std::sregex_token_iterator token_end;
-
- // Convert to float and store values.
- std::vector<float> values;
- for (; token_it != token_end; ++token_it) {
- std::string token = *(token_it);
- float value = 0.f;
- if (sscanf(token.c_str(), "%f", &value) != 1) {
- std::cerr << "Unable to parse --set value as float: " << token
- << std::endl;
- return false;
- } else {
- values.push_back(value);
- }
- }
-
- if (values.size() != 4 && values.size() != 7) {
- std::cerr << "Unable to parse --set, expected either 4 or 7 of values."
- << std::endl;
- return false;
- }
-
- float norm2 = values[0] * values[0] + values[1] * values[1] +
- values[2] * values[2] + values[3] * values[3];
- if (std::abs(norm2 - 1.f) > 1e-4) {
- if (norm2 < 1e-8) {
- std::cerr << "--set quaternion norm close to zero." << std::endl;
- return false;
- }
- float norm = std::sqrt(norm2);
- values[0] /= norm;
- values[1] /= norm;
- values[2] /= norm;
- values[3] /= norm;
- }
-
- out_state->head_from_start_rotation = {values[0], values[1], values[2],
- values[3]};
-
- if (values.size() == 7) {
- out_state->head_from_start_translation = {values[4], values[5], values[6]};
- } else {
- out_state->head_from_start_translation = {0.f, 0.f, 0.f};
- }
-
- out_state->timestamp_ns = 0;
- out_state->sensor_from_start_rotation_velocity = {0.f, 0.f, 0.f};
-
- return true;
-}
-
-// Parses the command line flag --mode.
-// Returns false if parsing fails.
-bool ParseSetMode(const std::string& arg, DvrPoseMode* mode) {
- const std::string prefix("--mode=");
- if (arg.size() < prefix.size() ||
- arg.compare(0, prefix.size(), prefix) != 0) {
- return false;
- }
-
- std::string value = arg.substr(prefix.size());
-
- if (value == "normal") {
- *mode = DVR_POSE_MODE_6DOF;
- return true;
- } else if (value == "head_turn:slow") {
- *mode = DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW;
- return true;
- } else if (value == "head_turn:fast") {
- *mode = DVR_POSE_MODE_MOCK_HEAD_TURN_FAST;
- return true;
- } else if (value == "rotate:slow") {
- *mode = DVR_POSE_MODE_MOCK_ROTATE_SLOW;
- return true;
- } else if (value == "rotate:medium") {
- *mode = DVR_POSE_MODE_MOCK_ROTATE_MEDIUM;
- return true;
- } else if (value == "rotate:fast") {
- *mode = DVR_POSE_MODE_MOCK_ROTATE_FAST;
- return true;
- } else if (value == "circle_strafe") {
- *mode = DVR_POSE_MODE_MOCK_CIRCLE_STRAFE;
- return true;
- } else {
- return false;
- }
-}
-
-// Parses the command line flag --controller_log.
-// Returns false if parsing fails.
-bool ParseLogController(const std::string& arg, bool* log_enabled) {
- const std::string prefix("--log_controller=");
- if (arg.size() < prefix.size() ||
- arg.compare(0, prefix.size(), prefix) != 0) {
- return false;
- }
-
- std::string value = arg.substr(prefix.size());
-
- if (value == "false") {
- *log_enabled = false;
- return true;
- } else if (value == "true") {
- *log_enabled = true;
- return true;
- } else {
- return false;
- }
-}
-
-// The different actions that the tool can perform.
-enum class Action {
- Query, // Query the current pose.
- Set, // Set the pose and freeze.
- Unfreeze, // Set the pose mode to normal.
- SetMode, // Sets the pose mode.
- LogController, // Start/stop controller logging in sensord.
-};
-
-// The action to perform when no arguments are passed to the tool.
-constexpr Action kDefaultAction = Action::Query;
-
-} // namespace
-
-int main(int argc, char** argv) {
- Action action = kDefaultAction;
- DvrPoseState state;
- DvrPoseMode pose_mode = DVR_POSE_MODE_6DOF;
- bool log_controller = false;
-
- // Parse command-line arguments.
- for (int i = 1; i < argc; ++i) {
- const std::string arg = argv[i];
- if (ParseState(arg, &state) && action == kDefaultAction) {
- action = Action::Set;
- } else if (arg == "--unfreeze" && action == kDefaultAction) {
- action = Action::Unfreeze;
- } else if (ParseSetMode(arg, &pose_mode) && action == kDefaultAction) {
- action = Action::SetMode;
- } else if (ParseLogController(arg, &log_controller)) {
- action = Action::LogController;
- } else {
- PrintUsage(argv[0]);
- return 1;
- }
- }
-
- auto pose_client = dvrPoseCreate();
- if (!pose_client) {
- std::cerr << "Unable to create pose client." << std::endl;
- return 1;
- }
-
- switch (action) {
- case Action::Query: {
- ExitIfNegative(dvrPosePoll(pose_client, &state));
- uint64_t timestamp = state.timestamp_ns;
- const auto& rotation = state.head_from_start_rotation;
- const auto& translation = state.head_from_start_translation;
- const auto& rotation_velocity = state.sensor_from_start_rotation_velocity;
- quat q(rotation.w, rotation.x, rotation.y, rotation.z);
- vec3 angles = q.matrix().eulerAngles(0, 1, 2);
- angles = angles * 180.f / M_PI;
- vec3 x = q * vec3(1.0f, 0.0f, 0.0f);
- vec3 y = q * vec3(0.0f, 1.0f, 0.0f);
- vec3 z = q * vec3(0.0f, 0.0f, 1.0f);
-
- std::cout << "timestamp_ns: " << timestamp << std::endl
- << "rotation_quaternion: " << rotation.x << ", " << rotation.y
- << ", " << rotation.z << ", " << rotation.w << std::endl
- << "rotation_angles: " << angles.x() << ", " << angles.y()
- << ", " << angles.z() << std::endl
- << "translation: " << translation.x << ", " << translation.y
- << ", " << translation.z << std::endl
- << "rotation_velocity: " << rotation_velocity.x << ", "
- << rotation_velocity.y << ", " << rotation_velocity.z
- << std::endl
- << "axes: " << std::setprecision(3)
- << "x(" << x.x() << ", " << x.y() << ", " << x.z() << "), "
- << "y(" << y.x() << ", " << y.y() << ", " << y.z() << "), "
- << "z(" << z.x() << ", " << z.y() << ", " << z.z() << "), "
- << std::endl;
- break;
- }
- case Action::Set: {
- ExitIfNegative(dvrPoseFreeze(pose_client, &state));
- break;
- }
- case Action::Unfreeze: {
- ExitIfNegative(dvrPoseSetMode(pose_client, DVR_POSE_MODE_6DOF));
- break;
- }
- case Action::SetMode: {
- ExitIfNegative(dvrPoseSetMode(pose_client, pose_mode));
- break;
- }
- case Action::LogController: {
- ExitIfNegative(
- dvrPoseLogController(pose_client, log_controller));
- break;
- }
- }
-
- dvrPoseDestroy(pose_client);
-}
diff --git a/include/android/sharedmem.h b/include/android/sharedmem.h
index 8f8a931..46d2f4b 100644
--- a/include/android/sharedmem.h
+++ b/include/android/sharedmem.h
@@ -86,21 +86,20 @@
*
* It is a common use case to create a shared memory region, map it read/write locally to intialize
* content, and then send the shared memory to another process with read only access. Code example
- * as below (error handling ommited).
+ * as below (error handling omitted).
*
- * \code{.c}
- * int fd = ASharedMemory_create("memory", 128);
*
- * // By default it has PROT_READ | PROT_WRITE | PROT_EXEC.
- * char *buffer = (char *) mmap(NULL, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ * int fd = ASharedMemory_create("memory", 128);
*
- * strcpy(buffer, "This is an example."); // trivially initialize content
+ * // By default it has PROT_READ | PROT_WRITE | PROT_EXEC.
+ * char *buffer = (char *) mmap(NULL, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
*
- * // limit access to read only
- * ASharedMemory_setProt(fd, PROT_READ);
+ * strcpy(buffer, "This is an example."); // trivially initialize content
*
- * // share fd with another process here and the other process can only map with PROT_READ.
- * \endcode
+ * // limit access to read only
+ * ASharedMemory_setProt(fd, PROT_READ);
+ *
+ * // share fd with another process here and the other process can only map with PROT_READ.
*
* \param fd file descriptor of the shared memory region.
* \param prot any bitwise-or'ed combination of PROT_READ, PROT_WRITE, PROT_EXEC denoting
diff --git a/include/powermanager/IPowerManager.h b/include/powermanager/IPowerManager.h
index 3230189..3c81f0f 100644
--- a/include/powermanager/IPowerManager.h
+++ b/include/powermanager/IPowerManager.h
@@ -44,10 +44,12 @@
NAP = IBinder::FIRST_CALL_TRANSACTION + 10,
IS_INTERACTIVE = IBinder::FIRST_CALL_TRANSACTION + 11,
IS_POWER_SAVE_MODE = IBinder::FIRST_CALL_TRANSACTION + 12,
- SET_POWER_SAVE_MODE = IBinder::FIRST_CALL_TRANSACTION + 13,
- REBOOT = IBinder::FIRST_CALL_TRANSACTION + 14,
- SHUTDOWN = IBinder::FIRST_CALL_TRANSACTION + 15,
- CRASH = IBinder::FIRST_CALL_TRANSACTION + 16,
+ GET_POWER_SAVE_STATE = IBinder::FIRST_CALL_TRANSACTION + 13,
+ SET_POWER_SAVE_MODE = IBinder::FIRST_CALL_TRANSACTION + 14,
+ REBOOT = IBinder::FIRST_CALL_TRANSACTION + 17,
+ REBOOT_SAFE_MODE = IBinder::FIRST_CALL_TRANSACTION + 18,
+ SHUTDOWN = IBinder::FIRST_CALL_TRANSACTION + 19,
+ CRASH = IBinder::FIRST_CALL_TRANSACTION + 20,
};
DECLARE_META_INTERFACE(PowerManager)
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index aec8f10..6fefb38 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -2547,8 +2547,16 @@
objectsSize = 0;
} else {
while (objectsSize > 0) {
- if (mObjects[objectsSize-1] < desired)
+ if (mObjects[objectsSize-1] < desired) {
+ // Check for an object being sliced
+ if (desired < mObjects[objectsSize-1] + sizeof(flat_binder_object)) {
+ ALOGE("Attempt to shrink Parcel would slice an objects allocated memory");
+ return UNKNOWN_ERROR + 0xBADF10;
+ }
break;
+ }
+ // STOPSHIP: Above code to be replaced with following commented code:
+ // if (mObjects[objectsSize-1] + sizeof(flat_binder_object) <= desired) break;
objectsSize--;
}
}
diff --git a/libs/gui/bufferqueue/1.0/H2BGraphicBufferProducer.cpp b/libs/gui/bufferqueue/1.0/H2BGraphicBufferProducer.cpp
index eafd296..fda5b94 100644
--- a/libs/gui/bufferqueue/1.0/H2BGraphicBufferProducer.cpp
+++ b/libs/gui/bufferqueue/1.0/H2BGraphicBufferProducer.cpp
@@ -1141,7 +1141,7 @@
status_t H2BGraphicBufferProducer::setSidebandStream(
const sp<NativeHandle>& stream) {
- return toStatusT(mBase->setSidebandStream(stream->handle()));
+ return toStatusT(mBase->setSidebandStream(stream == nullptr ? nullptr : stream->handle()));
}
void H2BGraphicBufferProducer::allocateBuffers(uint32_t width, uint32_t height,
diff --git a/libs/nativewindow/include/android/native_window.h b/libs/nativewindow/include/android/native_window.h
index a12bdd7..5290dd5 100644
--- a/libs/nativewindow/include/android/native_window.h
+++ b/libs/nativewindow/include/android/native_window.h
@@ -21,6 +21,7 @@
/**
* @file native_window.h
+ * @brief API for accessing a native window.
*/
#ifndef ANDROID_NATIVE_WINDOW_H
@@ -71,16 +72,16 @@
struct ANativeWindow;
/**
- * {@link ANativeWindow} is opaque type that provides access to a native window.
+ * Opaque type that provides access to a native window.
*
- * A pointer can be obtained using ANativeWindow_fromSurface().
+ * A pointer can be obtained using {@link ANativeWindow_fromSurface()}.
*/
typedef struct ANativeWindow ANativeWindow;
/**
- * {@link ANativeWindow} is a struct that represents a windows buffer.
+ * Struct that represents a window's buffer.
*
- * A pointer can be obtained using ANativeWindow_lock().
+ * A pointer can be obtained using {@link ANativeWindow_lock()}.
*/
typedef struct ANativeWindow_Buffer {
// The number of pixels that are show horizontally.
@@ -90,10 +91,10 @@
int32_t height;
// The number of *pixels* that a line in the buffer takes in
- // memory. This may be >= width.
+ // memory. This may be >= width.
int32_t stride;
- // The format of the buffer. One of AHARDWAREBUFFER_FORMAT_*
+ // The format of the buffer. One of AHARDWAREBUFFER_FORMAT_*
int32_t format;
// The actual bits.
@@ -104,49 +105,53 @@
} ANativeWindow_Buffer;
/**
- * Acquire a reference on the given ANativeWindow object. This prevents the object
+ * Acquire a reference on the given {@link ANativeWindow} object. This prevents the object
* from being deleted until the reference is removed.
*/
void ANativeWindow_acquire(ANativeWindow* window);
/**
- * Remove a reference that was previously acquired with ANativeWindow_acquire().
+ * Remove a reference that was previously acquired with {@link ANativeWindow_acquire()}.
*/
void ANativeWindow_release(ANativeWindow* window);
/**
- * Return the current width in pixels of the window surface. Returns a
- * negative value on error.
+ * Return the current width in pixels of the window surface.
+ *
+ * \return negative value on error.
*/
int32_t ANativeWindow_getWidth(ANativeWindow* window);
/**
- * Return the current height in pixels of the window surface. Returns a
- * negative value on error.
+ * Return the current height in pixels of the window surface.
+ *
+ * \return a negative value on error.
*/
int32_t ANativeWindow_getHeight(ANativeWindow* window);
/**
- * Return the current pixel format (AHARDWAREBUFFER_FORMAT_*) of the window surface. Returns a
- * negative value on error.
+ * Return the current pixel format (AHARDWAREBUFFER_FORMAT_*) of the window surface.
+ *
+ * \return a negative value on error.
*/
int32_t ANativeWindow_getFormat(ANativeWindow* window);
/**
* Change the format and size of the window buffers.
*
- * format: one of AHARDWAREBUFFER_FORMAT_ constants
- *
* The width and height control the number of pixels in the buffers, not the
- * dimensions of the window on screen. If these are different than the
- * window's physical size, then it buffer will be scaled to match that size
- * when compositing it to the screen.
+ * dimensions of the window on screen. If these are different than the
+ * window's physical size, then its buffer will be scaled to match that size
+ * when compositing it to the screen. The width and height must be either both zero
+ * or both non-zero.
*
* For all of these parameters, if 0 is supplied then the window's base
* value will come back in force.
*
- * width and height must be either both zero or both non-zero.
- *
+ * \param width width of the buffers in pixels.
+ * \param height height of the buffers in pixels.
+ * \param format one of AHARDWAREBUFFER_FORMAT_* constants.
+ * \return 0 for success, or a negative value on error.
*/
int32_t ANativeWindow_setBuffersGeometry(ANativeWindow* window,
int32_t width, int32_t height, int32_t format);
@@ -157,7 +162,9 @@
* function, it contains the dirty region, that is, the region the caller
* intends to redraw. When the function returns, inOutDirtyBounds is updated
* with the actual area the caller needs to redraw -- this region is often
- * extended by ANativeWindow_lock.
+ * extended by {@link ANativeWindow_lock}.
+ *
+ * \return 0 for success, or a negative value on error.
*/
int32_t ANativeWindow_lock(ANativeWindow* window, ANativeWindow_Buffer* outBuffer,
ARect* inOutDirtyBounds);
@@ -165,6 +172,8 @@
/**
* Unlock the window's drawing surface after previously locking it,
* posting the new buffer to the display.
+ *
+ * \return 0 for success, or a negative value on error.
*/
int32_t ANativeWindow_unlockAndPost(ANativeWindow* window);
@@ -173,9 +182,8 @@
/**
* Set a transform that will be applied to future buffers posted to the window.
*
- * @param transform combination of {@link ANativeWindowTransform} flags
- * @return 0 if successful
- * @return -EINVAL if @param transform is invalid
+ * \param transform combination of {@link ANativeWindowTransform} flags
+ * \return 0 for success, or -EINVAL if \p transform is invalid
*/
int32_t ANativeWindow_setBuffersTransform(ANativeWindow* window, int32_t transform);
diff --git a/libs/vr/libbufferhub/buffer_hub_client.cpp b/libs/vr/libbufferhub/buffer_hub_client.cpp
index 07c921f..a09a7a1 100644
--- a/libs/vr/libbufferhub/buffer_hub_client.cpp
+++ b/libs/vr/libbufferhub/buffer_hub_client.cpp
@@ -53,44 +53,34 @@
int BufferHubBuffer::ImportBuffer() {
ATRACE_NAME("BufferHubBuffer::ImportBuffer");
- Status<std::vector<NativeBufferHandle<LocalHandle>>> status =
- InvokeRemoteMethod<BufferHubRPC::GetBuffers>();
+ Status<NativeBufferHandle<LocalHandle>> status =
+ InvokeRemoteMethod<BufferHubRPC::GetBuffer>();
if (!status) {
- ALOGE("BufferHubBuffer::ImportBuffer: Failed to get buffers: %s",
+ ALOGE("BufferHubBuffer::ImportBuffer: Failed to get buffer: %s",
status.GetErrorMessage().c_str());
return -status.error();
- } else if (status.get().empty()) {
- ALOGE(
- "BufferHubBuffer::ImportBuffer: Expected to receive at least one "
- "buffer handle but got zero!");
+ } else if (status.get().id() < 0) {
+ ALOGE("BufferHubBuffer::ImportBuffer: Received an invalid id!");
return -EIO;
}
- auto buffer_handles = status.take();
+ auto buffer_handle = status.take();
- // Stash the buffer id to replace the value in id_. All sub-buffers of a
- // buffer hub buffer have the same id.
- const int new_id = buffer_handles[0].id();
+ // Stash the buffer id to replace the value in id_.
+ const int new_id = buffer_handle.id();
- // Import all of the buffers.
- std::vector<IonBuffer> ion_buffers;
- for (auto& handle : buffer_handles) {
- const size_t i = &handle - buffer_handles.data();
- ALOGD_IF(
- TRACE,
- "BufferHubBuffer::ImportBuffer: i=%zu id=%d FdCount=%zu IntCount=%zu",
- i, handle.id(), handle.FdCount(), handle.IntCount());
+ // Import the buffer.
+ IonBuffer ion_buffer;
+ ALOGD_IF(
+ TRACE, "BufferHubBuffer::ImportBuffer: id=%d FdCount=%zu IntCount=%zu",
+ buffer_handle.id(), buffer_handle.FdCount(), buffer_handle.IntCount());
- IonBuffer buffer;
- const int ret = handle.Import(&buffer);
- if (ret < 0)
- return ret;
+ const int ret = buffer_handle.Import(&ion_buffer);
+ if (ret < 0)
+ return ret;
- ion_buffers.emplace_back(std::move(buffer));
- }
-
- // If all imports succeed, replace the previous buffers and id.
- slices_ = std::move(ion_buffers);
+ // If the import succeeds, replace the previous buffer and id.
+ buffer_ = std::move(ion_buffer);
id_ = new_id;
return 0;
}
@@ -102,11 +92,11 @@
}
int BufferHubBuffer::Lock(int usage, int x, int y, int width, int height,
- void** address, size_t index) {
- return slices_[index].Lock(usage, x, y, width, height, address);
+ void** address) {
+ return buffer_.Lock(usage, x, y, width, height, address);
}
-int BufferHubBuffer::Unlock(size_t index) { return slices_[index].Unlock(); }
+int BufferHubBuffer::Unlock() { return buffer_.Unlock(); }
int BufferHubBuffer::GetBlobReadWritePointer(size_t size, void** addr) {
int width = static_cast<int>(size);
@@ -199,27 +189,24 @@
}
BufferProducer::BufferProducer(uint32_t width, uint32_t height, uint32_t format,
- uint32_t usage, size_t metadata_size,
- size_t slice_count)
- : BufferProducer(width, height, format, usage, usage, metadata_size,
- slice_count) {}
+ uint32_t usage, size_t metadata_size)
+ : BufferProducer(width, height, format, usage, usage, metadata_size) {}
BufferProducer::BufferProducer(uint32_t width, uint32_t height, uint32_t format,
uint64_t producer_usage, uint64_t consumer_usage,
- size_t metadata_size, size_t slice_count)
+ size_t metadata_size)
: BASE(BufferHubRPC::kClientPath) {
ATRACE_NAME("BufferProducer::BufferProducer");
ALOGD_IF(TRACE,
"BufferProducer::BufferProducer: fd=%d width=%u height=%u format=%u "
"producer_usage=%" PRIx64 " consumer_usage=%" PRIx64
- " metadata_size=%zu slice_count=%zu",
+ " metadata_size=%zu",
event_fd(), width, height, format, producer_usage, consumer_usage,
- metadata_size, slice_count);
+ metadata_size);
// (b/37881101) Deprecate producer/consumer usage
auto status = InvokeRemoteMethod<BufferHubRPC::CreateBuffer>(
- width, height, format, (producer_usage | consumer_usage), metadata_size,
- slice_count);
+ width, height, format, (producer_usage | consumer_usage), metadata_size);
if (!status) {
ALOGE(
"BufferProducer::BufferProducer: Failed to create producer buffer: %s",
@@ -240,28 +227,27 @@
BufferProducer::BufferProducer(const std::string& name, int user_id,
int group_id, uint32_t width, uint32_t height,
uint32_t format, uint32_t usage,
- size_t meta_size_bytes, size_t slice_count)
+ size_t meta_size_bytes)
: BufferProducer(name, user_id, group_id, width, height, format, usage,
- usage, meta_size_bytes, slice_count) {}
+ usage, meta_size_bytes) {}
BufferProducer::BufferProducer(const std::string& name, int user_id,
int group_id, uint32_t width, uint32_t height,
uint32_t format, uint64_t producer_usage,
- uint64_t consumer_usage, size_t meta_size_bytes,
- size_t slice_count)
+ uint64_t consumer_usage, size_t meta_size_bytes)
: BASE(BufferHubRPC::kClientPath) {
ATRACE_NAME("BufferProducer::BufferProducer");
ALOGD_IF(TRACE,
"BufferProducer::BufferProducer: fd=%d name=%s user_id=%d "
"group_id=%d width=%u height=%u format=%u producer_usage=%" PRIx64
- " consumer_usage=%" PRIx64 " meta_size_bytes=%zu slice_count=%zu",
+ " consumer_usage=%" PRIx64 " meta_size_bytes=%zu",
event_fd(), name.c_str(), user_id, group_id, width, height, format,
- producer_usage, consumer_usage, meta_size_bytes, slice_count);
+ producer_usage, consumer_usage, meta_size_bytes);
// (b/37881101) Deprecate producer/consumer usage
auto status = InvokeRemoteMethod<BufferHubRPC::CreatePersistentBuffer>(
name, user_id, group_id, width, height, format,
- (producer_usage | consumer_usage), meta_size_bytes, slice_count);
+ (producer_usage | consumer_usage), meta_size_bytes);
if (!status) {
ALOGE(
"BufferProducer::BufferProducer: Failed to create/get persistent "
@@ -296,12 +282,11 @@
const int height = 1;
const int format = HAL_PIXEL_FORMAT_BLOB;
const size_t meta_size_bytes = 0;
- const size_t slice_count = 1;
// (b/37881101) Deprecate producer/consumer usage
auto status = InvokeRemoteMethod<BufferHubRPC::CreateBuffer>(
- width, height, format, (producer_usage | consumer_usage), meta_size_bytes,
- slice_count);
+ width, height, format, (producer_usage | consumer_usage),
+ meta_size_bytes);
if (!status) {
ALOGE("BufferProducer::BufferProducer: Failed to create blob: %s",
status.GetErrorMessage().c_str());
@@ -336,12 +321,11 @@
const int height = 1;
const int format = HAL_PIXEL_FORMAT_BLOB;
const size_t meta_size_bytes = 0;
- const size_t slice_count = 1;
// (b/37881101) Deprecate producer/consumer usage
auto status = InvokeRemoteMethod<BufferHubRPC::CreatePersistentBuffer>(
name, user_id, group_id, width, height, format,
- (producer_usage | consumer_usage), meta_size_bytes, slice_count);
+ (producer_usage | consumer_usage), meta_size_bytes);
if (!status) {
ALOGE(
"BufferProducer::BufferProducer: Failed to create persistent "
diff --git a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
index dbd4110..83e9255 100644
--- a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
+++ b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
@@ -32,20 +32,11 @@
// the usage is software then |addr| will be updated to point to the address
// of the buffer in virtual memory. The caller should only access/modify the
// pixels in the specified area. anything else is undefined behavior.
- int Lock(int usage, int x, int y, int width, int height, void** addr,
- size_t index);
+ int Lock(int usage, int x, int y, int width, int height, void** addr);
// Must be called after Lock() when the caller has finished changing the
// buffer.
- int Unlock(size_t index);
-
- // Helper for when index is 0.
- int Lock(int usage, int x, int y, int width, int height, void** addr) {
- return Lock(usage, x, y, width, height, addr, 0);
- }
-
- // Helper for when index is 0.
- int Unlock() { return Unlock(0); }
+ int Unlock();
// Gets a blob buffer that was created with BufferProducer::CreateBlob.
// Locking and Unlocking is handled internally. There's no need to Unlock
@@ -85,38 +76,27 @@
}
native_handle_t* native_handle() const {
- return const_cast<native_handle_t*>(slices_[0].handle());
- }
- // If index is greater than or equal to slice_count(), the result is
- // undefined.
- native_handle_t* native_handle(size_t index) const {
- return const_cast<native_handle_t*>(slices_[index].handle());
+ return const_cast<native_handle_t*>(buffer_.handle());
}
- IonBuffer* buffer() { return &slices_[0]; }
- const IonBuffer* buffer() const { return &slices_[0]; }
+ IonBuffer* buffer() { return &buffer_; }
+ const IonBuffer* buffer() const { return &buffer_; }
- // If index is greater than or equal to slice_count(), the result is
- // undefined.
- IonBuffer* slice(size_t index) { return &slices_[index]; }
- const IonBuffer* slice(size_t index) const { return &slices_[index]; }
-
- int slice_count() const { return static_cast<int>(slices_.size()); }
int id() const { return id_; }
// The following methods return settings of the first buffer. Currently,
// it is only possible to create multi-buffer BufferHubBuffers with the same
// settings.
- uint32_t width() const { return slices_[0].width(); }
- uint32_t height() const { return slices_[0].height(); }
- uint32_t stride() const { return slices_[0].stride(); }
- uint32_t format() const { return slices_[0].format(); }
- uint32_t usage() const { return slices_[0].usage(); }
- uint32_t layer_count() const { return slices_[0].layer_count(); }
+ uint32_t width() const { return buffer_.width(); }
+ uint32_t height() const { return buffer_.height(); }
+ uint32_t stride() const { return buffer_.stride(); }
+ uint32_t format() const { return buffer_.format(); }
+ uint32_t usage() const { return buffer_.usage(); }
+ uint32_t layer_count() const { return buffer_.layer_count(); }
// TODO(b/37881101) Clean up producer/consumer usage.
- uint64_t producer_usage() const { return slices_[0].usage(); }
- uint64_t consumer_usage() const { return slices_[0].usage(); }
+ uint64_t producer_usage() const { return buffer_.usage(); }
+ uint64_t consumer_usage() const { return buffer_.usage(); }
protected:
explicit BufferHubBuffer(LocalChannelHandle channel);
@@ -135,9 +115,7 @@
// or any other functional purpose as a security precaution.
int id_;
- // A BufferHubBuffer may contain multiple slices of IonBuffers with same
- // configurations.
- std::vector<IonBuffer> slices_;
+ IonBuffer buffer_;
};
// This represents a writable buffer. Calling Post notifies all clients and
@@ -224,11 +202,10 @@
// Constructs a buffer with the given geometry and parameters.
BufferProducer(uint32_t width, uint32_t height, uint32_t format,
- uint32_t usage, size_t metadata_size = 0,
- size_t slice_count = 1);
+ uint32_t usage, size_t metadata_size = 0);
BufferProducer(uint32_t width, uint32_t height, uint32_t format,
uint64_t producer_usage, uint64_t consumer_usage,
- size_t metadata_size, size_t slice_count);
+ size_t metadata_size);
// Constructs a persistent buffer with the given geometry and parameters and
// binds it to |name| in one shot. If a persistent buffer with the same name
@@ -244,12 +221,11 @@
// effective user or group id of the calling process.
BufferProducer(const std::string& name, int user_id, int group_id,
uint32_t width, uint32_t height, uint32_t format,
- uint32_t usage, size_t metadata_size = 0,
- size_t slice_count = 1);
+ uint32_t usage, size_t metadata_size = 0);
BufferProducer(const std::string& name, int user_id, int group_id,
uint32_t width, uint32_t height, uint32_t format,
uint64_t producer_usage, uint64_t consumer_usage,
- size_t metadata_size, size_t slice_count);
+                 size_t metadata_size);
// Constructs a blob (flat) buffer with the given usage flags.
BufferProducer(uint32_t usage, size_t size);
diff --git a/libs/vr/libbufferhub/include/private/dvr/bufferhub_rpc.h b/libs/vr/libbufferhub/include/private/dvr/bufferhub_rpc.h
index c6f0e1e..ffdc9e2 100644
--- a/libs/vr/libbufferhub/include/private/dvr/bufferhub_rpc.h
+++ b/libs/vr/libbufferhub/include/private/dvr/bufferhub_rpc.h
@@ -23,6 +23,7 @@
stride_(buffer.stride()),
width_(buffer.width()),
height_(buffer.height()),
+ layer_count_(buffer.layer_count()),
format_(buffer.format()),
usage_(buffer.usage()) {
// Populate the fd and int vectors: native_handle->data[] is an array of fds
@@ -47,9 +48,10 @@
for (const auto& fd : fds_)
fd_ints.push_back(fd.Get());
- const int ret = buffer->Import(fd_ints.data(), fd_ints.size(),
- opaque_ints_.data(), opaque_ints_.size(),
- width_, height_, stride_, format_, usage_);
+ const int ret =
+ buffer->Import(fd_ints.data(), fd_ints.size(), opaque_ints_.data(),
+ opaque_ints_.size(), width_, height_, layer_count_,
+ stride_, format_, usage_);
if (ret < 0)
return ret;
@@ -72,6 +74,7 @@
uint32_t stride_;
uint32_t width_;
uint32_t height_;
+ uint32_t layer_count_;
uint32_t format_;
uint64_t usage_;
std::vector<int> opaque_ints_;
@@ -83,8 +86,8 @@
}
PDX_SERIALIZABLE_MEMBERS(NativeBufferHandle<FileHandleType>, id_, stride_,
- width_, height_, format_, usage_, opaque_ints_,
- fds_);
+ width_, height_, layer_count_, format_, usage_,
+ opaque_ints_, fds_);
NativeBufferHandle(const NativeBufferHandle&) = delete;
void operator=(const NativeBufferHandle&) = delete;
@@ -166,7 +169,6 @@
kOpCreatePersistentBuffer,
kOpGetPersistentBuffer,
kOpGetBuffer,
- kOpGetBuffers,
kOpNewConsumer,
kOpProducerMakePersistent,
kOpProducerRemovePersistence,
@@ -192,19 +194,15 @@
// Methods.
PDX_REMOTE_METHOD(CreateBuffer, kOpCreateBuffer,
void(uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t meta_size_bytes,
- size_t slice_count));
+ uint64_t usage, size_t meta_size_bytes));
PDX_REMOTE_METHOD(CreatePersistentBuffer, kOpCreatePersistentBuffer,
void(const std::string& name, int user_id, int group_id,
uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t meta_size_bytes,
- size_t slice_count));
+ uint64_t usage, size_t meta_size_bytes));
PDX_REMOTE_METHOD(GetPersistentBuffer, kOpGetPersistentBuffer,
void(const std::string& name));
PDX_REMOTE_METHOD(GetBuffer, kOpGetBuffer,
- NativeBufferHandle<LocalHandle>(unsigned index));
- PDX_REMOTE_METHOD(GetBuffers, kOpGetBuffers,
- std::vector<NativeBufferHandle<LocalHandle>>(Void));
+ NativeBufferHandle<LocalHandle>(Void));
PDX_REMOTE_METHOD(NewConsumer, kOpNewConsumer, LocalChannelHandle(Void));
PDX_REMOTE_METHOD(ProducerMakePersistent, kOpProducerMakePersistent,
void(const std::string& name, int user_id, int group_id));
@@ -229,9 +227,8 @@
PDX_REMOTE_METHOD(ProducerQueueAllocateBuffers,
kOpProducerQueueAllocateBuffers,
std::vector<std::pair<LocalChannelHandle, size_t>>(
- uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count,
- size_t buffer_count));
+ uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage, size_t buffer_count));
PDX_REMOTE_METHOD(ProducerQueueDetachBuffer, kOpProducerQueueDetachBuffer,
void(size_t slot));
PDX_REMOTE_METHOD(ConsumerQueueImportBuffers, kOpConsumerQueueImportBuffers,
diff --git a/libs/vr/libbufferhub/include/private/dvr/ion_buffer.h b/libs/vr/libbufferhub/include/private/dvr/ion_buffer.h
index 72c8d81..0d337f7 100644
--- a/libs/vr/libbufferhub/include/private/dvr/ion_buffer.h
+++ b/libs/vr/libbufferhub/include/private/dvr/ion_buffer.h
@@ -16,8 +16,8 @@
IonBuffer(buffer_handle_t handle, uint32_t width, uint32_t height,
uint32_t stride, uint32_t format, uint64_t usage);
IonBuffer(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t layer_count, uint32_t stride, uint32_t layer_stride,
- uint32_t format, uint64_t usage);
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage);
~IonBuffer();
IonBuffer(IonBuffer&& other);
@@ -31,25 +31,29 @@
// previous native handle if necessary. Returns 0 on success or a negative
// errno code otherwise. If allocation fails the previous native handle is
// left intact.
- int Alloc(uint32_t width, uint32_t height, uint32_t format, uint64_t usage);
+ int Alloc(uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage);
// Resets the underlying native handle and parameters, freeing the previous
// native handle if necessary.
void Reset(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t stride, uint32_t format, uint64_t usage);
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage);
// Like Reset but also registers the native handle, which is necessary for
// native handles received over IPC. Returns 0 on success or a negative errno
// code otherwise. If import fails the previous native handle is left intact.
int Import(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t stride, uint32_t format, uint64_t usage);
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage);
// Like Reset but imports a native handle from raw fd and int arrays. Returns
// 0 on success or a negative errno code otherwise. If import fails the
// previous native handle is left intact.
int Import(const int* fd_array, int fd_count, const int* int_array,
- int int_count, uint32_t width, uint32_t height, uint32_t stride,
- uint32_t format, uint64_t usage);
+ int int_count, uint32_t width, uint32_t height,
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage);
// Duplicates the native handle underlying |other| and then imports it. This
// is useful for creating multiple, independent views of the same Ion/Gralloc
@@ -72,7 +76,6 @@
return buffer_.get() ? buffer_->getLayerCount() : 0;
}
uint32_t stride() const { return buffer_.get() ? buffer_->getStride() : 0; }
- uint32_t layer_stride() const { return 0; }
uint32_t format() const {
return buffer_.get() ? buffer_->getPixelFormat() : 0;
}
diff --git a/libs/vr/libbufferhub/include/private/dvr/native_buffer.h b/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
index f9b6975..b4ef2f5 100644
--- a/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
+++ b/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
@@ -52,40 +52,11 @@
void operator=(NativeBuffer&) = delete;
};
-// NativeBufferProducerSlice is an implementation of ANativeWindowBuffer backed
-// by a buffer slice of a BufferProducer.
-class NativeBufferProducerSlice
- : public android::ANativeObjectBase<
- ANativeWindowBuffer, NativeBufferProducerSlice,
- android::LightRefBase<NativeBufferProducerSlice>> {
- public:
- NativeBufferProducerSlice(const std::shared_ptr<BufferProducer>& buffer,
- int buffer_index)
- : BASE(), buffer_(buffer) {
- ANativeWindowBuffer::width = buffer_->width();
- ANativeWindowBuffer::height = buffer_->height();
- ANativeWindowBuffer::stride = buffer_->stride();
- ANativeWindowBuffer::format = buffer_->format();
- ANativeWindowBuffer::usage = buffer_->usage();
- handle = buffer_->native_handle(buffer_index);
- }
-
- virtual ~NativeBufferProducerSlice() {}
-
- private:
- friend class android::LightRefBase<NativeBufferProducerSlice>;
-
- std::shared_ptr<BufferProducer> buffer_;
-
- NativeBufferProducerSlice(const NativeBufferProducerSlice&) = delete;
- void operator=(NativeBufferProducerSlice&) = delete;
-};
-
// NativeBufferProducer is an implementation of ANativeWindowBuffer backed by a
// BufferProducer.
class NativeBufferProducer : public android::ANativeObjectBase<
- ANativeWindowBuffer, NativeBufferProducer,
- android::LightRefBase<NativeBufferProducer>> {
+ ANativeWindowBuffer, NativeBufferProducer,
+ android::LightRefBase<NativeBufferProducer>> {
public:
static constexpr int kEmptyFence = -1;
@@ -101,19 +72,6 @@
ANativeWindowBuffer::format = buffer_->format();
ANativeWindowBuffer::usage = buffer_->usage();
handle = buffer_->native_handle();
- for (int i = 0; i < buffer->slice_count(); ++i) {
- // display == null means don't create an EGL image. This is used by our
- // Vulkan code.
- slices_.push_back(new NativeBufferProducerSlice(buffer, i));
- if (display_ != nullptr) {
- egl_images_.push_back(eglCreateImageKHR(
- display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
- static_cast<ANativeWindowBuffer*>(slices_.back().get()), nullptr));
- if (egl_images_.back() == EGL_NO_IMAGE_KHR) {
- ALOGE("NativeBufferProducer: eglCreateImageKHR failed");
- }
- }
- }
}
explicit NativeBufferProducer(const std::shared_ptr<BufferProducer>& buffer)
@@ -154,7 +112,6 @@
std::shared_ptr<BufferProducer> buffer_;
pdx::LocalHandle release_fence_;
- std::vector<android::sp<NativeBufferProducerSlice>> slices_;
std::vector<EGLImageKHR> egl_images_;
uint32_t surface_buffer_index_;
EGLDisplay display_;
@@ -171,21 +128,16 @@
public:
static constexpr int kEmptyFence = -1;
- explicit NativeBufferConsumer(const std::shared_ptr<BufferConsumer>& buffer,
- int index)
+ explicit NativeBufferConsumer(const std::shared_ptr<BufferConsumer>& buffer)
: BASE(), buffer_(buffer), acquire_fence_(kEmptyFence), sequence_(0) {
ANativeWindowBuffer::width = buffer_->width();
ANativeWindowBuffer::height = buffer_->height();
ANativeWindowBuffer::stride = buffer_->stride();
ANativeWindowBuffer::format = buffer_->format();
ANativeWindowBuffer::usage = buffer_->usage();
- LOG_ALWAYS_FATAL_IF(buffer_->slice_count() <= index);
- handle = buffer_->slice(index)->handle();
+ handle = buffer_->native_handle();
}
- explicit NativeBufferConsumer(const std::shared_ptr<BufferConsumer>& buffer)
- : NativeBufferConsumer(buffer, 0) {}
-
virtual ~NativeBufferConsumer() {}
std::shared_ptr<BufferConsumer> buffer() const { return buffer_; }
diff --git a/libs/vr/libbufferhub/ion_buffer.cpp b/libs/vr/libbufferhub/ion_buffer.cpp
index 716ab42..cbaa24a 100644
--- a/libs/vr/libbufferhub/ion_buffer.cpp
+++ b/libs/vr/libbufferhub/ion_buffer.cpp
@@ -15,36 +15,36 @@
namespace android {
namespace dvr {
-IonBuffer::IonBuffer() : IonBuffer(nullptr, 0, 0, 0, 0, 0, 0, 0) {}
+IonBuffer::IonBuffer() : IonBuffer(nullptr, 0, 0, 0, 0, 0, 0) {}
IonBuffer::IonBuffer(uint32_t width, uint32_t height, uint32_t format,
uint64_t usage)
: IonBuffer() {
- Alloc(width, height, format, usage);
+ Alloc(width, height, kDefaultGraphicBufferLayerCount, format, usage);
}
IonBuffer::IonBuffer(buffer_handle_t handle, uint32_t width, uint32_t height,
uint32_t stride, uint32_t format, uint64_t usage)
- : IonBuffer(handle, width, height, 1, stride, 0, format, usage) {}
+ : IonBuffer(handle, width, height, kDefaultGraphicBufferLayerCount, stride,
+ format, usage) {}
IonBuffer::IonBuffer(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t layer_count, uint32_t stride,
- uint32_t layer_stride, uint32_t format, uint64_t usage)
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage)
: buffer_(nullptr) {
ALOGD_IF(TRACE,
"IonBuffer::IonBuffer: handle=%p width=%u height=%u layer_count=%u "
- "stride=%u layer stride=%u format=%u usage=%" PRIx64,
- handle, width, height, layer_count, stride, layer_stride, format,
- usage);
+ "stride=%u format=%u usage=%" PRIx64,
+ handle, width, height, layer_count, stride, format, usage);
if (handle != 0) {
- Import(handle, width, height, stride, format, usage);
+ Import(handle, width, height, layer_count, stride, format, usage);
}
}
IonBuffer::~IonBuffer() {
ALOGD_IF(TRACE,
"IonBuffer::~IonBuffer: handle=%p width=%u height=%u stride=%u "
- "format=%u usage=%x",
+ "format=%u usage=%" PRIx64,
handle(), width(), height(), stride(), format(), usage());
FreeHandle();
}
@@ -71,14 +71,14 @@
}
}
-int IonBuffer::Alloc(uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage) {
+int IonBuffer::Alloc(uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage) {
ALOGD_IF(TRACE,
- "IonBuffer::Alloc: width=%u height=%u format=%u usage=%" PRIx64,
- width, height, format, usage);
+ "IonBuffer::Alloc: width=%u height=%u layer_count=%u format=%u "
+ "usage=%" PRIx64, width, height, layer_count, format, usage);
- sp<GraphicBuffer> buffer = new GraphicBuffer(
- width, height, format, kDefaultGraphicBufferLayerCount, usage);
+ sp<GraphicBuffer> buffer =
+ new GraphicBuffer(width, height, format, layer_count, usage);
if (buffer->initCheck() != OK) {
ALOGE("IonBuffer::Aloc: Failed to allocate buffer");
return -EINVAL;
@@ -89,26 +89,27 @@
}
void IonBuffer::Reset(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t stride, uint32_t format, uint64_t usage) {
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage) {
ALOGD_IF(TRACE,
- "IonBuffer::Reset: handle=%p width=%u height=%u stride=%u format=%u "
- "usage=%" PRIx64,
- handle, width, height, stride, format, usage);
- Import(handle, width, height, stride, format, usage);
+ "IonBuffer::Reset: handle=%p width=%u height=%u layer_count=%u "
+ "stride=%u format=%u usage=%" PRIx64,
+ handle, width, height, layer_count, stride, format, usage);
+ Import(handle, width, height, layer_count, stride, format, usage);
}
int IonBuffer::Import(buffer_handle_t handle, uint32_t width, uint32_t height,
- uint32_t stride, uint32_t format, uint64_t usage) {
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage) {
ATRACE_NAME("IonBuffer::Import1");
- ALOGD_IF(
- TRACE,
- "IonBuffer::Import: handle=%p width=%u height=%u stride=%u format=%u "
- "usage=%" PRIx64,
- handle, width, height, stride, format, usage);
+ ALOGD_IF(TRACE,
+ "IonBuffer::Import: handle=%p width=%u height=%u layer_count=%u "
+ "stride=%u format=%u usage=%" PRIx64,
+ handle, width, height, layer_count, stride, format, usage);
FreeHandle();
- sp<GraphicBuffer> buffer = new GraphicBuffer(
- handle, GraphicBuffer::TAKE_UNREGISTERED_HANDLE, width, height, format,
- kDefaultGraphicBufferLayerCount, usage, stride);
+ sp<GraphicBuffer> buffer =
+ new GraphicBuffer(handle, GraphicBuffer::TAKE_UNREGISTERED_HANDLE, width,
+ height, format, layer_count, usage, stride);
if (buffer->initCheck() != OK) {
ALOGE("IonBuffer::Import: Failed to import buffer");
return -EINVAL;
@@ -120,12 +121,14 @@
int IonBuffer::Import(const int* fd_array, int fd_count, const int* int_array,
int int_count, uint32_t width, uint32_t height,
- uint32_t stride, uint32_t format, uint64_t usage) {
+ uint32_t layer_count, uint32_t stride, uint32_t format,
+ uint64_t usage) {
ATRACE_NAME("IonBuffer::Import2");
ALOGD_IF(TRACE,
"IonBuffer::Import: fd_count=%d int_count=%d width=%u height=%u "
- "stride=%u format=%u usage=%" PRIx64,
- fd_count, int_count, width, height, stride, format, usage);
+ "layer_count=%u stride=%u format=%u usage=%" PRIx64,
+ fd_count, int_count, width, height, layer_count, stride, format,
+ usage);
if (fd_count < 0 || int_count < 0) {
ALOGE("IonBuffer::Import: invalid arguments.");
@@ -143,7 +146,8 @@
memcpy(handle->data, fd_array, sizeof(int) * fd_count);
memcpy(handle->data + fd_count, int_array, sizeof(int) * int_count);
- const int ret = Import(handle, width, height, stride, format, usage);
+ const int ret =
+ Import(handle, width, height, layer_count, stride, format, usage);
if (ret < 0) {
ALOGE("IonBuffer::Import: failed to import raw native handle: %s",
strerror(-ret));
@@ -179,8 +183,8 @@
sizeof(int) * int_count);
const int ret =
- Import(handle, other->width(), other->height(), other->stride(),
- other->format(), other->usage());
+ Import(handle, other->width(), other->height(), other->layer_count(),
+ other->stride(), other->format(), other->usage());
if (ret < 0) {
ALOGE("IonBuffer::Duplicate: Failed to import duplicate native handle: %s",
strerror(-ret));
diff --git a/libs/vr/libbufferhub/mocks/ion_buffer/private/dvr/ion_buffer.h b/libs/vr/libbufferhub/mocks/ion_buffer/private/dvr/ion_buffer.h
index fac6db0..8929c95 100644
--- a/libs/vr/libbufferhub/mocks/ion_buffer/private/dvr/ion_buffer.h
+++ b/libs/vr/libbufferhub/mocks/ion_buffer/private/dvr/ion_buffer.h
@@ -17,10 +17,11 @@
IonBufferMock() {}
MOCK_METHOD0(GetGrallocModuleImpl, gralloc_module_t const*());
MOCK_METHOD6(Import, int(buffer_handle_t handle, int width, int height,
- int stride, int format, int usage));
- MOCK_METHOD9(Import, int(const int* fd_array, int fd_count,
- const int* int_array, int int_count, int width,
- int height, int stride, int format, int usage));
+ int layer_count, int stride, int format, int usage));
+ MOCK_METHOD9(Import,
+ int(const int* fd_array, int fd_count, const int* int_array,
+ int int_count, int width, int height, int layer_count,
+ int stride, int format, int usage));
MOCK_METHOD6(Lock, int(int usage, int x, int y, int width, int height,
void** address));
MOCK_METHOD0(Unlock, int());
@@ -29,7 +30,6 @@
MOCK_CONST_METHOD0(height, int());
MOCK_CONST_METHOD0(layer_count, int());
MOCK_CONST_METHOD0(stride, int());
- MOCK_CONST_METHOD0(layer_stride, int());
MOCK_CONST_METHOD0(format, int());
MOCK_CONST_METHOD0(usage, int());
};
@@ -46,15 +46,16 @@
static gralloc_module_t const* GetGrallocModule() {
return staticObject->GetGrallocModuleImpl();
}
- int Import(buffer_handle_t handle, int width, int height, int stride,
- int format, int usage) {
- return mock_->Import(handle, width, height, stride, format, usage);
+ int Import(buffer_handle_t handle, int width, int height, int layer_count,
+ int stride, int format, int usage) {
+ return mock_->Import(handle, width, height, layer_count, stride, format,
+ usage);
}
int Import(const int* fd_array, int fd_count, const int* int_array,
- int int_count, int width, int height, int stride, int format,
- int usage) {
+ int int_count, int width, int height, int layer_count, int stride,
+ int format, int usage) {
return mock_->Import(fd_array, fd_count, int_array, int_count, width,
- height, stride, format, usage);
+ height, layer_count, stride, format, usage);
}
int Lock(int usage, int x, int y, int width, int height, void** address) {
return mock_->Lock(usage, x, y, width, height, address);
@@ -65,7 +66,6 @@
int height() const { return mock_->height(); }
int layer_count() const { return mock_->layer_count(); }
int stride() const { return mock_->stride(); }
- int layer_stride() const { return mock_->layer_stride(); }
int format() const { return mock_->format(); }
int usage() const { return mock_->usage(); }
std::unique_ptr<IonBufferMock> mock_;
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
index b381d22..012a4e7 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
@@ -2,8 +2,8 @@
#include <inttypes.h>
#include <log/log.h>
-#include <sys/epoll.h>
#include <poll.h>
+#include <sys/epoll.h>
#include <array>
@@ -389,8 +389,8 @@
}
int ProducerQueue::AllocateBuffer(uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage,
- size_t slice_count, size_t* out_slot) {
+ uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t* out_slot) {
if (out_slot == nullptr) {
ALOGE("ProducerQueue::AllocateBuffer: Parameter out_slot cannot be null.");
return -EINVAL;
@@ -405,7 +405,7 @@
const size_t kBufferCount = 1U;
Status<std::vector<std::pair<LocalChannelHandle, size_t>>> status =
InvokeRemoteMethod<BufferHubRPC::ProducerQueueAllocateBuffers>(
- width, height, format, usage, slice_count, kBufferCount);
+ width, height, layer_count, format, usage, kBufferCount);
if (!status) {
ALOGE("ProducerQueue::AllocateBuffer failed to create producer buffer: %s",
status.GetErrorMessage().c_str());
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
index 00ff137..31cccf0 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
@@ -33,13 +33,14 @@
unique_id_(getUniqueId()) {}
status_t BufferHubQueueCore::AllocateBuffer(uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage,
- size_t slice_count) {
+ uint32_t layer_count,
+ PixelFormat format,
+ uint64_t usage) {
size_t slot;
// Allocate new buffer through BufferHub and add it into |producer_| queue for
// bookkeeping.
- if (producer_->AllocateBuffer(width, height, format, usage, slice_count,
+ if (producer_->AllocateBuffer(width, height, layer_count, format, usage,
&slot) < 0) {
ALOGE("Failed to allocate new buffer in BufferHub.");
return NO_MEMORY;
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
index 0a36156..5121508 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
@@ -121,6 +121,7 @@
return NO_INIT;
}
+ const uint32_t kLayerCount = 1;
if (static_cast<int32_t>(core_->producer_->capacity()) <
max_dequeued_buffer_count_ +
BufferHubQueueCore::kDefaultUndequeuedBuffers) {
@@ -128,7 +129,7 @@
// |max_dequeued_buffer_count_|, allocate new buffer.
// TODO(jwcai) To save memory, the really reasonable thing to do is to go
// over existing slots and find first existing one to dequeue.
- ret = core_->AllocateBuffer(width, height, format, usage, 1);
+ ret = core_->AllocateBuffer(width, height, kLayerCount, format, usage);
if (ret < 0)
return ret;
}
@@ -138,12 +139,12 @@
for (size_t retry = 0; retry < BufferHubQueue::kMaxQueueCapacity; retry++) {
LocalHandle fence;
- auto buffer_status =
+ auto buffer_status =
core_->producer_->Dequeue(core_->dequeue_timeout_ms_, &slot, &fence);
- if (!buffer_producer)
- return NO_MEMORY;
buffer_producer = buffer_status.take();
+ if (!buffer_producer)
+ return NO_MEMORY;
if (width == buffer_producer->width() &&
height == buffer_producer->height() &&
@@ -172,7 +173,7 @@
// there are already multiple buffers in the queue, the next one returned
// from |core_->producer_->Dequeue| may not be the new buffer we just
// reallocated. Retry up to BufferHubQueue::kMaxQueueCapacity times.
- ret = core_->AllocateBuffer(width, height, format, usage, 1);
+ ret = core_->AllocateBuffer(width, height, kLayerCount, format, usage);
if (ret < 0)
return ret;
}
@@ -534,7 +535,8 @@
status_t BufferHubQueueProducer::setSharedBufferMode(bool shared_buffer_mode) {
if (shared_buffer_mode) {
- ALOGE("BufferHubQueueProducer::setSharedBufferMode(true) is not supported.");
+ ALOGE(
+ "BufferHubQueueProducer::setSharedBufferMode(true) is not supported.");
// TODO(b/36373181) Front buffer mode for buffer hub queue as ANativeWindow.
return INVALID_OPERATION;
}
diff --git a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
index c5dbbde..ed67f79 100644
--- a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
+++ b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
@@ -336,8 +336,8 @@
// use (i.e. in |Gain|'ed mode).
// Returns Zero on success and negative error code when buffer allocation
// fails.
- int AllocateBuffer(uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count, size_t* out_slot);
+ int AllocateBuffer(uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage, size_t* out_slot);
// Add a producer buffer to populate the queue. Once added, a producer buffer
// is available to use (i.e. in |Gain|'ed mode).
diff --git a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_core.h b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_core.h
index 9a8a2c9..180906b 100644
--- a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_core.h
+++ b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_core.h
@@ -118,8 +118,8 @@
BufferHubQueueCore();
// Allocate a new buffer producer through BufferHub.
- int AllocateBuffer(uint32_t width, uint32_t height, PixelFormat format,
- uint32_t usage, size_t slice_count);
+ int AllocateBuffer(uint32_t width, uint32_t height, uint32_t layer_count,
+ PixelFormat format, uint64_t usage);
// Detach a buffer producer through BufferHub.
int DetachBuffer(size_t slot);
diff --git a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
index b345498..bf916ba 100644
--- a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
+++ b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
@@ -29,8 +29,7 @@
// See |IGraphicBufferProducer::dequeueBuffer|
status_t dequeueBuffer(int* out_slot, sp<Fence>* out_fence, uint32_t width,
- uint32_t height, PixelFormat format,
- uint32_t usage,
+ uint32_t height, PixelFormat format, uint32_t usage,
FrameEventHistoryDelta* outTimestamps) override;
// See |IGraphicBufferProducer::detachBuffer|
@@ -41,7 +40,8 @@
sp<Fence>* out_fence) override;
// See |IGraphicBufferProducer::attachBuffer|
- status_t attachBuffer(int* out_slot, const sp<GraphicBuffer>& buffer) override;
+ status_t attachBuffer(int* out_slot,
+ const sp<GraphicBuffer>& buffer) override;
// See |IGraphicBufferProducer::queueBuffer|
status_t queueBuffer(int slot, const QueueBufferInput& input,
@@ -59,7 +59,8 @@
QueueBufferOutput* output) override;
// See |IGraphicBufferProducer::disconnect|
- status_t disconnect(int api, DisconnectMode mode = DisconnectMode::Api) override;
+ status_t disconnect(int api,
+ DisconnectMode mode = DisconnectMode::Api) override;
// See |IGraphicBufferProducer::setSidebandStream|
status_t setSidebandStream(const sp<NativeHandle>& stream) override;
diff --git a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
index ba9c179..fe0b12a 100644
--- a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
+++ b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
@@ -15,9 +15,9 @@
constexpr int kBufferWidth = 100;
constexpr int kBufferHeight = 1;
+constexpr int kBufferLayerCount = 1;
constexpr int kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr int kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
-constexpr int kBufferSliceCount = 1; // number of slices in each buffer
class BufferHubQueueTest : public ::testing::Test {
public:
@@ -55,8 +55,8 @@
// Create producer buffer.
size_t slot;
int ret = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
- kBufferFormat, kBufferUsage,
- kBufferSliceCount, &slot);
+ kBufferLayerCount, kBufferFormat,
+ kBufferUsage, &slot);
ASSERT_EQ(ret, 0);
}
@@ -347,9 +347,9 @@
// When allocation, leave out |set_mask| from usage bits on purpose.
size_t slot;
- int ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage & ~set_mask,
- kBufferSliceCount, &slot);
+ int ret = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
+                                              kBufferLayerCount, kBufferFormat,
+ kBufferUsage & ~set_mask, &slot);
ASSERT_EQ(0, ret);
LocalHandle fence;
@@ -365,9 +365,9 @@
// When allocation, add |clear_mask| into usage bits on purpose.
size_t slot;
- int ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage | clear_mask,
- kBufferSliceCount, &slot);
+ int ret = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
+ kBufferLayerCount, kBufferFormat,
+ kBufferUsage | clear_mask, &slot);
ASSERT_EQ(0, ret);
LocalHandle fence;
@@ -385,14 +385,14 @@
// be able to succeed.
size_t slot;
int ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage & ~deny_set_mask,
- kBufferSliceCount, &slot);
+ kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
+ kBufferUsage & ~deny_set_mask, &slot);
ASSERT_EQ(ret, 0);
// While allocation with those bits should fail.
- ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage | deny_set_mask,
- kBufferSliceCount, &slot);
+ ret = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
+ kBufferLayerCount, kBufferFormat,
+ kBufferUsage | deny_set_mask, &slot);
ASSERT_EQ(ret, -EINVAL);
}
@@ -404,14 +404,14 @@
// mandatory), allocation with those bits should be able to succeed.
size_t slot;
int ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat,
- kBufferUsage | deny_clear_mask, kBufferSliceCount, &slot);
+ kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
+ kBufferUsage | deny_clear_mask, &slot);
ASSERT_EQ(ret, 0);
// While allocation without those bits should fail.
- ret = producer_queue_->AllocateBuffer(
- kBufferWidth, kBufferHeight, kBufferFormat,
- kBufferUsage & ~deny_clear_mask, kBufferSliceCount, &slot);
+ ret = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
+ kBufferLayerCount, kBufferFormat,
+ kBufferUsage & ~deny_clear_mask, &slot);
ASSERT_EQ(ret, -EINVAL);
}
diff --git a/libs/vr/libdisplay/display_client.cpp b/libs/vr/libdisplay/display_client.cpp
index dbee9f2..5c9ebd4 100644
--- a/libs/vr/libdisplay/display_client.cpp
+++ b/libs/vr/libdisplay/display_client.cpp
@@ -123,15 +123,13 @@
return {std::move(producer_queue)};
}
-Status<std::unique_ptr<ProducerQueue>> Surface::CreateQueue(uint32_t width,
- uint32_t height,
- uint32_t format,
- uint64_t usage,
- size_t capacity) {
+Status<std::unique_ptr<ProducerQueue>> Surface::CreateQueue(
+ uint32_t width, uint32_t height, uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t capacity) {
ALOGD_IF(TRACE,
- "Surface::CreateQueue: width=%u height=%u format=%u usage=%" PRIx64
- " capacity=%zu",
- width, height, format, usage, capacity);
+ "Surface::CreateQueue: width=%u height=%u layer_count=%u format=%u "
+ "usage=%" PRIx64 " capacity=%zu",
+ width, height, layer_count, format, usage, capacity);
auto status = CreateQueue();
if (!status)
return status.error_status();
@@ -141,9 +139,8 @@
ALOGD_IF(TRACE, "Surface::CreateQueue: Allocating %zu buffers...", capacity);
for (size_t i = 0; i < capacity; i++) {
size_t slot;
- const size_t kSliceCount = 1;
- const int ret = producer_queue->AllocateBuffer(width, height, format, usage,
- kSliceCount, &slot);
+ const int ret = producer_queue->AllocateBuffer(width, height, layer_count,
+ format, usage, &slot);
if (ret < 0) {
ALOGE(
"Surface::CreateQueue: Failed to allocate buffer on queue_id=%d: %s",
diff --git a/libs/vr/libdisplay/graphics.cpp b/libs/vr/libdisplay/graphics.cpp
index f0e37f8..36f8095 100644
--- a/libs/vr/libdisplay/graphics.cpp
+++ b/libs/vr/libdisplay/graphics.cpp
@@ -310,7 +310,7 @@
if (!direct_surface) {
auto queue_status = surface->CreateQueue(
- sizeof(DisplaySurfaceMetadata), 1, HAL_PIXEL_FORMAT_BLOB,
+ sizeof(DisplaySurfaceMetadata), 1, 1, HAL_PIXEL_FORMAT_BLOB,
GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET |
GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN |
GRALLOC1_CONSUMER_USAGE_GPU_DATA_BUFFER,
@@ -809,8 +809,7 @@
// so that anyone who tries to bind an FBO to context->texture_id
// will not get an incomplete buffer.
context->current_buffer = context->buffer_queue->Dequeue();
- LOG_ALWAYS_FATAL_IF(context->gl.texture_count !=
- context->current_buffer->buffer()->slice_count());
+ LOG_ALWAYS_FATAL_IF(context->gl.texture_count != 1);
for (int i = 0; i < context->gl.texture_count; ++i) {
glBindTexture(context->gl.texture_target_type, context->gl.texture_id[i]);
glEGLImageTargetTexture2DOES(context->gl.texture_target_type,
@@ -1277,8 +1276,7 @@
float32x4_t is_late_latch = DVR_POSE_LATE_LATCH;
if (render_pose_orientation[0] != is_late_latch[0]) {
volatile DisplaySurfaceMetadata* data = graphics_context->surface_metadata;
- uint32_t buffer_index =
- graphics_context->current_buffer->surface_buffer_index();
+ uint32_t buffer_index = 0;
ALOGE_IF(TRACE, "write pose index %d %f %f", buffer_index,
render_pose_orientation[0], render_pose_orientation[1]);
data->orientation[buffer_index] = render_pose_orientation;
diff --git a/libs/vr/libdisplay/include/private/dvr/display_client.h b/libs/vr/libdisplay/include/private/dvr/display_client.h
index 668532d..7a7f670 100644
--- a/libs/vr/libdisplay/include/private/dvr/display_client.h
+++ b/libs/vr/libdisplay/include/private/dvr/display_client.h
@@ -42,6 +42,7 @@
// parameters.
pdx::Status<std::unique_ptr<ProducerQueue>> CreateQueue(uint32_t width,
uint32_t height,
+ uint32_t layer_count,
uint32_t format,
uint64_t usage,
size_t capacity);
diff --git a/libs/vr/libdisplay/native_buffer_queue.cpp b/libs/vr/libdisplay/native_buffer_queue.cpp
index 762db32..1bb05d8 100644
--- a/libs/vr/libdisplay/native_buffer_queue.cpp
+++ b/libs/vr/libdisplay/native_buffer_queue.cpp
@@ -27,8 +27,8 @@
for (size_t i = 0; i < capacity; i++) {
size_t slot;
// TODO(jwcai) Should change to use BufferViewPort's spec to config.
- const int ret = producer_queue_->AllocateBuffer(width_, height_, format_,
- usage_, 1, &slot);
+ const int ret = producer_queue_->AllocateBuffer(width_, height_, 1, format_,
+ usage_, &slot);
if (ret < 0) {
ALOGE(
"NativeBufferQueue::NativeBufferQueue: Failed to allocate buffer: %s",
diff --git a/libs/vr/libdvr/Android.bp b/libs/vr/libdvr/Android.bp
new file mode 100644
index 0000000..3876745
--- /dev/null
+++ b/libs/vr/libdvr/Android.bp
@@ -0,0 +1,67 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+cc_library_headers {
+ name: "libdvr_headers",
+ owner: "google",
+ export_include_dirs: ["include"],
+}
+
+cc_library_static {
+ name: "libdvr",
+ owner: "google",
+
+ cflags: [
+ "-fvisibility=hidden",
+ "-DLOG_TAG=\"libdvr\"",
+ "-DDVR_EXPORT=__attribute__((visibility(\"default\")))",
+ ],
+
+ header_libs: ["libdvr_headers"],
+ export_header_lib_headers: ["libdvr_headers"],
+
+ srcs: [
+ "dvr_api.cpp",
+ "dvr_buffer.cpp",
+ "dvr_buffer_queue.cpp",
+ "dvr_display_manager.cpp",
+ "dvr_hardware_composer_client.cpp",
+ "dvr_surface.cpp",
+ "dvr_vsync.cpp",
+ ],
+
+ static_libs: [
+ "libbufferhub",
+ "libbufferhubqueue",
+ "libdisplay",
+ "libvrsensor",
+ "libvirtualtouchpadclient",
+ "libvr_hwc-impl",
+ "libvr_hwc-binder",
+ "libgrallocusage",
+ ],
+
+ shared_libs: [
+ "android.hardware.graphics.bufferqueue@1.0",
+ "android.hidl.token@1.0-utils",
+ "libbase",
+ "libnativewindow",
+ ],
+
+}
+
+subdirs = [
+ "tests",
+]
diff --git a/libs/vr/libdvr/Android.mk b/libs/vr/libdvr/Android.mk
deleted file mode 100644
index 2375b5a..0000000
--- a/libs/vr/libdvr/Android.mk
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libdvr
-LOCAL_MODULE_OWNER := google
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-
-LOCAL_CFLAGS += \
- -fvisibility=hidden \
- -DLOG_TAG=\"libdvr\" \
- -D DVR_EXPORT='__attribute__ ((visibility ("default")))'
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/include \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := \
- $(LOCAL_PATH)/include \
-
-LOCAL_SRC_FILES := \
- dvr_api.cpp \
- dvr_buffer.cpp \
- dvr_buffer_queue.cpp \
- dvr_display_manager.cpp \
- dvr_hardware_composer_client.cpp \
- dvr_surface.cpp \
- dvr_vsync.cpp \
-
-LOCAL_STATIC_LIBRARIES := \
- libbufferhub \
- libbufferhubqueue \
- libdisplay \
- libvrsensor \
- libvirtualtouchpadclient \
- libvr_hwc-impl \
- libvr_hwc-binder \
- libgrallocusage \
-
-LOCAL_SHARED_LIBRARIES := \
- android.hardware.graphics.bufferqueue@1.0 \
- android.hidl.token@1.0-utils \
- libbase \
- libnativewindow \
-
-include $(BUILD_STATIC_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/libs/vr/libdvr/dvr_surface.cpp b/libs/vr/libdvr/dvr_surface.cpp
index b70f726..67e2ae8 100644
--- a/libs/vr/libdvr/dvr_surface.cpp
+++ b/libs/vr/libdvr/dvr_surface.cpp
@@ -133,7 +133,8 @@
int dvrSurfaceCreateWriteBufferQueue(DvrSurface* surface, uint32_t width,
uint32_t height, uint32_t format,
- uint64_t usage, size_t capacity,
+ uint32_t layer_count, uint64_t usage,
+ size_t capacity,
DvrWriteBufferQueue** out_writer) {
if (surface == nullptr || out_writer == nullptr) {
ALOGE(
@@ -143,8 +144,8 @@
return -EINVAL;
}
- auto status =
- surface->surface->CreateQueue(width, height, format, usage, capacity);
+ auto status = surface->surface->CreateQueue(width, height, layer_count,
+ format, usage, capacity);
if (!status) {
ALOGE("dvrSurfaceCreateWriteBufferQueue: Failed to create queue: %s",
status.GetErrorMessage().c_str());
diff --git a/libs/vr/libdvr/include/dvr/dvr_api.h b/libs/vr/libdvr/include/dvr/dvr_api.h
index 7124bee..8a203e0 100644
--- a/libs/vr/libdvr/include/dvr/dvr_api.h
+++ b/libs/vr/libdvr/include/dvr/dvr_api.h
@@ -162,7 +162,8 @@
size_t attribute_count);
typedef int (*DvrSurfaceCreateWriteBufferQueuePtr)(
DvrSurface* surface, uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t capacity, DvrWriteBufferQueue** queue_out);
+ uint32_t layer_count, uint64_t usage, size_t capacity,
+ DvrWriteBufferQueue** queue_out);
// vsync_client_api.h
typedef int (*DvrVSyncClientCreatePtr)(DvrVSyncClient** client_out);
@@ -261,13 +262,10 @@
// The buffer metadata that an Android Surface (a.k.a. ANativeWindow)
// will populate. A DvrWriteBufferQueue must be created with this metadata iff
-// ANativeWindow access is needed. Note that this struct must stay in sync with
-// BufferHubQueueCore::NativeBufferMetadata. Please do not remove, modify, or
-// reorder existing data members. If new fields need to be added, please take
-// extra care to make sure that new data field is padded properly the size of
-// the struct stays same.
-// TODO(b/37578558) Move |dvr_api.h| into a header library so that this
-// structure won't be copied between |dvr_api.h| and |buffer_hub_qeue_core.h|.
+// ANativeWindow access is needed. Please do not remove, modify, or reorder
+// existing data members. If new fields need to be added, please take extra care
+// to make sure that new data field is padded properly the size of the struct
+// stays same.
struct DvrNativeBufferMetadata {
// Timestamp of the frame.
int64_t timestamp;
diff --git a/libs/vr/libdvr/include/dvr/dvr_surface.h b/libs/vr/libdvr/include/dvr/dvr_surface.h
index 58f2a10..361488e 100644
--- a/libs/vr/libdvr/include/dvr/dvr_surface.h
+++ b/libs/vr/libdvr/include/dvr/dvr_surface.h
@@ -78,7 +78,8 @@
// @return 0 on success. Otherwise returns a negative error value.
int dvrSurfaceCreateWriteBufferQueue(DvrSurface* surface, uint32_t width,
uint32_t height, uint32_t format,
- uint64_t usage, size_t capacity,
+ uint32_t layer_count, uint64_t usage,
+ size_t capacity,
DvrWriteBufferQueue** queue_out);
// Get a named buffer from the display service.
diff --git a/libs/vr/libdvr/tests/Android.bp b/libs/vr/libdvr/tests/Android.bp
new file mode 100644
index 0000000..af202b4
--- /dev/null
+++ b/libs/vr/libdvr/tests/Android.bp
@@ -0,0 +1,53 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+shared_libraries = [
+ "libbase",
+ "libbinder",
+ "libcutils",
+ "libgui",
+ "liblog",
+ "libhardware",
+ "libui",
+ "libutils",
+ "libnativewindow",
+]
+
+static_libraries = [
+ "libdvr",
+ "libbufferhubqueue",
+ "libbufferhub",
+ "libchrome",
+ "libdvrcommon",
+ "libdisplay",
+ "libpdx_default_transport",
+]
+
+cc_test {
+ srcs: [
+ "dvr_buffer_queue-test.cpp",
+ "dvr_display_manager-test.cpp",
+ "dvr_named_buffer-test.cpp",
+ ],
+
+ static_libs: static_libraries,
+ shared_libs: shared_libraries,
+ cflags: [
+ "-DLOG_TAG=\"dvr_api-test\"",
+ "-DTRACE=0",
+ "-O0",
+ "-g",
+ ],
+ name: "dvr_api-test",
+}
diff --git a/libs/vr/libdvr/tests/Android.mk b/libs/vr/libdvr/tests/Android.mk
deleted file mode 100644
index d6ce99b..0000000
--- a/libs/vr/libdvr/tests/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-shared_libraries := \
- libbase \
- libbinder \
- libcutils \
- libgui \
- liblog \
- libhardware \
- libui \
- libutils \
- libnativewindow \
-
-static_libraries := \
- libdvr \
- libbufferhubqueue \
- libbufferhub \
- libchrome \
- libdvrcommon \
- libdisplay \
- libpdx_default_transport \
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := \
- dvr_buffer_queue-test.cpp \
- dvr_display_manager-test.cpp \
- dvr_named_buffer-test.cpp \
-
-LOCAL_STATIC_LIBRARIES := $(static_libraries)
-LOCAL_SHARED_LIBRARIES := $(shared_libraries)
-LOCAL_EXPORT_C_INCLUDE_DIRS := ${LOCAL_C_INCLUDES}
-LOCAL_CFLAGS := -DLOG_TAG=\"dvr_api-test\" -DTRACE=0 -O0 -g
-LOCAL_MODULE := dvr_api-test
-LOCAL_MODULE_TAGS := optional
-include $(BUILD_NATIVE_TEST)
diff --git a/libs/vr/libdvr/tests/dvr_buffer_queue-test.cpp b/libs/vr/libdvr/tests/dvr_buffer_queue-test.cpp
index 5f7f1bf..474e968 100644
--- a/libs/vr/libdvr/tests/dvr_buffer_queue-test.cpp
+++ b/libs/vr/libdvr/tests/dvr_buffer_queue-test.cpp
@@ -15,9 +15,9 @@
static constexpr int kBufferWidth = 100;
static constexpr int kBufferHeight = 1;
+static constexpr int kLayerCount = 1;
static constexpr int kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
static constexpr int kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
-static constexpr int kBufferSliceCount = 1; // number of slices in each buffer
static constexpr size_t kQueueCapacity = 3;
typedef uint64_t TestMeta;
@@ -40,10 +40,9 @@
void AllocateBuffers(size_t buffer_count) {
size_t out_slot;
for (size_t i = 0; i < buffer_count; i++) {
- int ret =
- GetProducerQueueFromDvrWriteBufferQueue(write_queue_)
- ->AllocateBuffer(kBufferWidth, kBufferHeight, kBufferFormat,
- kBufferUsage, kBufferSliceCount, &out_slot);
+ int ret = GetProducerQueueFromDvrWriteBufferQueue(write_queue_)
+ ->AllocateBuffer(kBufferWidth, kBufferHeight, kLayerCount,
+ kBufferFormat, kBufferUsage, &out_slot);
ASSERT_EQ(0, ret);
}
}
diff --git a/libs/vr/libdvr/tests/dvr_display_manager-test.cpp b/libs/vr/libdvr/tests/dvr_display_manager-test.cpp
index 0150984..a2414d6 100644
--- a/libs/vr/libdvr/tests/dvr_display_manager-test.cpp
+++ b/libs/vr/libdvr/tests/dvr_display_manager-test.cpp
@@ -56,10 +56,11 @@
Status<UniqueDvrWriteBufferQueue> CreateSurfaceQueue(
const UniqueDvrSurface& surface, uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage, size_t capacity) {
+ uint32_t format, uint32_t layer_count, uint64_t usage, size_t capacity) {
DvrWriteBufferQueue* queue;
- const int ret = dvrSurfaceCreateWriteBufferQueue(
- surface.get(), width, height, format, usage, capacity, &queue);
+ const int ret =
+ dvrSurfaceCreateWriteBufferQueue(surface.get(), width, height, format,
+ layer_count, usage, capacity, &queue);
if (ret < 0)
return ErrorStatus(-ret);
else
@@ -484,7 +485,7 @@
// Create a new queue in the surface.
auto write_queue_status = CreateSurfaceQueue(
- surface, 320, 240, AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
+ surface, 320, 240, AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, 1,
AHARDWAREBUFFER_USAGE_CPU_READ_RARELY, 1);
ASSERT_STATUS_OK(write_queue_status);
UniqueDvrWriteBufferQueue write_queue = write_queue_status.take();
@@ -533,6 +534,45 @@
ASSERT_EQ(0u, queue_ids.size());
}
+TEST_F(DvrDisplayManagerTest, MultiLayerBufferQueue) {
+ // Create an application surface.
+ auto surface_status = CreateApplicationSurface();
+ ASSERT_STATUS_OK(surface_status);
+ UniqueDvrSurface surface = surface_status.take();
+ ASSERT_NE(nullptr, surface.get());
+
+ // Get surface state and verify there is one surface.
+ ASSERT_STATUS_OK(manager_->WaitForUpdate());
+ ASSERT_STATUS_EQ(1u, manager_->GetSurfaceCount());
+
+ // Create a new queue in the surface.
+ const uint32_t kLayerCount = 3;
+ auto write_queue_status = CreateSurfaceQueue(
+ surface, 320, 240, AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, kLayerCount,
+ AHARDWAREBUFFER_USAGE_CPU_READ_RARELY, 1);
+ ASSERT_STATUS_OK(write_queue_status);
+ UniqueDvrWriteBufferQueue write_queue = write_queue_status.take();
+ ASSERT_NE(nullptr, write_queue.get());
+
+ DvrWriteBuffer* buffer = nullptr;
+ dvrWriteBufferCreateEmpty(&buffer);
+ int fence_fd = -1;
+ int error =
+ dvrWriteBufferQueueDequeue(write_queue.get(), 1000, buffer, &fence_fd);
+ ASSERT_EQ(0, error);
+
+ AHardwareBuffer* hardware_buffer = nullptr;
+ error = dvrWriteBufferGetAHardwareBuffer(buffer, &hardware_buffer);
+ ASSERT_EQ(0, error);
+
+ AHardwareBuffer_Desc desc = {};
+ AHardwareBuffer_describe(hardware_buffer, &desc);
+ ASSERT_EQ(kLayerCount, desc.layers);
+
+ AHardwareBuffer_release(hardware_buffer);
+ dvrWriteBufferDestroy(buffer);
+}
+
} // namespace
} // namespace dvr
diff --git a/libs/vr/libeds/Android.bp b/libs/vr/libeds/Android.bp
deleted file mode 100644
index a149853..0000000
--- a/libs/vr/libeds/Android.bp
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-sourceFiles = [
- "eds.cpp",
- "eds_mesh.cpp",
- "composite_hmd.cpp",
- "display_metrics.cpp",
- "distortion_renderer.cpp",
- "device_metrics.cpp",
- "polynomial_radial_distortion.cpp",
-]
-
-localIncludeFiles = [
- "include",
-]
-
-sharedLibraries = [
- "libbase",
- "libcutils",
- "liblog",
- "libEGL",
- "libGLESv1_CM",
- "libGLESv2",
- "libui",
- "libutils",
- "libvulkan",
-]
-
-staticLibraries = [
- "libdisplay",
- "libdvrcommon",
- "libdvrgraphics",
- "libvrsensor",
- "libpdx_default_transport",
-]
-
-cc_library_static {
- srcs: sourceFiles,
- cflags: [
- "-DGL_GLEXT_PROTOTYPES",
- "-DEGL_EGLEXT_PROTOTYPES",
- "-Wno-unused-parameter"],
- // Enable debug options below to show GL errors and use gdb.
- // + ["-UNDEBUG", "-DDEBUG", "-O0", "-g", ]
- export_include_dirs: localIncludeFiles,
- shared_libs: sharedLibraries,
- static_libs: staticLibraries,
- name: "libeds",
-}
-
-testFiles = ["tests/eds_app_tests.cpp"]
-
-cc_test {
- name: "eds_app_tests",
- tags: ["optional"],
-
- srcs: testFiles,
-
- shared_libs: [
- "libhardware",
- "libsync",
- ] + sharedLibraries,
-
- static_libs: [
- "libgmock_main",
- "libgmock",
- "libeds",
- ] + staticLibraries + [
- "libbufferhub",
- "libbufferhubqueue",
- ],
-
-}
diff --git a/libs/vr/libeds/composite_hmd.cpp b/libs/vr/libeds/composite_hmd.cpp
deleted file mode 100644
index d6bf164..0000000
--- a/libs/vr/libeds/composite_hmd.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-#include "include/private/dvr/composite_hmd.h"
-
-#include <log/log.h>
-
-#include <private/dvr/numeric.h>
-
-namespace android {
-namespace dvr {
-
-CompositeHmd::CompositeHmd(const HeadMountMetrics& head_mount_metrics,
- const DisplayMetrics& display_metrics)
- : head_mount_metrics_(head_mount_metrics),
- display_metrics_(display_metrics) {
- MetricsChanged();
-}
-
-float CompositeHmd::GetTargetFrameDuration() const {
- return display_metrics_.GetFrameDurationSeconds();
-}
-
-vec2 CompositeHmd::ComputeDistortedPoint(EyeType eye, vec2 position,
- RgbColorChannel channel) const {
- position = TransformPoint(eye_tan_angle_from_norm_screen_matrix_[eye], position);
- vec2 distorted =
- head_mount_metrics_.GetColorChannelDistortion(channel).Distort(position);
- return TransformPoint(eye_norm_texture_from_tan_angle_matrix_[eye], distorted);
-}
-
-vec2 CompositeHmd::ComputeInverseDistortedPoint(EyeType eye, vec2 position,
- RgbColorChannel channel) const {
- position = TransformPoint(eye_norm_texture_from_tan_angle_inv_matrix_[eye], position);
- vec2 distorted =
- head_mount_metrics_.GetColorChannelDistortion(channel).DistortInverse(
- position);
- return TransformPoint(eye_tan_angle_from_norm_screen_inv_matrix_[eye], distorted);
-}
-
-void CompositeHmd::ComputeDistortedVertex(EyeType eye, vec2 uv_in,
- vec2* vertex_out,
- vec2* uv_out) const {
- // The mesh vertices holds the shape of the distortion.
- vec2 vertex_position = ComputeInverseDistortedPoint(eye, uv_in, kRed);
- *vertex_out = vec2(vertex_position.x() - 0.5f, vertex_position.y() - 0.5f);
-
- if (uv_out) {
- // Compute the texture coordinate for each vertex coordinate.
- // Red's is the inverse of the inverse, skip the calculation and use uv_in.
- uv_out[kRed] = uv_in;
- uv_out[kGreen] = ComputeDistortedPoint(eye, vertex_position, kGreen);
- uv_out[kBlue] = ComputeDistortedPoint(eye, vertex_position, kBlue);
- }
-}
-
-vec2i CompositeHmd::GetRecommendedRenderTargetSize() const {
- return recommended_render_target_size_;
-}
-
-Range2i CompositeHmd::GetDisplayRange() const { return display_range_; }
-
-mat4 CompositeHmd::GetEyeFromHeadMatrix(EyeType eye) const {
- return eye_from_head_matrix_[eye];
-}
-
-FieldOfView CompositeHmd::GetEyeFov(EyeType eye) const { return eye_fov_[eye]; }
-
-Range2i CompositeHmd::GetEyeViewportBounds(EyeType eye) const {
- return eye_viewport_range_[eye];
-}
-
-void CompositeHmd::SetHeadMountMetrics(
- const HeadMountMetrics& head_mount_metrics) {
- // Use the assignement operator to do memberwise copy.
- head_mount_metrics_ = head_mount_metrics;
- MetricsChanged();
-}
-
-const HeadMountMetrics& CompositeHmd::GetHeadMountMetrics() const {
- return head_mount_metrics_;
-}
-
-void CompositeHmd::SetDisplayMetrics(const DisplayMetrics& display_metrics) {
- // Use the assignment operator to do memberwise copy.
- display_metrics_ = display_metrics;
- MetricsChanged();
-}
-
-const DisplayMetrics& CompositeHmd::GetDisplayMetrics() const {
- return display_metrics_;
-}
-
-void CompositeHmd::MetricsChanged() {
- // Abbreviations in variable names:
- // "vp": viewport
- // "ta": tan-angle
- const HeadMountMetrics& mount = head_mount_metrics_;
- DisplayMetrics display = display_metrics_;
-
- if (display.IsPortrait()) {
- // If we're in portrait mode, toggle the orientation so that all
- // calculations are done in landscape mode.
- display.ToggleOrientation();
- }
-
- float display_width_meters = display.GetSizeMeters()[0];
- float display_height_meters = display.GetSizeMeters()[1];
-
- vec2 pixels_per_meter = vec2(1.0f / display.GetMetersPerPixel()[0],
- 1.0f / display.GetMetersPerPixel()[1]);
-
- // virtual_eye_to_screen_dist is the distance from the screen to the eye
- // after it has been projected through the lens. This would normally be
- // slightly different from the distance to the actual eye.
- float virtual_eye_to_screen_dist = mount.GetVirtualEyeToScreenDistance();
- float meters_per_tan_angle = virtual_eye_to_screen_dist;
- vec2 pixels_per_tan_angle = pixels_per_meter * meters_per_tan_angle;
-
- LOG_ALWAYS_FATAL_IF(0.0f == display_width_meters);
- LOG_ALWAYS_FATAL_IF(0.0f == display_height_meters);
- LOG_ALWAYS_FATAL_IF(0.0f == virtual_eye_to_screen_dist);
-
- // Height of lenses from the bottom of the screen.
- float lens_y_center = 0;
- float bottom_dist = 0;
- float top_dist = 0;
-
- // bottom_display_dist and top_display_dist represent the distance from the
- // lens center to the edge of the display.
- float bottom_display_dist = 0;
- float top_display_dist = 0;
- switch (mount.GetVerticalAlignment()) {
- case HeadMountMetrics::kBottom:
- lens_y_center =
- mount.GetTrayToLensDistance() - display.GetBorderSizeMeters();
- bottom_dist = lens_y_center;
- top_dist = lens_y_center;
- bottom_display_dist = lens_y_center;
- top_display_dist = display_height_meters - lens_y_center;
- break;
- case HeadMountMetrics::kCenter:
- // TODO(hendrikw): This should respect the border size, but since we
- // currently hard code the border size, it would break
- // the distortion on some devices. Revisit when border
- // size is fixed.
- lens_y_center = display_height_meters * 0.5f;
- bottom_dist = lens_y_center;
- top_dist = lens_y_center;
- bottom_display_dist = lens_y_center;
- top_display_dist = lens_y_center;
- break;
- case HeadMountMetrics::kTop:
- lens_y_center = display_height_meters - (mount.GetTrayToLensDistance() -
- display.GetBorderSizeMeters());
- bottom_dist =
- mount.GetTrayToLensDistance() - display.GetBorderSizeMeters();
- top_dist = bottom_dist;
- bottom_display_dist = lens_y_center;
- top_display_dist = display_height_meters - lens_y_center;
- break;
- }
-
- float inner_dist = mount.GetScreenCenterToLensDistance();
- float outer_dist = display_width_meters * 0.5f - inner_dist;
-
- // We don't take chromatic aberration into account yet for computing FOV,
- // viewport, etc, so we only use the green channel for now. Note the actual
- // Distort function *does* implement chromatic aberration.
- const ColorChannelDistortion& distortion =
- mount.GetColorChannelDistortion(kGreen);
-
- vec2 outer_point(outer_dist / virtual_eye_to_screen_dist, 0.0f);
- vec2 inner_point(inner_dist / virtual_eye_to_screen_dist, 0.0f);
- vec2 bottom_point(0.0f, bottom_dist / virtual_eye_to_screen_dist);
- vec2 top_point(0.0f, top_dist / virtual_eye_to_screen_dist);
-
- float outer_angle = atanf(distortion.Distort(outer_point)[0]);
- float inner_angle = atanf(distortion.Distort(inner_point)[0]);
- float bottom_angle = atanf(distortion.Distort(bottom_point)[1]);
- float top_angle = atanf(distortion.Distort(top_point)[1]);
-
- for (EyeType eye : {kLeftEye, kRightEye}) {
- const FieldOfView max_fov = mount.GetEyeMaxFov(eye);
- float left_angle = (eye == kLeftEye) ? outer_angle : inner_angle;
- float right_angle = (eye == kLeftEye) ? inner_angle : outer_angle;
-
- eye_fov_[eye] = FieldOfView(std::min(left_angle, max_fov.GetLeft()),
- std::min(right_angle, max_fov.GetRight()),
- std::min(bottom_angle, max_fov.GetBottom()),
- std::min(top_angle, max_fov.GetTop()));
-
- vec2 texture_vp_ta_p1 =
- vec2(-tanf(eye_fov_[eye].GetLeft()), -tanf(eye_fov_[eye].GetBottom()));
- vec2 texture_vp_ta_p2 =
- vec2(tanf(eye_fov_[eye].GetRight()), tanf(eye_fov_[eye].GetTop()));
- vec2 texture_vp_size_ta = texture_vp_ta_p2 - texture_vp_ta_p1;
-
- vec2 texture_vp_sizef_pixels =
- texture_vp_size_ta.array() * pixels_per_tan_angle.array();
-
- vec2i texture_vp_size_pixels =
- vec2i(static_cast<int32_t>(roundf(texture_vp_sizef_pixels[0])),
- static_cast<int32_t>(roundf(texture_vp_sizef_pixels[1])));
- int vp_start_x =
- (eye == kLeftEye) ? 0 : eye_viewport_range_[kLeftEye].p2[0];
-
- eye_viewport_range_[eye] =
- Range2i::FromSize(vec2i(vp_start_x, 0), texture_vp_size_pixels);
- float left_dist = (eye == kLeftEye) ? outer_dist : inner_dist;
- float right_dist = (eye == kLeftEye) ? inner_dist : outer_dist;
- vec2 screen_ta_p1(-left_dist / virtual_eye_to_screen_dist,
- -bottom_display_dist / virtual_eye_to_screen_dist);
- vec2 screen_ta_p2(right_dist / virtual_eye_to_screen_dist,
- top_display_dist / virtual_eye_to_screen_dist);
- vec2 screen_ta_size = screen_ta_p2 - screen_ta_p1;
-
- // Align the tan angle coordinates to the nearest pixel. This will ensure
- // that the optical center doesn't straddle multiple pixels.
- // TODO(hendrikw): verify that this works correctly for Daydream View.
- vec2 tan_angle_per_pixel(screen_ta_size.array() /
- texture_vp_size_pixels.cast<float>().array());
- vec2 pixel_p1(screen_ta_p1.array() / tan_angle_per_pixel.array());
- vec2 pixel_shift(roundf(pixel_p1.x()) - pixel_p1.x(),
- roundf(pixel_p1.y()) - pixel_p1.y());
- screen_ta_p1 +=
- (tan_angle_per_pixel.array() * pixel_shift.array()).matrix();
- screen_ta_p2 +=
- (tan_angle_per_pixel.array() * pixel_shift.array()).matrix();
-
- // Calculate the transformations needed for the distortions.
- eye_tan_angle_from_norm_screen_matrix_[eye] =
- TranslationMatrix(vec2(screen_ta_p1)) *
- ScaleMatrix(screen_ta_size);
- eye_tan_angle_from_norm_screen_inv_matrix_[eye] =
- eye_tan_angle_from_norm_screen_matrix_[eye].inverse();
-
- eye_norm_texture_from_tan_angle_inv_matrix_[eye] =
- TranslationMatrix(texture_vp_ta_p1) *
- ScaleMatrix(texture_vp_size_ta);
- eye_norm_texture_from_tan_angle_matrix_[eye] =
- eye_norm_texture_from_tan_angle_inv_matrix_[eye].inverse();
- }
- vec2i left_vp_size = eye_viewport_range_[kLeftEye].GetSize();
- vec2i right_vp_size = eye_viewport_range_[kRightEye].GetSize();
-
- recommended_render_target_size_ =
- vec2i(left_vp_size[0] + right_vp_size[0],
- std::max(left_vp_size[1], right_vp_size[1]));
-
- display_range_ = Range2i::FromSize(vec2i(0, 0), display.GetSizePixels());
-
- eye_from_head_matrix_[kLeftEye] = Eigen::Translation3f(
- vec3(mount.GetScreenCenterToLensDistance(), 0.0f, 0.0f));
- eye_from_head_matrix_[kRightEye] = Eigen::Translation3f(
- vec3(-mount.GetScreenCenterToLensDistance(), 0.0f, 0.0f));
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/device_metrics.cpp b/libs/vr/libeds/device_metrics.cpp
deleted file mode 100644
index 68ee186..0000000
--- a/libs/vr/libeds/device_metrics.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-#include <private/dvr/device_metrics.h>
-
-#include <cutils/properties.h>
-#include <private/dvr/head_mount_metrics.h>
-#include <private/dvr/identity_distortion.h>
-#include <private/dvr/polynomial_radial_distortion.h>
-#include <private/dvr/types.h>
-#include "include/private/dvr/display_metrics.h"
-
-namespace {
-
-static constexpr char kRPolynomial[] = "persist.dvr.r_poly";
-static constexpr char kGPolynomial[] = "persist.dvr.g_poly";
-static constexpr char kBPolynomial[] = "persist.dvr.b_poly";
-static constexpr char kLensDistance[] = "persist.dvr.lens_distance";
-static constexpr char kDisplayGap[] = "persist.dvr.display_gap";
-static constexpr char kVEyeToDisplay[] = "persist.dvr.v_eye_to_display";
-static constexpr char kFovIOBT[] = "persist.dvr.fov_iobt";
-static constexpr char kScreenSize[] = "persist.dvr.screen_size";
-
-bool StringToFloat(const char* str, float* result) {
- char* endptr = nullptr;
- *result = std::strtof(str, &endptr);
- return !(str == endptr || !endptr);
-}
-
-std::vector<std::string> SplitString(const std::string& string_to_split,
- char deliminator) {
- std::vector<std::string> result;
- std::string sub_string;
- std::stringstream ss(string_to_split);
- while (std::getline(ss, sub_string, deliminator))
- result.push_back(sub_string);
- return result;
-}
-
-std::vector<float> GetProperty(const char* name,
- const std::vector<float>& default_values) {
- char prop[PROPERTY_VALUE_MAX + 1] = {};
- property_get(name, prop, "");
- std::vector<std::string> values = SplitString(prop, ',');
- std::vector<float> results;
- for (const auto& value : values) {
- float result = 0.0f;
- if (StringToFloat(value.c_str(), &result)) {
- results.push_back(static_cast<float>(result));
- }
- }
- if (results.empty()) {
- return default_values;
- }
- return results;
-}
-
-float GetProperty(const char* name, float default_value) {
- char prop[PROPERTY_VALUE_MAX + 1] = {};
- property_get(name, prop, "");
- float result = 0.0f;
- if (StringToFloat(prop, &result)) {
- return static_cast<float>(result);
- }
- return default_value;
-}
-
-float GetInterLensDistance() { return GetProperty(kLensDistance, 0.064f); }
-
-float GetDisplayGap() { return GetProperty(kDisplayGap, 0.0f); }
-
-float GetTrayToLensDistance() { return 0.035f; }
-
-float GetVEyeToDisplay() { return GetProperty(kVEyeToDisplay, 0.042f); }
-
-android::dvr::vec2 GetDisplaySize() {
- static const std::vector<float> default_size = {0.0742177f, 0.131943f};
- std::vector<float> sizes = GetProperty(kScreenSize, default_size);
- if (sizes.size() != 0)
- sizes = default_size;
- return android::dvr::vec2(sizes[0], sizes[1]);
-}
-
-std::vector<float> GetMaxFOVs() {
- static const std::vector<float> defaults = {43.7f, 47.8f, 54.2f, 54.2f};
- std::vector<float> fovs = GetProperty(kFovIOBT, defaults);
- if (fovs.size() != 4)
- fovs = defaults;
- for (auto& value : fovs) {
- value = value * M_PI / 180.0f;
- }
- return fovs;
-}
-
-static const android::dvr::HeadMountMetrics::VerticalAlignment
- kDefaultVerticalAlignment = android::dvr::HeadMountMetrics::kCenter;
-
-// Default border size in meters.
-static const float kScreenBorderSize = 0.004f;
-
-// Refresh rate.
-static const float kScreenRefreshRate = 60.0f;
-
-// Default display orientation is portrait.
-static const android::dvr::DisplayOrientation kDisplayOrientation =
- android::dvr::DisplayOrientation::kPortrait;
-
-} // anonymous namespace
-
-namespace android {
-namespace dvr {
-
-HeadMountMetrics CreateHeadMountMetrics(const FieldOfView& l_fov,
- const FieldOfView& r_fov) {
- static const std::vector<float> default_r = {
- 0.00103f, 2.63917f, -7.14427f, 8.98036f, -4.10586f, 0.83705f, 0.00130f};
- static const std::vector<float> default_g = {
- 0.08944f, 2.26005f, -6.30924f, 7.94561f, -3.22788f, 0.45577f, 0.07300f};
- static const std::vector<float> default_b = {
- 0.16364f, 1.94083f, -5.55033f, 6.89578f, -2.19053f, -0.04050f, 0.17380f};
- std::vector<float> poly_r = GetProperty(kRPolynomial, default_r);
- std::vector<float> poly_g = GetProperty(kGPolynomial, default_g);
- std::vector<float> poly_b = GetProperty(kBPolynomial, default_b);
-
- std::shared_ptr<ColorChannelDistortion> distortion_r(
- new PolynomialRadialDistortion(poly_r));
- std::shared_ptr<ColorChannelDistortion> distortion_g(
- new PolynomialRadialDistortion(poly_g));
- std::shared_ptr<ColorChannelDistortion> distortion_b(
- new PolynomialRadialDistortion(poly_b));
-
- return HeadMountMetrics(GetInterLensDistance(), GetTrayToLensDistance(),
- GetVEyeToDisplay(), kDefaultVerticalAlignment, l_fov,
- r_fov, distortion_r, distortion_g, distortion_b,
- HeadMountMetrics::EyeOrientation::kCCW0Degrees,
- HeadMountMetrics::EyeOrientation::kCCW0Degrees,
- (GetInterLensDistance() - GetDisplayGap()) / 2.0f);
-}
-
-HeadMountMetrics CreateHeadMountMetrics() {
- std::vector<float> fovs = GetMaxFOVs();
- FieldOfView l_fov(fovs[1], fovs[0], fovs[2], fovs[3]);
- FieldOfView r_fov(fovs[0], fovs[1], fovs[2], fovs[3]);
- return CreateHeadMountMetrics(l_fov, r_fov);
-}
-
-DisplayMetrics CreateDisplayMetrics(vec2i screen_size) {
- android::dvr::vec2 size_in_meters = GetDisplaySize();
- vec2 meters_per_pixel(size_in_meters[0] / static_cast<float>(screen_size[0]),
- size_in_meters[1] / static_cast<float>(screen_size[1]));
- return DisplayMetrics(screen_size, meters_per_pixel, kScreenBorderSize,
- 1000.0f / kScreenRefreshRate, kDisplayOrientation);
-}
-
-HeadMountMetrics CreateUndistortedHeadMountMetrics() {
- std::vector<float> fovs = GetMaxFOVs();
- FieldOfView l_fov(fovs[1], fovs[0], fovs[2], fovs[3]);
- FieldOfView r_fov(fovs[0], fovs[1], fovs[2], fovs[3]);
- return CreateUndistortedHeadMountMetrics(l_fov, r_fov);
-}
-
-HeadMountMetrics CreateUndistortedHeadMountMetrics(const FieldOfView& l_fov,
- const FieldOfView& r_fov) {
- auto distortion_all = std::make_shared<IdentityDistortion>();
-
- return HeadMountMetrics(GetInterLensDistance(), GetVEyeToDisplay(),
- GetVEyeToDisplay(), kDefaultVerticalAlignment, l_fov,
- r_fov, distortion_all, distortion_all, distortion_all,
- HeadMountMetrics::EyeOrientation::kCCW0Degrees,
- HeadMountMetrics::EyeOrientation::kCCW0Degrees,
- (GetInterLensDistance() - GetDisplayGap()) / 2.0f);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/display_metrics.cpp b/libs/vr/libeds/display_metrics.cpp
deleted file mode 100644
index e129395..0000000
--- a/libs/vr/libeds/display_metrics.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-#include "include/private/dvr/display_metrics.h"
-
-namespace android {
-namespace dvr {
-
-DisplayMetrics::DisplayMetrics(vec2i size_pixels, vec2 meters_per_pixel,
- float border_size_meters,
- float frame_duration_seconds,
- DisplayOrientation orientation)
- : size_pixels_(size_pixels),
- meters_per_pixel_(meters_per_pixel),
- border_size_meters_(border_size_meters),
- frame_duration_seconds_(frame_duration_seconds),
- orientation_(orientation) {}
-
-void DisplayMetrics::ToggleOrientation() {
- std::swap(size_pixels_[0], size_pixels_[1]);
- std::swap(meters_per_pixel_[0], meters_per_pixel_[1]);
- if (orientation_ == DisplayOrientation::kPortrait)
- orientation_ = DisplayOrientation::kLandscape;
- else
- orientation_ = DisplayOrientation::kPortrait;
-}
-
-DisplayMetrics::DisplayMetrics()
- : DisplayMetrics(vec2i(0, 0), vec2(0.0f, 0.0f), 0.0f, 0.0f,
- DisplayOrientation::kLandscape) {}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/distortion_renderer.cpp b/libs/vr/libeds/distortion_renderer.cpp
deleted file mode 100644
index 13090ca..0000000
--- a/libs/vr/libeds/distortion_renderer.cpp
+++ /dev/null
@@ -1,792 +0,0 @@
-#include "include/private/dvr/distortion_renderer.h"
-
-#include <float.h>
-
-#include <string>
-
-#include <utils/Log.h>
-#define ATRACE_TAG ATRACE_TAG_GRAPHICS
-#include <utils/Trace.h>
-
-#include <log/log.h>
-#include <private/dvr/clock_ns.h>
-#include <private/dvr/composite_hmd.h>
-#include <private/dvr/debug.h>
-#include <private/dvr/graphics/gpu_profiler.h>
-#include <private/dvr/ortho.h>
-#include <private/dvr/sensor_constants.h>
-
-#define STRINGIFY2(s) #s
-#define STRINGIFY(s) STRINGIFY2(s)
-
-#define POSITION_ATTR 0
-#define VIEWPORT_COORD_R_ATTR 1
-#define VIEWPORT_COORD_G_ATTR 2
-#define VIEWPORT_COORD_B_ATTR 3
-
-// Pose data uniform buffer bindings. Must be sequential.
-#define POSE_BINDING 0
-#define POSE_BINDING2 1
-
-// Texture unit bindings. Must be sequential.
-// Things break if we start at binding 0 (samples come back black).
-#define SAMPLER_BINDING 1
-#define SAMPLER_BINDING2 2
-
-#define GLSL_VIGNETTE_FUNC \
- "float vignette(vec2 texCoords) {\n" \
- " const float fadeDist = 0.01;\n" \
- " const float fadeDistInv = 1.0 / fadeDist;\n" \
- " const float inset = 0.02;\n" \
- " vec2 lowEdge = vec2(inset - fadeDist);\n" \
- " vec2 highEdge = vec2(1.0 - inset + fadeDist);\n" \
- " vec2 vignetteMin = " \
- " clamp(-fadeDistInv * (lowEdge - texCoords), 0.0, 1.0);\n" \
- " vec2 vignetteMax = " \
- " clamp(fadeDistInv * (highEdge - texCoords), 0.0, 1.0);\n" \
- " vec2 vignette = vignetteMin * vignetteMax;\n" \
- " return vignette.x * vignette.y;\n" \
- "}\n"
-
-namespace {
-
-// If enabled, the pixel shader will blend by reading back the current pixel
-// from the framebuffer.
-// TODO(jbates) With framebuffer read coherency disabled, this seems to perform
-// well enough. That requires a GL extension, so for now we disable this path.
-constexpr bool kUseFramebufferReadback = false;
-
-static const char* kVertexShaderChromaticAberrationString =
- "uniform mat4 uProjectionMatrix;\n"
- "layout(binding = " STRINGIFY(POSE_BINDING) ", std140)\n"
- "uniform LateLatchData {\n"
- " mat4 uTexFromRecommendedViewportMatrix;\n"
- "};\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "layout(binding = " STRINGIFY(POSE_BINDING2) ", std140)\n"
- "uniform LateLatchData2 {\n"
- " mat4 uTexFromRecommendedViewportMatrix2;\n"
- "};\n"
- "#endif\n"
- "uniform vec4 uTexXMinMax;\n"
- "layout(location = " STRINGIFY(POSITION_ATTR) ") in vec2 aPosition;\n"
- "layout(location = " STRINGIFY(VIEWPORT_COORD_R_ATTR)
- ") in vec2 aViewportCoordsR;\n"
- "layout(location = " STRINGIFY(VIEWPORT_COORD_G_ATTR)
- ") in vec2 aViewportCoordsG;\n"
- "layout(location = " STRINGIFY(VIEWPORT_COORD_B_ATTR)
- ") in vec2 aViewportCoordsB;\n"
- "mediump out vec4 vTexCoordsRG;\n"
- "mediump out vec2 vTexCoordsB;\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "mediump out vec4 vTexCoordsRG2;\n"
- "mediump out vec2 vTexCoordsB2;\n"
- "#endif\n"
- "mediump out vec3 vVignette;\n"
- "\n" GLSL_VIGNETTE_FUNC
- "void main(void) {\n"
- " vVignette.r = vignette(aViewportCoordsR);\n"
- " vVignette.g = vignette(aViewportCoordsG);\n"
- " vVignette.b = vignette(aViewportCoordsB);\n"
- " vec4 redTexCoords = (uTexFromRecommendedViewportMatrix * \n"
- " vec4(aViewportCoordsR, 0., 1.));\n"
- " vec4 greenTexCoords = (uTexFromRecommendedViewportMatrix * \n"
- " vec4(aViewportCoordsG, 0., 1.));\n"
- " vec4 blueTexCoords = (uTexFromRecommendedViewportMatrix * \n"
- " vec4(aViewportCoordsB, 0., 1.));\n"
- " vTexCoordsRG.xy = redTexCoords.xy / redTexCoords.w;\n"
- " vTexCoordsRG.zw = greenTexCoords.xy / greenTexCoords.w;\n"
- " vTexCoordsB = blueTexCoords.xy / blueTexCoords.w;\n"
- " vTexCoordsRG.x = clamp(vTexCoordsRG.x, uTexXMinMax.x, uTexXMinMax.y);\n"
- " vTexCoordsRG.z = clamp(vTexCoordsRG.z, uTexXMinMax.x, uTexXMinMax.y);\n"
- " vTexCoordsB.x = clamp(vTexCoordsB.x, uTexXMinMax.x, uTexXMinMax.y);\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- " redTexCoords = (uTexFromRecommendedViewportMatrix2 * \n"
- " vec4(aViewportCoordsR, 0., 1.));\n"
- " greenTexCoords = (uTexFromRecommendedViewportMatrix2 * \n"
- " vec4(aViewportCoordsG, 0., 1.));\n"
- " blueTexCoords = (uTexFromRecommendedViewportMatrix2 * \n"
- " vec4(aViewportCoordsB, 0., 1.));\n"
- " vTexCoordsRG2.xy = redTexCoords.xy / redTexCoords.w;\n"
- " vTexCoordsRG2.zw = greenTexCoords.xy / greenTexCoords.w;\n"
- " vTexCoordsB2 = blueTexCoords.xy / blueTexCoords.w;\n"
- " vTexCoordsRG2.x = clamp(vTexCoordsRG2.x,\n"
- " uTexXMinMax.z, uTexXMinMax.w);\n"
- " vTexCoordsRG2.z = clamp(vTexCoordsRG2.z, uTexXMinMax.z,\n"
- " uTexXMinMax.w);\n"
- " vTexCoordsB2.x = clamp(vTexCoordsB2.x, uTexXMinMax.z, uTexXMinMax.w);\n"
- "#endif\n"
- " gl_Position = uProjectionMatrix * vec4(aPosition, 0., 1.);\n"
- "}\n";
-
-static const char* kFragmentShaderChromaticAberrationString =
- "#ifdef GL_ES\n"
- "precision mediump float;\n"
- "#endif\n"
- " \n"
- "layout(binding = " STRINGIFY(SAMPLER_BINDING) ")\n"
- "uniform sampler2D uDistortionTexture; \n"
- "mediump in vec4 vTexCoordsRG;\n"
- "mediump in vec2 vTexCoordsB;\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "layout(binding = " STRINGIFY(SAMPLER_BINDING2) ")\n"
- "uniform sampler2D uDistortionTexture2; \n"
- "mediump in vec4 vTexCoordsRG2;\n"
- "mediump in vec2 vTexCoordsB2;\n"
- "#endif\n"
- "mediump in vec3 vVignette;\n"
- "#ifdef BLEND_WITH_PREVIOUS_LAYER \n"
- "inout vec4 fragColor; \n"
- "#else \n"
- "out vec4 fragColor; \n"
- "#endif \n"
- " \n"
- "void main(void) { \n"
- " vec4 ra = texture(uDistortionTexture, vTexCoordsRG.xy); \n"
- " vec4 ga = texture(uDistortionTexture, vTexCoordsRG.zw); \n"
- " vec4 ba = texture(uDistortionTexture, vTexCoordsB); \n"
- "#ifdef BLEND_WITH_PREVIOUS_LAYER \n"
- " vec3 alpha1 = vec3(ra.a, ga.a, ba.a); \n"
- " vec3 color = (vec3(1.0) - alpha1) * fragColor.rgb + \n"
- " alpha1 * vec3(ra.r, ga.g, ba.b); \n"
- "#else // BLEND_WITH_PREVIOUS_LAYER \n"
- " vec3 color = vec3(ra.r, ga.g, ba.b); \n"
- "#endif // BLEND_WITH_PREVIOUS_LAYER \n"
- "#ifdef COMPOSITE_LAYER_2 \n"
- " // Alpha blend layer 2 onto layer 1. \n"
- " vec4 ra2 = texture(uDistortionTexture2, vTexCoordsRG2.xy); \n"
- " vec4 ga2 = texture(uDistortionTexture2, vTexCoordsRG2.zw); \n"
- " vec4 ba2 = texture(uDistortionTexture2, vTexCoordsB2); \n"
- " vec3 color2 = vec3(ra2.r, ga2.g, ba2.b); \n"
- " vec3 alpha2 = vec3(ra2.a, ga2.a, ba2.a); \n"
- " color = (vec3(1.0) - alpha2) * color + alpha2 * color2; \n"
- "#endif \n"
- "#ifdef ALPHA_VIGNETTE\n"
- " fragColor = vec4(color, vVignette.b * ga.a); \n"
- "#else // ALPHA_VIGNETTE\n"
- " fragColor = vec4(vVignette.rgb * color, ga.a); \n"
- "#endif // ALPHA_VIGNETTE\n"
- "} \n";
-
-static const char* kVertexShaderNoChromaticAberrationString =
- "uniform mat4 uProjectionMatrix;\n"
- "layout(binding = " STRINGIFY(POSE_BINDING) ", std140)\n"
- "uniform LateLatchData {\n"
- " mat4 uTexFromRecommendedViewportMatrix;\n"
- "};\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "layout(binding = " STRINGIFY(POSE_BINDING2) ", std140)\n"
- "uniform LateLatchData2 {\n"
- " mat4 uTexFromRecommendedViewportMatrix2;\n"
- "};\n"
- "#endif\n"
- "uniform vec4 uTexXMinMax;\n"
- "layout(location = " STRINGIFY(POSITION_ATTR) ") in vec2 aPosition;\n"
- "layout(location = " STRINGIFY(VIEWPORT_COORD_G_ATTR)
- ") in vec2 aViewportCoords;\n"
- "mediump out vec2 vTexCoords;\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "mediump out vec2 vTexCoords2;\n"
- "#endif\n"
- "mediump out vec3 vVignette;\n"
- "\n" GLSL_VIGNETTE_FUNC
- "void main(void) {\n"
- " float fVignette = vignette(aViewportCoords);\n"
- " vVignette = vec3(fVignette, fVignette, fVignette);\n"
- " vec4 texCoords = (uTexFromRecommendedViewportMatrix * \n"
- " vec4(aViewportCoords, 0., 1.));\n"
- " vTexCoords = texCoords.xy / texCoords.w;\n"
- " vTexCoords.x = clamp(vTexCoords.x, uTexXMinMax.x, uTexXMinMax.y);\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- " texCoords = (uTexFromRecommendedViewportMatrix2 * \n"
- " vec4(aViewportCoords, 0., 1.));\n"
- " vTexCoords2 = texCoords.xy / texCoords.w;\n"
- " vTexCoords2.x = clamp(vTexCoords2.x, uTexXMinMax.z, uTexXMinMax.w);\n"
- "#endif\n"
- " gl_Position = uProjectionMatrix * vec4(aPosition, 0., 1.);\n"
- "}\n";
-
-static const char* kFragmentShaderNoChromaticAberrationString =
- "#ifdef GL_ES\n"
- "precision mediump float;\n"
- "#endif\n"
- " \n"
- "layout(binding = " STRINGIFY(SAMPLER_BINDING) ")\n"
- "uniform sampler2D uDistortionTexture; \n"
- "mediump in vec2 vTexCoords;\n"
- "#ifdef COMPOSITE_LAYER_2\n"
- "layout(binding = " STRINGIFY(SAMPLER_BINDING2) ")\n"
- "uniform sampler2D uDistortionTexture2; \n"
- "mediump in vec2 vTexCoords2;\n"
- "#endif\n"
- "mediump in vec3 vVignette;\n"
- "out vec4 fragColor;\n"
- " \n"
- "void main(void) { \n"
- " vec4 color = texture(uDistortionTexture, vTexCoords); \n"
- "#ifdef COMPOSITE_LAYER_2 \n"
- " // Alpha blend layer 2 onto layer 1. \n"
- " vec4 color2 = texture(uDistortionTexture2, vTexCoords2); \n"
- " float alpha2 = color2.a; \n"
- " color.rgb = (1.0 - alpha2) * color.rgb + alpha2 * color2.rgb; \n"
- "#endif \n"
- " fragColor = vec4(vVignette * color.rgb, color.a); \n"
- "} \n";
-
-static const char* kVertexShaderSimpleVideoQuadString =
- "uniform mat4 uProjectionMatrix;\n"
- "layout(binding = " STRINGIFY(POSE_BINDING) ", std140)\n"
- "uniform LateLatchData {\n"
- " mat4 uEdsCorrection;\n"
- "};\n"
- "uniform mat4 uTexFromEyeMatrix;\n"
- "uniform mat4 uEyeFromViewportMatrix;\n"
- "layout(location = " STRINGIFY(POSITION_ATTR) ") in vec2 aPosition;\n"
- "layout(location = " STRINGIFY(VIEWPORT_COORD_G_ATTR)
- ") in vec2 aViewportCoords;\n"
- "mediump out vec2 vTexCoords;\n"
- "void main(void) {\n"
- " mat4 m = uTexFromEyeMatrix * inverse(uEdsCorrection) * uEyeFromViewportMatrix;\n"
- " mat3 uTexFromViewportMatrix = inverse(mat3(m[0].xyw, m[1].xyw, m[3].xyw)); \n"
- " vec3 texCoords = uTexFromViewportMatrix * vec3(aViewportCoords, 1.0);\n"
- " vTexCoords = texCoords.xy / texCoords.z;\n"
- " gl_Position = uProjectionMatrix * vec4(aPosition, 0.0, 1.0);\n"
- "}\n";
-
-static const char* kFragmentShaderSimpleVideoQuadString =
- "#extension GL_OES_EGL_image_external_essl3 : enable\n"
- " \n"
- "#ifdef GL_ES\n"
- "precision mediump float;\n"
- "#endif\n"
- " \n"
- "layout(binding = " STRINGIFY(SAMPLER_BINDING) ")\n"
- "uniform samplerExternalOES uDistortionTexture; \n"
- "mediump in vec2 vTexCoords;\n"
- "out vec4 fragColor;\n"
- " \n"
- "void main(void) { \n"
- " if (clamp(vTexCoords, 0.0, 1.0) != vTexCoords) { \n"
- " fragColor = vec4(0.0, 0.0, 0.0, 0.0); \n"
- " } else { \n"
- " fragColor = texture(uDistortionTexture, vTexCoords); \n"
- " } \n"
- "} \n";
-
-} // anonymous namespace
-
-namespace android {
-namespace dvr {
-
-// Note that converting from Clip Space ([-1,1]^3) to Viewport Space
-// for one eye ([0,1]x[0,1]) requires dividing by 2 in x and y.
-const mat4 DistortionRenderer::kViewportFromClipMatrix =
- Eigen::Translation3f(vec3(0.5f, 0.5f, 0)) *
- Eigen::DiagonalMatrix<float, 3>(vec3(0.5f, 0.5f, 1.0f));
-
-const mat4 DistortionRenderer::kClipFromViewportMatrix =
- Eigen::DiagonalMatrix<float, 3>(vec3(2.0f, 2.0f, 1.0f)) *
- Eigen::Translation3f(vec3(-0.5f, -0.5f, 0));
-
-void DistortionRenderer::EdsShader::load(const char* vertex,
- const char* fragment, int num_layers,
- bool use_alpha_vignette,
- float rotation, bool flip_vertical,
- bool blend_with_previous_layer) {
- std::string vert_builder = "#version 310 es\n";
- std::string frag_builder = "#version 310 es\n";
- if (blend_with_previous_layer && kUseFramebufferReadback) {
- frag_builder += "#extension GL_EXT_shader_framebuffer_fetch : require\n";
- }
-
- if (num_layers == 2) {
- vert_builder += "#define COMPOSITE_LAYER_2\n";
- frag_builder += "#define COMPOSITE_LAYER_2\n";
- } else {
- LOG_ALWAYS_FATAL_IF(num_layers != 1);
- }
- if (blend_with_previous_layer) {
- // Check for unsupported shader combinations:
- LOG_ALWAYS_FATAL_IF(num_layers != 1);
- LOG_ALWAYS_FATAL_IF(use_alpha_vignette);
- if (kUseFramebufferReadback)
- frag_builder += "#define BLEND_WITH_PREVIOUS_LAYER\n";
- }
- if (use_alpha_vignette) {
- vert_builder += "#define ALPHA_VIGNETTE\n";
- frag_builder += "#define ALPHA_VIGNETTE\n";
- }
-
- vert_builder += vertex;
- frag_builder += fragment;
- pgm.Link(vert_builder, frag_builder);
- LOG_ALWAYS_FATAL_IF(!pgm.IsUsable());
-
- pgm.Use();
-
- uProjectionMatrix =
- glGetUniformLocation(pgm.GetProgram(), "uProjectionMatrix");
- uTexFromEyeMatrix =
- glGetUniformLocation(pgm.GetProgram(), "uTexFromEyeMatrix");
- uEyeFromViewportMatrix =
- glGetUniformLocation(pgm.GetProgram(), "uEyeFromViewportMatrix");
- uTexXMinMax = glGetUniformLocation(pgm.GetProgram(), "uTexXMinMax");
- CHECK_GL();
-
- float vertical_multiply = flip_vertical ? -1.0 : 1.0;
- mat4 projectionMatrix = OrthoMatrix(-0.5f, 0.5f, vertical_multiply * -0.5f,
- vertical_multiply * 0.5f, -1.0f, 1.0f);
-
- // Rotate the mesh into the screen's orientation.
- // TODO(hendrikw): Once the display is finalized, and perhaps not portrait,
- // look into removing this matrix altogether.
- projectionMatrix =
- projectionMatrix * Eigen::AngleAxisf(rotation, vec3::UnitZ());
-
- LOG_ALWAYS_FATAL_IF(sizeof(mat4) != 4 * 4 * 4);
- glUniformMatrix4fv(uProjectionMatrix, 1, false, projectionMatrix.data());
-}
-
-DistortionRenderer::DistortionRenderer(
- const CompositeHmd& hmd, vec2i display_size, int distortion_mesh_resolution,
- bool flip_texture_horizontally, bool flip_texture_vertically,
- bool separated_eye_buffers, bool eds_enabled, bool late_latch_enabled)
- : shader_type_(kChromaticAberrationCorrection),
- eds_enabled_(eds_enabled),
- chromatic_aberration_correction_enabled_(true),
- use_alpha_vignette_(false),
- distortion_mesh_resolution_(distortion_mesh_resolution),
- last_distortion_texture_id_(0),
- app_texture_target_(GL_TEXTURE_2D),
- display_size_(display_size),
- separated_eye_buffers_(separated_eye_buffers) {
- ATRACE_NAME("DistortionRenderer::DistortionRenderer");
-
- float device_rotation = 0.0;
-
- if (eds_enabled_) {
- // Late latch must be on if eds_enabled_ is true.
- if (!late_latch_enabled) {
- ALOGE("Cannot enable EDS without late latch. Force enabling late latch.");
- late_latch_enabled = true;
- }
- }
-
- // TODO(hendrikw): Look into moving this logic into DisplayMetrics.
- if (hmd.GetDisplayMetrics().IsPortrait()) {
- device_rotation = -M_PI / 2.0f;
- }
-
- // Create shader programs.
- shaders_[kNoChromaticAberrationCorrection].load(
- kVertexShaderNoChromaticAberrationString,
- kFragmentShaderNoChromaticAberrationString, 1, false, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kNoChromaticAberrationCorrectionTwoLayers].load(
- kVertexShaderNoChromaticAberrationString,
- kFragmentShaderNoChromaticAberrationString, 2, false, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kChromaticAberrationCorrection].load(
- kVertexShaderChromaticAberrationString,
- kFragmentShaderChromaticAberrationString, 1, false, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kChromaticAberrationCorrectionTwoLayers].load(
- kVertexShaderChromaticAberrationString,
- kFragmentShaderChromaticAberrationString, 2, false, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kChromaticAberrationCorrectionAlphaVignette].load(
- kVertexShaderChromaticAberrationString,
- kFragmentShaderChromaticAberrationString, 1, true, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kChromaticAberrationCorrectionAlphaVignetteTwoLayers].load(
- kVertexShaderChromaticAberrationString,
- kFragmentShaderChromaticAberrationString, 2, true, device_rotation,
- flip_texture_horizontally, false);
- shaders_[kChromaticAberrationCorrectionWithBlend].load(
- kVertexShaderChromaticAberrationString,
- kFragmentShaderChromaticAberrationString, 1, false, device_rotation,
- flip_texture_horizontally, true);
- shaders_[kSimpleVideoQuad].load(
- kVertexShaderSimpleVideoQuadString,
- kFragmentShaderSimpleVideoQuadString, 1, false, device_rotation,
- flip_texture_horizontally, true);
- CHECK_GL();
-
- mat4 tex_from_recommended_viewport_matrix[2][2][2];
- for (int eye = 0; eye < 2; ++eye) {
- // Near and far plane don't actually matter for the clip_from_eye_matrix
- // below since it is only used (for EDS) to transform coordinates for
- // which the Z has been dropped.
- static const float kNear = 0.1f, kFar = 100.0f;
- const FieldOfView& fov =
- (eye == kLeftEye ? hmd.GetEyeFov(kLeftEye) : hmd.GetEyeFov(kRightEye));
- mat4 c_clip_from_eye_matrix = fov.GetProjectionMatrix(kNear, kFar);
- mat4 c_eye_from_clip_matrix = c_clip_from_eye_matrix.inverse();
-
- // Compute tex_from_recommended_viewport_matrix.
-
- // flip_texture_vertically defines the default flip behavior.
- // do_flip[0] should be the default, while do_flip[1] should be the
- // inverse of the default.
- int do_flip[2] = {flip_texture_vertically ? 1 : 0,
- flip_texture_vertically ? 0 : 1};
- for (int flip = 0; flip < 2; ++flip) {
- vec2 flip_scale(1.0f, do_flip[flip] ? -1.0f : 1.0f);
- vec2 flip_offset(0.0f, do_flip[flip] ? 1.0f : 0.0f);
-
- for (int separate_eye = 0; separate_eye < 2; ++separate_eye) {
- vec2 viewport_corner_offset = (eye == kLeftEye || separate_eye)
- ? vec2(0.0f, 0.0f)
- : vec2(0.5f, 0.0f);
- const vec2 txy = viewport_corner_offset + flip_offset;
- const vec2 scalexy = vec2(separate_eye ? 1.0f : 0.5f, 1.0f);
- tex_from_recommended_viewport_matrix[eye][flip][separate_eye] =
- Eigen::Translation3f(vec3(txy.x(), txy.y(), 0.0f)) *
- Eigen::DiagonalMatrix<float, 3>(vec3(flip_scale.x() * scalexy.x(),
- flip_scale.y(), scalexy.y()));
-
- tex_from_eye_matrix_[eye][flip][separate_eye] =
- tex_from_recommended_viewport_matrix[eye][flip][separate_eye] *
- kViewportFromClipMatrix * c_clip_from_eye_matrix;
- }
- }
-
- eye_from_viewport_matrix_[eye] =
- c_eye_from_clip_matrix * kClipFromViewportMatrix;
- }
-
- // Create UBO for setting the EDS matrix to identity when EDS is disabled.
- glGenBuffers(2 * 2 * 2, &uTexFromRecommendedViewportMatrix[0][0][0]);
- for (int eye = 0; eye < 2; ++eye) {
- for (int flip = 0; flip < 2; ++flip) {
- for (int separate_eye = 0; separate_eye < 2; ++separate_eye) {
- glBindBuffer(
- GL_UNIFORM_BUFFER,
- uTexFromRecommendedViewportMatrix[eye][flip][separate_eye]);
- glBufferData(GL_UNIFORM_BUFFER, sizeof(mat4), 0, GL_STATIC_DRAW);
- CHECK_GL();
- mat4* mat = static_cast<mat4*>(glMapBufferRange(
- GL_UNIFORM_BUFFER, 0, sizeof(mat4), GL_MAP_WRITE_BIT));
- CHECK_GL();
- *mat = tex_from_recommended_viewport_matrix[eye][flip][separate_eye];
- glUnmapBuffer(GL_UNIFORM_BUFFER);
- }
- }
- }
- glBindBuffer(GL_UNIFORM_BUFFER, 0);
-
- // Create distortion meshes and associated GL resources.
- glGenBuffers(2, mesh_vbo_);
- glGenVertexArrays(2, mesh_vao_);
- glGenBuffers(2, mesh_ibo_);
- RecomputeDistortion(hmd);
-
- SetDisplaySize(display_size);
-
- if (hmd.GetDisplayMetrics().IsPortrait()) {
- eye_viewport_origin_[0] =
- vec2i(0, flip_texture_horizontally ? 0 : display_size_[1] / 2);
- eye_viewport_origin_[1] =
- vec2i(0, flip_texture_horizontally ? display_size_[1] / 2 : 0);
- eye_viewport_size_ = vec2i(display_size_[0], display_size_[1] / 2);
- } else {
- eye_viewport_origin_[0] = vec2i(0, 0);
- eye_viewport_origin_[1] = vec2i(display_size_[0] / 2, 0);
- eye_viewport_size_ = vec2i(display_size_[0] / 2, display_size_[1]);
- }
-
- CHECK_GL();
-}
-
-DistortionRenderer::~DistortionRenderer() {
- glDeleteBuffers(2 * 2 * 2, &uTexFromRecommendedViewportMatrix[0][0][0]);
- glDeleteBuffers(2, mesh_vbo_);
- glDeleteVertexArrays(2, mesh_vao_);
- glDeleteBuffers(2, mesh_ibo_);
-}
-
-void DistortionRenderer::ApplyDistortionCorrectionToTexture(
- EyeType eye, const GLuint* texture_ids, const bool* vertical_flip,
- const bool* separate_eye, const int* late_latch_layer, int num_textures,
- bool blend_with_previous_layer, bool do_gl_state_prep) {
- ATRACE_NAME(__PRETTY_FUNCTION__);
-
- bool use_gl_blend = use_alpha_vignette_ ||
- (blend_with_previous_layer && !kUseFramebufferReadback);
- if (use_gl_blend) {
- glEnable(GL_BLEND);
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
- }
- DrawEye(eye, texture_ids, vertical_flip, separate_eye, late_latch_layer,
- num_textures, blend_with_previous_layer, do_gl_state_prep);
- if (use_gl_blend) {
- glDisable(GL_BLEND);
- }
- CHECK_GL();
-}
-
-void DistortionRenderer::DrawVideoQuad(EyeType eye, int layer_i,
- GLuint texture_id,
- const mat4& transform) {
- shaders_[kSimpleVideoQuad].use();
-
- shaders_[kSimpleVideoQuad].SetTexFromEyeTransform(
- tex_from_eye_matrix_[eye][0][1]);
- shaders_[kSimpleVideoQuad].SetEyeFromViewportTransform(
- transform * kClipFromViewportMatrix);
-
- if (eds_enabled_) {
- // Bind late latch view-projection UBO that is produced by AddEdsLateLatch.
- late_latch_[layer_i]->BindUniformBuffer(
- POSE_BINDING, LateLatch::kViewMatrix, eye);
- CHECK_GL();
- } else {
- // When EDS is disabled we just set the matrix here with no pose offset.
- glBindBufferBase(GL_UNIFORM_BUFFER, POSE_BINDING + layer_i,
- uTexFromRecommendedViewportMatrix[eye][0][1]);
- CHECK_GL();
- }
-
- glActiveTexture(GL_TEXTURE0 + SAMPLER_BINDING);
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
- CHECK_GL();
-
- glDrawElements(GL_TRIANGLE_STRIP, mesh_node_[eye].indices.size(),
- GL_UNSIGNED_SHORT, nullptr);
-
- CHECK_GL();
-}
-
-void DistortionRenderer::DoLateLatch(uint32_t target_vsync_count,
- const uint32_t* render_buffer_index,
- const GLuint* render_pose_buffer_objects,
- const bool* vertical_flip,
- const bool* separate_eye,
- int num_textures) {
- if (eds_enabled_) {
- LateLatchInput data;
- memset(&data, 0, sizeof(data));
- for (int ti = 0; ti < num_textures; ++ti) {
- if (late_latch_[ti] == nullptr)
- late_latch_[ti].reset(new LateLatch(false));
-
- int flip_index = vertical_flip[ti] ? 1 : 0;
- int separate_eye_i = separate_eye[ti] ? 1 : 0;
- // Copy data into late latch input struct.
- for (int eye = 0; eye < 2; ++eye) {
- data.eds_mat1[eye] =
- tex_from_eye_matrix_[eye][flip_index][separate_eye_i];
- data.eds_mat2[eye] = eye_from_viewport_matrix_[eye];
- }
- data.pose_index = target_vsync_count & kPoseAsyncBufferIndexMask;
- data.render_pose_index = render_buffer_index[ti];
-
- late_latch_[ti]->AddEdsLateLatch(data, render_pose_buffer_objects[ti]);
- }
- }
-}
-
-void DistortionRenderer::PrepGlState(EyeType eye) {
- glViewport(eye_viewport_origin_[eye].x(), eye_viewport_origin_[eye].y(),
- eye_viewport_size_.x(), eye_viewport_size_.y());
-
- glBindVertexArray(mesh_vao_[eye]);
- CHECK_GL();
-
- glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh_ibo_[eye]);
- CHECK_GL();
-
- if (!eds_enabled_) {
- glMemoryBarrier(GL_UNIFORM_BARRIER_BIT);
- }
-}
-
-void DistortionRenderer::ResetGlState(int num_textures) {
- glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
- glBindBuffer(GL_ARRAY_BUFFER, 0);
- glBindVertexArray(0);
- if (eds_enabled_) {
- for (int ti = 0; ti < num_textures; ++ti)
- glBindBufferBase(GL_UNIFORM_BUFFER, POSE_BINDING + ti, 0);
- } else {
- glBindBuffer(GL_UNIFORM_BUFFER, 0);
- }
-
- CHECK_GL();
-
- // Unbind all texture inputs.
- for (int ti = 0; ti < num_textures; ++ti) {
- glActiveTexture(GL_TEXTURE0 + SAMPLER_BINDING + ti);
- glBindTexture(app_texture_target_, 0);
- }
- glActiveTexture(GL_TEXTURE0);
-}
-
-void DistortionRenderer::DrawEye(EyeType eye, const GLuint* texture_ids,
- const bool* vertical_flip,
- const bool* separate_eye,
- const int* late_latch_layer, int num_textures,
- bool blend_with_previous_layer,
- bool do_gl_state_prep) {
- if (do_gl_state_prep)
- PrepGlState(eye);
-
- if (num_textures > kMaxLayers) {
- ALOGE("Too many textures for DistortionRenderer");
- num_textures = kMaxLayers;
- }
-
- LOG_ALWAYS_FATAL_IF(num_textures != 1 && num_textures != 2);
-
- if (num_textures == 2) {
- if (chromatic_aberration_correction_enabled_) {
- if (use_alpha_vignette_) {
- shader_type_ = kChromaticAberrationCorrectionAlphaVignetteTwoLayers;
- } else {
- shader_type_ = kChromaticAberrationCorrectionTwoLayers;
- }
- } else {
- shader_type_ = kNoChromaticAberrationCorrectionTwoLayers;
- }
- } else {
- if (chromatic_aberration_correction_enabled_) {
- if (blend_with_previous_layer) {
- shader_type_ = kChromaticAberrationCorrectionWithBlend;
- } else if (use_alpha_vignette_) {
- shader_type_ = kChromaticAberrationCorrectionAlphaVignette;
- } else {
- shader_type_ = kChromaticAberrationCorrection;
- }
- } else {
- shader_type_ = kNoChromaticAberrationCorrection;
- }
- }
- shaders_[shader_type_].use();
-
- for (int ti = 0; ti < num_textures; ++ti) {
- int flip_index = vertical_flip[ti] ? 1 : 0;
- if (eds_enabled_) {
- // Bind late latch view-projection UBO that is produced by
- // AddEdsLateLatch.
- late_latch_[late_latch_layer[ti]]->BindUniformBuffer(
- POSE_BINDING + ti, LateLatch::kViewProjMatrix, eye);
- CHECK_GL();
- } else {
- // When EDS is disabled we just set the matrix here with no pose offset.
- // With app late-latching, we can't know the pose that the app used
- // because it's in the app's framebuffer.
- int separate_eye_i = separate_eye[ti] ? 1 : 0;
- glBindBufferBase(
- GL_UNIFORM_BUFFER, POSE_BINDING + ti,
- uTexFromRecommendedViewportMatrix[eye][flip_index][separate_eye_i]);
- CHECK_GL();
- }
-
- glActiveTexture(GL_TEXTURE0 + SAMPLER_BINDING + ti);
- glBindTexture(app_texture_target_, texture_ids[ti]);
- CHECK_GL();
- }
-
- // Prevents left eye data from bleeding into right eye and vice-versa.
- vec2 layer_min_max[kMaxLayers];
- for (int i = 0; i < kMaxLayers; ++i)
- layer_min_max[i] = vec2(0.0f, 0.0f);
- for (int ti = 0; ti < num_textures; ++ti) {
- if (separate_eye[ti]) {
- layer_min_max[ti] = vec2(0.0f, 1.0f); // Use the whole texture.
- } else if (eye == kLeftEye) {
- layer_min_max[ti] = vec2(0.0f, 0.499f);
- } else {
- layer_min_max[ti] = vec2(0.501f, 1.0f);
- }
- }
- // The second layer stores its x min and max in the z,w slots of the vec4.
- vec4 xTexMinMax(layer_min_max[0].x(), layer_min_max[0].y(),
- layer_min_max[1].x(), layer_min_max[1].y());
-
- glUniform4fv(shaders_[shader_type_].uTexXMinMax, 1, &xTexMinMax[0]);
- CHECK_GL();
-
- glDrawElements(GL_TRIANGLE_STRIP, mesh_node_[eye].indices.size(),
- GL_UNSIGNED_SHORT, nullptr);
- CHECK_GL();
- if (do_gl_state_prep)
- ResetGlState(num_textures);
-}
-
-void DistortionRenderer::SetDisplaySize(vec2i display_size) {
- display_size_ = display_size;
-}
-
-void DistortionRenderer::SetEdsEnabled(bool enabled) { eds_enabled_ = enabled; }
-
-void DistortionRenderer::RecomputeDistortion(const CompositeHmd& hmd) {
- using std::placeholders::_1;
- using std::placeholders::_2;
- using std::placeholders::_3;
- using std::placeholders::_4;
- DistortionFunction distortion_function =
- std::bind(&CompositeHmd::ComputeDistortedVertex, &hmd, _1, _2, _3, _4);
-
- for (int i = 0; i < 2; ++i) {
- mesh_node_[i] =
- BuildDistortionMesh(static_cast<EyeType>(i),
- distortion_mesh_resolution_, distortion_function);
-
- glBindVertexArray(mesh_vao_[i]);
-
- glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo_[i]);
- glBufferData(GL_ARRAY_BUFFER,
- sizeof(EdsVertex) * mesh_node_[i].vertices.size(),
- &mesh_node_[i].vertices.front(), GL_STATIC_DRAW);
-
- glEnableVertexAttribArray(POSITION_ATTR);
- glEnableVertexAttribArray(VIEWPORT_COORD_R_ATTR);
- glEnableVertexAttribArray(VIEWPORT_COORD_G_ATTR);
- glEnableVertexAttribArray(VIEWPORT_COORD_B_ATTR);
-
- glVertexAttribPointer(
- POSITION_ATTR, 2, GL_FLOAT, GL_FALSE, sizeof(EdsVertex),
- reinterpret_cast<void*>(offsetof(EdsVertex, position)));
-
- glVertexAttribPointer(
- VIEWPORT_COORD_R_ATTR, 2, GL_FLOAT, GL_FALSE, sizeof(EdsVertex),
- reinterpret_cast<void*>(offsetof(EdsVertex, red_viewport_coords)));
-
- glVertexAttribPointer(
- VIEWPORT_COORD_G_ATTR, 2, GL_FLOAT, GL_FALSE, sizeof(EdsVertex),
- reinterpret_cast<void*>(offsetof(EdsVertex, green_viewport_coords)));
-
- glVertexAttribPointer(
- VIEWPORT_COORD_B_ATTR, 2, GL_FLOAT, GL_FALSE, sizeof(EdsVertex),
- reinterpret_cast<void*>(offsetof(EdsVertex, blue_viewport_coords)));
-
- glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh_ibo_[i]);
- glBufferData(GL_ELEMENT_ARRAY_BUFFER,
- sizeof(uint16_t) * mesh_node_[i].indices.size(),
- &mesh_node_[i].indices.front(), GL_STATIC_DRAW);
- CHECK_GL();
- }
- glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
- glBindBuffer(GL_ARRAY_BUFFER, 0);
-
- glBindVertexArray(0);
-}
-
-bool DistortionRenderer::GetLastEdsPose(LateLatchOutput* out_data, int layer_id) const {
- if (layer_id >= kMaxLayers) {
- ALOGE("Accessing invalid layer %d", layer_id);
- return false;
- }
-
- if (late_latch_[layer_id] != nullptr) {
- late_latch_[layer_id]->CaptureOutputData(out_data);
- return true;
- } else {
- ALOGE("Late latch shader not enabled.");
- return false;
- }
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/eds.cpp b/libs/vr/libeds/eds.cpp
deleted file mode 100644
index 8af5b27..0000000
--- a/libs/vr/libeds/eds.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <dvr/eds.h>
-
-#include <private/dvr/graphics/vr_gl_extensions.h>
-#include <private/dvr/late_latch.h>
-#include <private/dvr/types.h>
-
-// TODO(jbates) delete this file and eds.h
-
-extern "C" int dvrEdsInit(bool with_late_latch) { return 0; }
-
-extern "C" void dvrEdsDeinit() {}
-
-extern "C" int dvrEdsCapturePoseAsync(int eye, uint32_t target_vsync_count,
- const float* projection_matrix,
- const float* eye_from_head_matrix,
- const float* pose_offset_matrix) {
- return 0;
-}
-
-extern "C" int dvrEdsBindPose(int eye, uint32_t ubo_binding, intptr_t offset,
- ssize_t size) {
- return 0;
-}
-
-extern "C" int dvrEdsBlitPose(int eye, int viewport_width,
- int viewport_height) {
- return 0;
-}
-
-extern "C" int dvrEdsBlitPoseFromCpu(int eye, int viewport_width,
- int viewport_height,
- const float* pose_quaternion,
- const float* pose_position) {
- return 0;
-}
diff --git a/libs/vr/libeds/eds_mesh.cpp b/libs/vr/libeds/eds_mesh.cpp
deleted file mode 100644
index 01a90cf..0000000
--- a/libs/vr/libeds/eds_mesh.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-#include "include/private/dvr/eds_mesh.h"
-
-#include <log/log.h>
-#include <math.h>
-
-#include <private/dvr/types.h>
-
-namespace {
-
-using android::dvr::EdsVertex;
-using android::dvr::EyeType;
-using android::dvr::DistortionFunction;
-using android::dvr::vec2;
-
-// Computes the vertices for a distortion mesh with resolution |resolution| and
-// distortion provided by |hmd| and stores them in |vertices|.
-static void ComputeDistortionMeshVertices(
- EdsVertex* vertices, int resolution,
- const DistortionFunction& distortion_function, EyeType eye) {
- for (int row = 0; row < resolution; row++) {
- for (int col = 0; col < resolution; col++) {
- const float x_norm =
- static_cast<float>(col) / (static_cast<float>(resolution - 1U));
- const float y_norm =
- static_cast<float>(row) / (static_cast<float>(resolution - 1U));
-
- const vec2 xy_norm(x_norm, y_norm);
- const size_t index = col * resolution + row;
-
- // Evaluate distortion function to get the new coordinates for each color
- // channel. The distortion function returns the new coordinates relative
- // to a full viewport with 0 <= x <= 1 for each eye.
- vec2 coords[3];
- distortion_function(eye, xy_norm, &vertices[index].position, coords);
-
- // Store distortion mapping in texture coordinates.
- vertices[index].red_viewport_coords = coords[0];
- vertices[index].green_viewport_coords = coords[1];
- vertices[index].blue_viewport_coords = coords[2];
- }
- }
-}
-
-// Computes the triangle strip indices for a distortion mesh with resolution
-// |resolution| and stores them in |indices|.
-static void ComputeDistortionMeshIndices(uint16_t* indices, int resolution) {
- // The following strip method has been used in the Cardboard SDK
- // (java/com/google/vrtoolkit/cardboard/DistortionRenderer.java) and has
- // originally been described at:
- //
- // http://dan.lecocq.us/wordpress/2009/12/25/triangle-strip-for-grids-a-construction/
- //
- // For a grid with 4 rows and 4 columns of vertices, the strip would
- // look like:
- // ↻
- // 0 - 4 - 8 - 12
- // ↓ ↗ ↓ ↗ ↓ ↗ ↓
- // 1 - 5 - 9 - 13
- // ↓ ↖ ↓ ↖ ↓ ↖ ↓
- // 2 - 6 - 10 - 14
- // ↓ ↗ ↓ ↗ ↓ ↗ ↓
- // 3 - 7 - 11 - 15
- // ↺
- //
- // Note the little circular arrows next to 7 and 8 that indicate
- // repeating that vertex once so as to produce degenerate triangles.
- //
- // To facilitate scanline racing, the vertex order is left to right.
-
- int16_t index_offset = 0;
- int16_t vertex_offset = 0;
- for (int row = 0; row < resolution - 1; ++row) {
- if (row > 0) {
- indices[index_offset] = indices[index_offset - 1];
- ++index_offset;
- }
- for (int col = 0; col < resolution; ++col) {
- if (col > 0) {
- if (row % 2 == 0) {
- // Move right on even rows.
- ++vertex_offset;
- } else {
- --vertex_offset;
- }
- }
- // A cast to uint16_t is safe here as |vertex_offset| will not drop below
- // zero in this loop. As col is initially equal to zero |vertex_offset| is
- // always incremented before being decremented, is initialized to zero and
- // is only incremented outside of the loop.
- indices[index_offset++] = static_cast<uint16_t>(vertex_offset);
- indices[index_offset++] = static_cast<uint16_t>(
- vertex_offset + static_cast<int16_t>(resolution));
- }
- vertex_offset =
- static_cast<int16_t>(static_cast<int>(resolution) + vertex_offset);
- }
-}
-
-} // anonymous namespace
-
-namespace android {
-namespace dvr {
-
-// Builds a distortion mesh of resolution |resolution| using the distortion
-// provided by |hmd| for |eye|.
-EdsMesh BuildDistortionMesh(EyeType eye, int resolution,
- const DistortionFunction& distortion_function) {
- LOG_ALWAYS_FATAL_IF(resolution <= 2);
-
- // Number of indices produced by the strip method
- // (see comment in ComputeDistortionMeshIndices):
- //
- // 1 vertex per triangle
- // 2 triangles per quad, (rows - 1) * (cols - 1) quads
- // 2 vertices at the start of each row for the first triangle
- // 1 extra vertex per row (except first and last) for a
- // degenerate triangle
- //
- const uint16_t index_count =
- static_cast<uint16_t>(resolution * (2 * resolution - 1U) - 2U);
- const uint16_t vertex_count = static_cast<uint16_t>(resolution * resolution);
-
- EdsMesh mesh;
- mesh.vertices.resize(vertex_count);
- mesh.indices.resize(index_count);
-
- // Populate vertex and index buffer.
- ComputeDistortionMeshVertices(&mesh.vertices[0], resolution,
- distortion_function, eye);
- ComputeDistortionMeshIndices(&mesh.indices[0], resolution);
-
- return mesh;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/include/CPPLINT.cfg b/libs/vr/libeds/include/CPPLINT.cfg
deleted file mode 100644
index 2f8a3c0..0000000
--- a/libs/vr/libeds/include/CPPLINT.cfg
+++ /dev/null
@@ -1 +0,0 @@
-filter=-build/header_guard
diff --git a/libs/vr/libeds/include/dvr/eds.h b/libs/vr/libeds/include/dvr/eds.h
deleted file mode 100644
index 37b1297..0000000
--- a/libs/vr/libeds/include/dvr/eds.h
+++ /dev/null
@@ -1,150 +0,0 @@
-#ifndef ANDROID_DVR_EDS_H_
-#define ANDROID_DVR_EDS_H_
-
-#include <stdbool.h>
-#include <stdint.h>
-#include <sys/cdefs.h>
-#include <sys/types.h>
-
-__BEGIN_DECLS
-
-// This struct aligns with GLSL uniform blocks with std140 layout.
-// std140 allows padding between certain types, so padding must be explicitly
-// added as struct members.
-struct __attribute__((__packed__)) DvrLateLatchData {
- // Column-major order.
- float view_proj_matrix[16];
- // Column-major order.
- float view_matrix[16];
- float pose_quaternion[4];
- float pose_position[4];
-};
-
-//
-// These APIs are not thread safe and must be called on a single thread with an
-// actively bound GL context corresponding to a display surface.
-//
-
-// Prepares EDS and Late Latching system. Idempotent if called more than once.
-// The target GL context must be created and bound.
-//
-// If |with_late_latch| is true, a thread will be created that asynchronously
-// updates the pose in memory.
-//
-// The following GL states are modified as follows:
-// glBindBuffer(GL_ARRAY_BUFFER, 0);
-// glBindBuffer(GL_UNIFORM_BUFFER, 0);
-//
-// Returns 0 on success, negative error code on failure.
-// Check GL errors with glGetError for other error conditions.
-int dvrEdsInit(bool with_late_latch);
-
-// Stops and destroys the EDS Late Latching system.
-void dvrEdsDeinit();
-
-// Submits GL draw command that will capture the latest head pose into a uniform
-// buffer object. This should be called twice per frame, before the app begins
-// drawing for each eye.
-// For each eye, a later call to dvrEdsBlitPose will write this pose into
-// the application framebuffer corner so that the EDS service knows what pose
-// the frame was rendered with.
-//
-// |eye| is 0 for left eye and 1 for right eye.
-//
-// The following GL states are modified as follows:
-// glUseProgram(0);
-// glBindBuffer(GL_UNIFORM_BUFFER, 0);
-// glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, id);
-// glDisable(GL_RASTERIZER_DISCARD);
-//
-// Returns 0 on success, negative error code on failure:
-// EPERM - dvrEdsInit(true) was not called.
-// Check GL errors with glGetError for other error conditions.
-int dvrEdsCapturePoseAsync(int eye, uint32_t target_vsync_count,
- const float* projection_matrix,
- const float* eye_from_head_matrix,
- const float* pose_offset_matrix);
-
-// Binds the late-latch output data as a GL_UNIFORM_BUFFER so that your vertex
-// shaders can use the latest head pose. For example, to bind just the
-// view_matrix from the output:
-//
-// dvrEdsBindPose(eye, BINDING,
-// offsetof(DvrLateLatchData, view_matrix),
-// sizeof(DvrLateLatchData::view_matrix));
-//
-// Or more commonly, bind the view projection matrix:
-//
-// dvrEdsBindPose(eye, BINDING,
-// offsetof(DvrLateLatchData, view_proj_matrix),
-// sizeof(DvrLateLatchData::view_proj_matrix));
-//
-// BINDING in the above examples is the binding location of the uniform
-// interface block in the GLSL shader.
-//
-// Shader example (3 would be the |ubo_binding| passed to this function):
-// layout(binding = 3, std140) uniform LateLatchData {
-// mat4 uViewProjection;
-// };
-//
-// |eye| is 0 for left eye and 1 for right eye.
-//
-// The following GL states are modified as follows:
-// glBindBuffer(GL_UNIFORM_BUFFER, ...);
-// glBindBufferRange(GL_UNIFORM_BUFFER, ...);
-//
-// To clear the binding, call glBindBuffer(GL_UNIFORM_BUFFER, 0);
-//
-// Returns 0 on success, negative error code on failure:
-// EPERM - dvrEdsInit(true) was not called.
-// Check GL errors with glGetError for other error conditions.
-int dvrEdsBindPose(int eye, uint32_t ubo_binding, intptr_t offset,
- ssize_t size);
-
-// DEPRECATED
-//
-// Blits the pose captured previously into the currently bound framebuffer.
-// The current framebuffer is assumed to be the default framebuffer 0, the
-// surface that will be sent to the display and have EDS and lens warp applied
-// to it.
-//
-// |eye| is 0 for left eye and 1 for right eye.
-// |viewport_width| is the width of the viewport for this eye, which is
-// usually half the width of the framebuffer.
-// |viewport_height| is the height of the viewport for this eye, which is
-// usually the height of the framebuffer.
-//
-// The following GL states are modified as follows:
-// glUseProgram(0);
-// glBindBuffer(GL_UNIFORM_BUFFER, 0);
-// glBindBufferRange(GL_UNIFORM_BUFFER, 23, ...);
-//
-// Returns 0 on success, negative error code on failure:
-// EPERM - dvrEdsInit was not called.
-// Check GL errors with glGetError for other error conditions.
-int dvrEdsBlitPose(int eye, int viewport_width, int viewport_height);
-
-// DEPRECATED
-//
-// Same as dvrEdsBlitPose except that the pose is provided as an
-// parameter instead of getting it from dvrEdsBindPose. This is for
-// applications that want EDS but do not want late-latching.
-//
-// |pose_quaternion| should point to 4 floats that represent a quaternion.
-// |pose_position| should point to 3 floats that represent x,y,z position.
-//
-// GL states are modified as follows:
-// glUseProgram(0);
-// glBindBuffer(GL_UNIFORM_BUFFER, 0);
-// glBindBufferBase(GL_UNIFORM_BUFFER, 23, ...);
-//
-// Returns 0 on success, negative error code on failure:
-// EPERM - dvrEdsInit was not called.
-// Check GL errors with glGetError for other error conditions.
-int dvrEdsBlitPoseFromCpu(int eye, int viewport_width, int viewport_height,
- const float* pose_quaternion,
- const float* pose_position);
-
-__END_DECLS
-
-#endif // ANDROID_DVR_EDS_H_
diff --git a/libs/vr/libeds/include/private/dvr/color_channel_distortion.h b/libs/vr/libeds/include/private/dvr/color_channel_distortion.h
deleted file mode 100644
index 4e612cd..0000000
--- a/libs/vr/libeds/include/private/dvr/color_channel_distortion.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef ANDROID_DVR_COLOR_CHANNEL_DISTORTION_H_
-#define ANDROID_DVR_COLOR_CHANNEL_DISTORTION_H_
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-// ColorChannelDistortion encapsulates the way one color channel (wavelength)
-// is distorted optically when an image is viewed through a lens.
-class ColorChannelDistortion {
- public:
- virtual ~ColorChannelDistortion() {}
-
- // Given a 2d point p, returns the corresponding distorted point.
- // The units of both the input and output points are tan-angle units,
- // which can be computed as the distance on the screen divided by
- // distance from the virtual eye to the screen. For both the input
- // and output points, the intersection of the optical axis of the lens
- // with the screen defines the origin, the x axis points right, and
- // the y axis points up.
- virtual vec2 Distort(vec2 p) const = 0;
-
- virtual vec2 DistortInverse(vec2 p) const = 0;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_COLOR_CHANNEL_DISTORTION_H_
diff --git a/libs/vr/libeds/include/private/dvr/composite_hmd.h b/libs/vr/libeds/include/private/dvr/composite_hmd.h
deleted file mode 100644
index 70727e0..0000000
--- a/libs/vr/libeds/include/private/dvr/composite_hmd.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef ANDROID_DVR_COMPOSITE_HMD_H_
-#define ANDROID_DVR_COMPOSITE_HMD_H_
-
-#include <private/dvr/display_metrics.h>
-#include <private/dvr/head_mount_metrics.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-// An intermediate structure composed of a head mount (described by
-// HeadMountMetrics) and a display (described by DisplayMetrics).
-class CompositeHmd {
- public:
- // Constructs a new CompositeHmd given a HeadMountMetrics and a
- // DisplayMetrics.
- CompositeHmd(const HeadMountMetrics& head_mount_metrics,
- const DisplayMetrics& display_metrics);
-
- CompositeHmd(CompositeHmd&& composite_hmd) = delete;
- CompositeHmd(const CompositeHmd& composite_hmd) = delete;
- CompositeHmd& operator=(CompositeHmd&& composite_hmd) = delete;
- CompositeHmd& operator=(const CompositeHmd& composite_hmd) = delete;
-
- // Headset metadata.
- float GetTargetFrameDuration() const;
- void ComputeDistortedVertex(EyeType eye, vec2 uv_in, vec2* vertex_out,
- vec2* uv_out) const;
-
- // Eye-unspecific view accessors.
- vec2i GetRecommendedRenderTargetSize() const;
- Range2i GetDisplayRange() const;
-
- // Eye-specific view accessors.
- mat4 GetEyeFromHeadMatrix(EyeType eye) const;
- FieldOfView GetEyeFov(EyeType eye) const;
- Range2i GetEyeViewportBounds(EyeType eye) const;
-
- // Set HeadMountMetrics and recompute everything that depends on
- // HeadMountMetrics.
- void SetHeadMountMetrics(const HeadMountMetrics& head_mount_metrics);
-
- // Returns a reference to the |head_mount_metrics_| member.
- const HeadMountMetrics& GetHeadMountMetrics() const;
-
- // Set DisplayMetrics and recompute everything that depends on DisplayMetrics.
- void SetDisplayMetrics(const DisplayMetrics& display_metrics);
-
- // Returns a reference to the current display metrics.
- const DisplayMetrics& GetDisplayMetrics() const;
-
- // Compute the distorted point for a single channel.
- vec2 ComputeDistortedPoint(EyeType eye, vec2 position,
- RgbColorChannel channel) const;
-
- // Compute the inverse distorted point for a single channel.
- vec2 ComputeInverseDistortedPoint(EyeType eye, vec2 position,
- RgbColorChannel channel) const;
-
- private:
- FieldOfView eye_fov_[2];
- Range2i eye_viewport_range_[2];
- mat4 eye_from_head_matrix_[2];
- Range2i display_range_;
- vec2i recommended_render_target_size_;
-
- // Per-eye scale and translation to convert from normalized Screen Space
- // ([0:1]x[0:1]) to tan-angle space.
- mat3 eye_tan_angle_from_norm_screen_matrix_[2];
- mat3 eye_tan_angle_from_norm_screen_inv_matrix_[2];
-
- // Per-eye scale and translation to convert from tan-angle space to normalized
- // Texture Space ([0:1]x[0:1]).
- mat3 eye_norm_texture_from_tan_angle_matrix_[2];
- mat3 eye_norm_texture_from_tan_angle_inv_matrix_[2];
-
- HeadMountMetrics head_mount_metrics_;
- DisplayMetrics display_metrics_;
-
- // Called by SetHeadMountMetrics/SetDisplayMetrics after metrics get changed.
- // This function will update head_mount_metrics_/display_metrics_ based on the
- // metrics supplied in the above two methods.
- void MetricsChanged();
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_COMPOSITE_HMD_H_
diff --git a/libs/vr/libeds/include/private/dvr/device_metrics.h b/libs/vr/libeds/include/private/dvr/device_metrics.h
deleted file mode 100644
index 7985f28..0000000
--- a/libs/vr/libeds/include/private/dvr/device_metrics.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef ANDROID_DVR_DEVICE_METRICS_H_
-#define ANDROID_DVR_DEVICE_METRICS_H_
-
-#include <private/dvr/display_metrics.h>
-#include <private/dvr/head_mount_metrics.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-HeadMountMetrics CreateHeadMountMetrics();
-HeadMountMetrics CreateHeadMountMetrics(const FieldOfView& l_fov,
- const FieldOfView& r_fov);
-HeadMountMetrics CreateUndistortedHeadMountMetrics();
-HeadMountMetrics CreateUndistortedHeadMountMetrics(const FieldOfView& l_fov,
- const FieldOfView& r_fov);
-DisplayMetrics CreateDisplayMetrics(vec2i screen_size);
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_DEVICE_METRICS_H_
diff --git a/libs/vr/libeds/include/private/dvr/display_metrics.h b/libs/vr/libeds/include/private/dvr/display_metrics.h
deleted file mode 100644
index 87d9d04..0000000
--- a/libs/vr/libeds/include/private/dvr/display_metrics.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef ANDROID_DVR_DISPLAY_METRICS_H_
-#define ANDROID_DVR_DISPLAY_METRICS_H_
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-enum class DisplayOrientation { kPortrait, kLandscape };
-
-// DisplayMetrics encapsulates metrics describing a display to be used
-// with a head mount to create a head mounted display.
-class DisplayMetrics {
- public:
- DisplayMetrics();
- // Constructs a DisplayMetrics given a display size in pixels,
- // meters per pixel, border size in meters, and frame duration in
- // seconds.
- //
- // size_pixels The size of the display in pixels.
- // meters_per_pixel The meters per pixel in each dimension.
- // border_size_meters The size of the border around the display
- // in meters. When the device sits on a surface in the proper
- // orientation this is the distance from the surface to the edge
- // of the display.
- // frame_duration_seconds The duration in seconds of each frame
- // (i.e., 1 / framerate).
- DisplayMetrics(vec2i size_pixels, vec2 meters_per_pixel,
- float border_size_meters, float frame_duration_seconds,
- DisplayOrientation orientation);
-
- // Gets the size of the display in physical pixels (not logical pixels).
- vec2i GetSizePixels() const { return size_pixels_; }
-
- DisplayOrientation GetOrientation() const { return orientation_; }
- bool IsPortrait() const {
- return orientation_ == DisplayOrientation::kPortrait;
- }
-
- // Gets the size of the display in meters.
- vec2 GetSizeMeters() const {
- return vec2(static_cast<float>(size_pixels_[0]),
- static_cast<float>(size_pixels_[1]))
- .array() *
- meters_per_pixel_.array();
- }
-
- // Gets the meters per pixel.
- vec2 GetMetersPerPixel() const { return meters_per_pixel_; }
-
- // Gets the size of the border around the display.
- // For a phone in landscape position this would be the distance from
- // the bottom the edge of the phone to the bottom of the screen.
- float GetBorderSizeMeters() const { return border_size_meters_; }
-
- // Gets the frame duration in seconds for the display.
- float GetFrameDurationSeconds() const { return frame_duration_seconds_; }
-
- // Toggles the orientation and swaps all of the settings such that the
- // display is being held in the other orientation.
- void ToggleOrientation();
-
- // Override the meters per pixel.
- void SetMetersPerPixel(const vec2& meters_per_pixel) {
- meters_per_pixel_ = meters_per_pixel;
- }
-
- private:
- vec2i size_pixels_;
- vec2 meters_per_pixel_;
- float border_size_meters_;
- float frame_duration_seconds_;
- DisplayOrientation orientation_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_DISPLAY_METRICS_H_
diff --git a/libs/vr/libeds/include/private/dvr/distortion_renderer.h b/libs/vr/libeds/include/private/dvr/distortion_renderer.h
deleted file mode 100644
index 28fd48a..0000000
--- a/libs/vr/libeds/include/private/dvr/distortion_renderer.h
+++ /dev/null
@@ -1,232 +0,0 @@
-#ifndef ANDROID_DVR_DISTORTION_RENDERER_H_
-#define ANDROID_DVR_DISTORTION_RENDERER_H_
-
-#include <EGL/egl.h>
-#include <GLES2/gl2.h>
-#include <array>
-#include <functional>
-
-#include <private/dvr/eds_mesh.h>
-#include <private/dvr/graphics/shader_program.h>
-#include <private/dvr/late_latch.h>
-#include <private/dvr/render_texture_params.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-class CompositeHmd;
-
-// Encapsulates the rendering operations to correct for the HMD's lens
-// distortion.
-class DistortionRenderer {
- public:
- static constexpr int kMaxLayers = 2;
- static constexpr int kMaxLatchedLayers = 4;
-
- static const mat4 kViewportFromClipMatrix;
- static const mat4 kClipFromViewportMatrix;
-
- // Creates a distortion renderer for distortion function.
- //
- // distortion_function the black-box distortion function to apply.
- // display_size the resolution of the output of the distortion renderer.
- // distortion_mesh_resolution the amount of subdivision in the
- // distortion mesh.
- DistortionRenderer(const CompositeHmd& hmd, vec2i display_size,
- int distortion_mesh_resolution,
- bool flip_texture_horizontally,
- bool flip_texture_vertically, bool separated_eye_buffers,
- bool eds_enabled, bool late_latch_enabled);
- ~DistortionRenderer();
-
- // Returns the distortion factor array for the distortion function that was
- // passed in at creation time. The distortion factor array contains the
- // magnification factor induced by the distortion mesh at every vertex. There
- // is one entry per vertex, and entries are ordered in row-major major. The
- // array contains the magnification for both eyes averaged.
- const std::vector<float>& GetDistortionFactorArray();
-
- // |render_pose_buffer_object| is the per-texture pose array buffer object.
- // |render_buffer_index| is the per-texture index into the pose array buffer
- // object. This selects which pose was rendered into the
- // corresponding texture.
- void DoLateLatch(uint32_t target_vsync_count,
- const uint32_t* render_buffer_index,
- const GLuint* render_pose_buffer_objects,
- const bool* vertical_flip, const bool* separate_eye,
- int num_textures);
-
- // Convenience method that does no flipping.
- void DoLateLatch(uint32_t target_vsync_count,
- const uint32_t* render_buffer_index,
- const GLuint* render_pose_buffer_objects, int num_textures) {
- bool flip[kMaxLayers] = {false};
- bool separate[kMaxLayers] = {separated_eye_buffers_};
- DoLateLatch(target_vsync_count, render_buffer_index,
- render_pose_buffer_objects, flip, separate, num_textures);
- }
-
- void PrepGlState(EyeType eye);
- void ResetGlState(int num_textures);
-
- // Applies distortion correction to the given textures by rendering into the
- // current output target.
- //
- // eye Which eye is being corrected.
- // texture_ids The OpenGL texture IDs of the texture layers.
- // texture_sizes Dimensions of the corresponding textures.
- // vertical_flip Whether to flip each input texture vertically.
- // separate_eye Whether the correspending texture is a separate texture for
- // left and right eyes. If false, it is a shared texture with
- // the left view on the left half and right on the right half.
- // late_latch_layer Which late latch layer index to use for each texture.
- // Typically this is just {0, 1} unless blend_with_previous_layer is used.
- // num_textures Number of textures in texture_ids and texture_sizes.
- // blend_with_previous_layer If enabled, blend this single layer with the
- // existing framebuffer contents.
- void ApplyDistortionCorrectionToTexture(
- EyeType eye, const GLuint* texture_ids, const bool* vertical_flip,
- const bool* separate_eye, const int* late_latch_layer, int num_textures,
- bool blend_with_previous_layer, bool do_gl_state_prep);
-
- // Convenience method that does no flipping.
- void ApplyDistortionCorrectionToTexture(EyeType eye,
- const GLuint* texture_ids,
- int num_textures) {
- bool flip[kMaxLayers] = {false};
- bool separate[kMaxLayers] = {separated_eye_buffers_,
- separated_eye_buffers_};
- int latch_layer[kMaxLayers] = {0, 1};
- ApplyDistortionCorrectionToTexture(eye, texture_ids, flip, separate,
- latch_layer, num_textures, false, true);
- }
-
- // Draw a video quad based on the given video texture by rendering into the
- // current output target.
- //
- // eye Which eye is being corrected.
- // layer_id Which compositor layer the video mesh should be drawn into.
- // texture_ids The OpenGL texture IDs of the texture layers.
- // transform The transformation matrix that transforms the video mesh to its
- // desired eye space position for the target eye.
- void DrawVideoQuad(EyeType eye, int layer_id, GLuint texture_id,
- const mat4& transform);
-
- // Modifies the size of the output display. This is the number of physical
- // pixels per dimension covered by the display on the output device. Calling
- // this method is cheap; it only updates the state table of the two
- // eye-specific mesh nodes.
- void SetDisplaySize(vec2i size);
-
- void SetEdsEnabled(bool enabled);
- void SetChromaticAberrationCorrectionEnabled(bool enabled) {
- chromatic_aberration_correction_enabled_ = enabled;
- }
- void SetUseAlphaVignette(bool enabled) { use_alpha_vignette_ = enabled; }
-
- bool GetLastEdsPose(LateLatchOutput* out_data, int layer_id = 0) const;
-
- private:
- enum ShaderProgramType {
- kNoChromaticAberrationCorrection,
- kNoChromaticAberrationCorrectionTwoLayers,
- kChromaticAberrationCorrection,
- kChromaticAberrationCorrectionTwoLayers,
- kChromaticAberrationCorrectionAlphaVignette,
- kChromaticAberrationCorrectionAlphaVignetteTwoLayers,
- kChromaticAberrationCorrectionWithBlend,
- kSimpleVideoQuad,
- kNumShaderPrograms,
- };
-
- struct EdsShader {
- EdsShader() {}
- ~EdsShader() {
- }
-
- void load(const char* vertex, const char* fragment, int num_layers,
- bool use_alpha_vignette, float rotation, bool flip_vertical,
- bool blend_with_previous_layer);
- void use() { pgm.Use(); }
-
- // Update uTexFromEyeMatrix and uEyeFromViewportMatrix by the distortion
- // renderer with the transform matrix.
- void SetTexFromEyeTransform(const mat4& transform) {
- glUniformMatrix4fv(uTexFromEyeMatrix, 1, false, transform.data());
- }
-
- void SetEyeFromViewportTransform(const mat4& transform) {
- glUniformMatrix4fv(uEyeFromViewportMatrix, 1, false, transform.data());
- }
-
- ShaderProgram pgm;
-
- // Texture variables, named to match shader strings for convenience.
- GLint uProjectionMatrix;
- GLint uTexFromEyeMatrix;
- GLint uEyeFromViewportMatrix;
- GLint uTexXMinMax;
- };
-
- void DrawEye(EyeType eye, const GLuint* texture_ids,
- const bool* vertical_flip, const bool* separate_eye,
- const int* late_latch_layer, int num_textures,
- bool blend_with_previous_layer, bool do_gl_state_prep);
-
- // This function is called when there is an update on Hmd and distortion mesh
- // vertices and factor array will be updated.
- void RecomputeDistortion(const CompositeHmd& hmd);
-
- // Per-eye, per flip, per separate eye mode buffers for setting EDS matrix
- // when EDS is disabled.
- GLuint uTexFromRecommendedViewportMatrix[2][2][2];
-
- // Distortion mesh for the each eye.
- EdsMesh mesh_node_[2];
- // VBO (vertex buffer object) for distortion mesh vertices.
- GLuint mesh_vbo_[2];
- // VAO (vertex array object) for distortion mesh vertex array data.
- GLuint mesh_vao_[2];
- // IBO (index buffer object) for distortion mesh indices.
- GLuint mesh_ibo_[2];
-
- EdsShader shaders_[kNumShaderPrograms];
-
- // Enum to indicate which shader program is being used.
- ShaderProgramType shader_type_;
-
- bool eds_enabled_;
- bool chromatic_aberration_correction_enabled_;
- bool use_alpha_vignette_;
-
- // This keeps track of what distortion mesh resolution we are using currently.
- // When there is an update on Hmd, the distortion mesh vertices/factor array
- // will be re-computed with the old resolution that is stored here.
- int distortion_mesh_resolution_;
-
- // The OpenGL ID of the last texture passed to
- // ApplyDistortionCorrectionToTexture().
- GLuint last_distortion_texture_id_;
-
- // GL texture 2D target for application texture.
- GLint app_texture_target_;
-
- // Precomputed matrices for EDS and viewport transforms.
- mat4 tex_from_eye_matrix_[2][2][2];
- mat4 eye_from_viewport_matrix_[2];
-
- // Eye viewport locations.
- vec2i eye_viewport_origin_[2];
- vec2i eye_viewport_size_;
-
- vec2i display_size_;
-
- std::unique_ptr<LateLatch> late_latch_[kMaxLatchedLayers];
- bool separated_eye_buffers_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_DISTORTION_RENDERER_H_
diff --git a/libs/vr/libeds/include/private/dvr/eds_mesh.h b/libs/vr/libeds/include/private/dvr/eds_mesh.h
deleted file mode 100644
index d2c901e..0000000
--- a/libs/vr/libeds/include/private/dvr/eds_mesh.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef ANDROID_DVR_EDS_MESH_H_
-#define ANDROID_DVR_EDS_MESH_H_
-
-#include <stdint.h>
-#include <functional>
-#include <vector>
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-struct EdsVertex {
- vec2 position;
- vec2 red_viewport_coords;
- vec2 green_viewport_coords;
- vec2 blue_viewport_coords;
-};
-
-struct EdsMesh {
- std::vector<EdsVertex> vertices;
- std::vector<uint16_t> indices;
-};
-
-// Distortion function takes in a point in the range [0..1, 0..1] and returns
-// the vertex position and the three distorted points for separate R, G and B
-// channels.
-typedef std::function<void(EyeType, vec2, vec2*, vec2*)> DistortionFunction;
-
-// Builds a distortion mesh of resolution |resolution| using
-// the distortion provided by |hmd| for |eye|.
-EdsMesh BuildDistortionMesh(EyeType eye, int resolution,
- const DistortionFunction& distortion_function);
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_EDS_MESH_H_
diff --git a/libs/vr/libeds/include/private/dvr/head_mount_metrics.h b/libs/vr/libeds/include/private/dvr/head_mount_metrics.h
deleted file mode 100644
index f3e63a6..0000000
--- a/libs/vr/libeds/include/private/dvr/head_mount_metrics.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef ANDROID_DVR_HEAD_MOUNT_METRICS_H_
-#define ANDROID_DVR_HEAD_MOUNT_METRICS_H_
-
-#include <array>
-
-#include <private/dvr/color_channel_distortion.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-// HeadMountMetrics encapsulates metrics describing a head mount to be used
-// with a display to create a head mounted display.
-class HeadMountMetrics {
- public:
- // The vertical point of the HMD where the lens distance is measured from.
- enum VerticalAlignment { kBottom = 0, kCenter = 1, kTop = 2 };
-
- enum EyeOrientation {
- kCCW0Degrees = 0,
- kCCW90Degrees = 1,
- kCCW180Degrees = 2,
- kCCW270Degrees = 3,
- kCCW0DegreesMirrored = 4,
- kCCW90DegreesMirrored = 5,
- kCCW180DegreesMirrored = 6,
- kCCW270DegreesMirrored = 7,
-
- // Rotations that consist of an odd number of 90 degree rotations will swap
- // the height and width of any bounding boxes/viewports. This bit informs
- // any viewport manipulating code to perform the appropriate transformation.
- kRightAngleBit = 0x01,
- // Viewports are represented as four floating point values (four half
- // angles). Rotating this structure can be done through a shift operation.
- // This mask extracts the rotation portion of the orientation.
- kRotationMask = 0x03,
- // This mask specifies whether the output is mirrored.
- kMirroredBit = 0x04
- };
-
- HeadMountMetrics(
- float inter_lens_distance, float tray_to_lens_distance,
- float virtual_eye_to_screen_distance,
- VerticalAlignment vertical_alignment, const FieldOfView& left_eye_max_fov,
- const FieldOfView& right_eye_max_fov,
- const std::shared_ptr<ColorChannelDistortion>& red_distortion,
- const std::shared_ptr<ColorChannelDistortion>& green_distortion,
- const std::shared_ptr<ColorChannelDistortion>& blue_distortion,
- EyeOrientation left_eye_orientation, EyeOrientation right_eye_orientation,
- float screen_center_to_lens_distance)
- : inter_lens_distance_(inter_lens_distance),
- tray_to_lens_distance_(tray_to_lens_distance),
- virtual_eye_to_screen_distance_(virtual_eye_to_screen_distance),
- screen_center_to_lens_distance_(screen_center_to_lens_distance),
- vertical_alignment_(vertical_alignment),
- eye_max_fov_({{left_eye_max_fov, right_eye_max_fov}}),
- color_channel_distortion_(
- {{red_distortion, green_distortion, blue_distortion}}),
- supports_chromatic_aberration_correction_(true),
- eye_orientation_({{left_eye_orientation, right_eye_orientation}}) {
- // If we're missing the green or blur distortions, assume that we don't
- // correct for chromatic aberration.
- if (!green_distortion || !blue_distortion) {
- color_channel_distortion_[1] = red_distortion;
- color_channel_distortion_[2] = red_distortion;
- supports_chromatic_aberration_correction_ = false;
- }
- }
-
- // Returns the distance in meters between the optical centers of the two
- // lenses.
- float GetInterLensDistance() const { return inter_lens_distance_; }
-
- // Returns the distance in meters from the "tray" upon which the display
- // rests to the optical center of a lens.
- float GetTrayToLensDistance() const { return tray_to_lens_distance_; }
-
- // Returns the distance in meters from the virtual eye to the screen.
- // See http://go/vr-distortion-correction for an explanation of what
- // this distance is.
- float GetVirtualEyeToScreenDistance() const {
- return virtual_eye_to_screen_distance_;
- }
-
- // Returns the horizontal distance from the center of the screen to the center
- // of the lens, in meters.
- float GetScreenCenterToLensDistance() const {
- return screen_center_to_lens_distance_;
- }
-
- // Returns the vertical alignment of the HMD. The tray-to-lens distance
- // is relative to this position. Exception: if the alignment is kCenter,
- // then the offset has no meaning.
- VerticalAlignment GetVerticalAlignment() const { return vertical_alignment_; }
-
- // Returns the given eye's maximum field of view visible through the lens.
- // The actual rendered field of view will be limited by this and also by
- // the size of the screen.
- const FieldOfView& GetEyeMaxFov(EyeType eye) const {
- return eye_max_fov_[eye];
- }
-
- // Returns the ColorChannelDistortion object representing the distortion
- // caused by the lenses for the given color channel.
- const ColorChannelDistortion& GetColorChannelDistortion(
- RgbColorChannel channel) const {
- return *color_channel_distortion_[channel];
- }
-
- bool supports_chromatic_aberration_correction() const {
- return supports_chromatic_aberration_correction_;
- }
-
- EyeOrientation GetEyeOrientation(EyeType eye) const {
- return eye_orientation_[eye];
- }
-
- private:
- float inter_lens_distance_;
- float tray_to_lens_distance_;
- float virtual_eye_to_screen_distance_;
- float screen_center_to_lens_distance_;
- VerticalAlignment vertical_alignment_;
- std::array<FieldOfView, 2> eye_max_fov_;
- std::array<std::shared_ptr<ColorChannelDistortion>, 3>
- color_channel_distortion_;
- bool supports_chromatic_aberration_correction_;
- std::array<EyeOrientation, 2> eye_orientation_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_HEAD_MOUNT_METRICS_H_
diff --git a/libs/vr/libeds/include/private/dvr/identity_distortion.h b/libs/vr/libeds/include/private/dvr/identity_distortion.h
deleted file mode 100644
index b9c5cf6..0000000
--- a/libs/vr/libeds/include/private/dvr/identity_distortion.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef ANDROID_DVR_IDENTITY_DISTORTION_H_
-#define ANDROID_DVR_IDENTITY_DISTORTION_H_
-
-#include <private/dvr/color_channel_distortion.h>
-
-namespace android {
-namespace dvr {
-
-// Provides an identity distortion operation if running the device without any
-// lenses.
-class IdentityDistortion : public ColorChannelDistortion {
- public:
- IdentityDistortion() {}
-
- vec2 Distort(vec2 p) const override { return p; }
-
- vec2 DistortInverse(vec2 p) const override { return p; }
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_IDENTITY_DISTORTION_H_
diff --git a/libs/vr/libeds/include/private/dvr/polynomial_radial_distortion.h b/libs/vr/libeds/include/private/dvr/polynomial_radial_distortion.h
deleted file mode 100644
index 8f080aa..0000000
--- a/libs/vr/libeds/include/private/dvr/polynomial_radial_distortion.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef ANDROID_DVR_POLYNOMIAL_RADIAL_DISTORTION_H_
-#define ANDROID_DVR_POLYNOMIAL_RADIAL_DISTORTION_H_
-
-#include <vector>
-
-#include <private/dvr/color_channel_distortion.h>
-
-namespace android {
-namespace dvr {
-
-// PolynomialRadialDistortion implements a radial distortion based using
-// a set of coefficients describing a polynomial function.
-// See http://en.wikipedia.org/wiki/Distortion_(optics).
-//
-// Unless otherwise stated, the units used in this class are tan-angle units
-// which can be computed as distance on the screen divided by distance from the
-// virtual eye to the screen.
-class PolynomialRadialDistortion : public ColorChannelDistortion {
- public:
- // Construct a PolynomialRadialDistortion with coefficients for
- // the radial distortion equation:
- //
- // p' = p (1 + K1 r^2 + K2 r^4 + ... + Kn r^(2n))
- //
- // where r is the distance in tan-angle units from the optical center,
- // p the input point and p' the output point.
- // The provided vector contains the coefficients for the even monomials
- // in the distortion equation: coefficients[0] is K1, coefficients[1] is K2,
- // etc. Thus the polynomial used for distortion has degree
- // (2 * coefficients.size()).
- explicit PolynomialRadialDistortion(const std::vector<float>& coefficients);
-
- // Given a radius (measuring distance from the optical axis of the lens),
- // returns the distortion factor for that radius.
- float DistortionFactor(float r_squared) const;
-
- // Given a radius (measuring distance from the optical axis of the lens),
- // returns the corresponding distorted radius.
- float DistortRadius(float r) const;
-
- // Given a 2d point p, returns the corresponding distorted point.
- // distance from the virtual eye to the screen. The optical axis
- // of the lens defines the origin for both input and output points.
- vec2 Distort(vec2 p) const override;
-
- // Given a 2d point p, returns the point that would need to be passed to
- // Distort to get point p (approximately).
- vec2 DistortInverse(vec2 p) const override;
-
- // Returns the distortion coefficients.
- const std::vector<float>& GetCoefficients() const;
-
- private:
- std::vector<float> coefficients_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_POLYNOMIAL_RADIAL_DISTORTION_H_
diff --git a/libs/vr/libeds/include/private/dvr/raw_pose.h b/libs/vr/libeds/include/private/dvr/raw_pose.h
deleted file mode 100644
index 7058f1a..0000000
--- a/libs/vr/libeds/include/private/dvr/raw_pose.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef ANDROID_DVR_RAW_POSE_H_
-#define ANDROID_DVR_RAW_POSE_H_
-
-#include <atomic>
-
-namespace android {
-namespace dvr {
-
-// POD raw data of a head pose with a count field for read consistency checking.
-// Warning: The layout of this struct and RawPosePair are specific to match the
-// corresponding buffer type in the shader in late_latch.cpp.
-struct RawPose {
- void Reset(uint32_t new_count) volatile {
- qx = qy = qz = 0.0f;
- qw = 1.0f;
- px = py = pz = 0.0f;
- count = new_count;
- }
-
- float qx, qy, qz, qw;
- float px, py, pz;
- std::atomic<uint32_t> count;
-};
-
-// RawPosePair is used for lock-free writing at about 1khz by the CPU/DSP
-// and reading by the GPU. At creation time, pose1 is given count = 1 and
-// pose2 is given count = 2.
-//
-// The lock-free write pattern is:
-// - write to pose with least count.
-// - memory write barrier.
-// - write count = count + 2.
-//
-// For reads, there is an important assumption about the GPU: it generally
-// processes things contiguously, without arbitrary preemptions that save and
-// restore full cache states. In other words, if the GPU is preempted and then
-// later resumed, any data that was read from memory before the preemption will
-// be re-read from memory after resume. This allows the following read trick to
-// work:
-// - read the full RawPosePair into a shader.
-// - select the pose with the newest count.
-//
-// The older pose may be partially written by the async stores from CPU/DSP, but
-// because of the memory barrier and GPU characteristics, the highest count pose
-// should always be a fully consistent RawPose.
-struct RawPosePair {
- RawPose pose1;
- RawPose pose2;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_RAW_POSE_H_
diff --git a/libs/vr/libeds/include/private/dvr/render_texture_params.h b/libs/vr/libeds/include/private/dvr/render_texture_params.h
deleted file mode 100644
index 71aebef..0000000
--- a/libs/vr/libeds/include/private/dvr/render_texture_params.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef ANDROID_DVR_RENDER_TEXTURE_PARAMS_H_
-#define ANDROID_DVR_RENDER_TEXTURE_PARAMS_H_
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-// Encapsulates information about the render texture, includes the size
-// of the render texture, and the left/right viewport which define the
-// portion each eye is rendering onto. This struct will be passed to
-// PresentFrame every frame before the client actually drawing the scene.
-struct RenderTextureParams {
- RenderTextureParams() {}
-
- RenderTextureParams(vec2i target_texture_size,
- const Range2i& eye_viewport_bounds_left,
- const Range2i& eye_viewport_bounds_right,
- const FieldOfView& eye_fov_left,
- const FieldOfView& eye_fov_right)
- : texture_size(target_texture_size) {
- eye_viewport_bounds[kLeftEye] = eye_viewport_bounds_left;
- eye_viewport_bounds[kRightEye] = eye_viewport_bounds_right;
- eye_fov[kLeftEye] = eye_fov_left;
- eye_fov[kRightEye] = eye_fov_right;
- }
-
- explicit RenderTextureParams(vec2i target_texture_size,
- const FieldOfView& eye_fov_left,
- const FieldOfView& eye_fov_right) {
- texture_size = target_texture_size;
- eye_viewport_bounds[0] = Range2i::FromSize(
- vec2i(0, 0), vec2i(texture_size[0] / 2, texture_size[1]));
- eye_viewport_bounds[1] =
- Range2i::FromSize(vec2i(texture_size[0] / 2, 0),
- vec2i(texture_size[0] / 2, texture_size[1]));
-
- eye_fov[kLeftEye] = eye_fov_left;
- eye_fov[kRightEye] = eye_fov_right;
- }
-
- // The render texture size.
- vec2i texture_size;
-
- // The viewport bounds on the render texture for each eye.
- Range2i eye_viewport_bounds[2];
-
- // The field of view for each eye in degrees.
- FieldOfView eye_fov[2];
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_RENDER_TEXTURE_PARAMS_H_
diff --git a/libs/vr/libeds/polynomial_radial_distortion.cpp b/libs/vr/libeds/polynomial_radial_distortion.cpp
deleted file mode 100644
index fa01bb4..0000000
--- a/libs/vr/libeds/polynomial_radial_distortion.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-#include "include/private/dvr/polynomial_radial_distortion.h"
-
-namespace android {
-namespace dvr {
-
-PolynomialRadialDistortion::PolynomialRadialDistortion(
- const std::vector<float>& coefficients)
- : coefficients_(coefficients) {}
-
-float PolynomialRadialDistortion::DistortionFactor(float r_squared) const {
- float r_factor = 1.0f;
- float distortion_factor = 1.0f;
-
- for (float ki : coefficients_) {
- r_factor *= r_squared;
- distortion_factor += ki * r_factor;
- }
-
- return distortion_factor;
-}
-
-float PolynomialRadialDistortion::DistortRadius(float r) const {
- return r * DistortionFactor(r * r);
-}
-
-vec2 PolynomialRadialDistortion::Distort(vec2 p) const {
- return p * DistortionFactor(p.squaredNorm());
-}
-
-vec2 PolynomialRadialDistortion::DistortInverse(vec2 p) const {
- // Secant method.
- const float radius = p.norm();
- float r0 = radius / 0.9f;
- float r1 = radius * 0.9f;
- float r2;
- float dr0 = radius - DistortRadius(r0);
- float dr1;
- while (fabsf(r1 - r0) > 0.0001f /** 0.1mm */) {
- dr1 = radius - DistortRadius(r1);
- r2 = r1 - dr1 * ((r1 - r0) / (dr1 - dr0));
- r0 = r1;
- r1 = r2;
- dr0 = dr1;
- }
- return (r1 / radius) * p;
-}
-
-const std::vector<float>& PolynomialRadialDistortion::GetCoefficients() const {
- return coefficients_;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/libs/vr/libeds/tests/eds_app_tests.cpp b/libs/vr/libeds/tests/eds_app_tests.cpp
deleted file mode 100644
index 549d864..0000000
--- a/libs/vr/libeds/tests/eds_app_tests.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-#include <EGL/egl.h>
-#include <GLES2/gl2.h>
-
-#include <dvr/graphics.h>
-#include <dvr/pose_client.h>
-#include <gtest/gtest.h>
-#include <private/dvr/graphics/shader_program.h>
-#include <private/dvr/types.h>
-
-namespace {
-
-#define POSE_BINDING 0
-
-#ifndef STRINGIFY
-#define STRINGIFY2(s) #s
-#define STRINGIFY(s) STRINGIFY2(s)
-#endif
-
-static const char g_vert_shader[] =
- "layout(binding = " STRINGIFY(POSE_BINDING) ", std140)\n"
- "uniform LateLatchData {\n"
- " mat4 uViewProjection;\n"
- "};\n"
- "void main() {\n"
- " vec2 verts[4];\n"
- " verts[0] = vec2(-1, -1);\n"
- " verts[1] = vec2(-1, 1);\n"
- " verts[2] = vec2(1, -1);\n"
- " verts[3] = vec2(1, 1);\n"
- " gl_Position = uViewProjection * vec4(verts[gl_VertexID], 0.0, 1.0);\n"
- "}\n";
-
-static const char g_frag_shader[] =
- "precision mediump float;\n"
- "out vec4 outColor;\n"
- "void main() {\n"
- " outColor = vec4(1.0);\n"
- "}\n";
-
-DvrGraphicsContext* CreateContext(int* surface_width, int* surface_height) {
- DvrGraphicsContext* context = nullptr;
- int display_width = 0, display_height = 0;
- float inter_lens_meters = 0.0f;
- float left_fov[4] = {0.0f};
- float right_fov[4] = {0.0f};
- int disable_warp = 0;
- int enable_late_latch = 1;
- DvrSurfaceParameter surface_params[] = {
- DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
- DVR_SURFACE_PARAMETER_IN(ENABLE_LATE_LATCH, enable_late_latch),
- DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
- DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
- DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, surface_width),
- DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, surface_height),
- DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
- DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
- DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
- DVR_SURFACE_PARAMETER_LIST_END,
- };
- dvrGraphicsContextCreate(surface_params, &context);
- return context;
-}
-
-} // namespace
-
-TEST(SensorAppTests, EdsWithLateLatch) {
- int surface_width = 0, surface_height = 0;
- DvrGraphicsContext* context = CreateContext(&surface_width, &surface_height);
- ASSERT_NE(nullptr, context);
-
- android::dvr::ShaderProgram shader(g_vert_shader, g_frag_shader);
-
- for (int i = 0; i < 5; ++i) {
- DvrFrameSchedule schedule;
- dvrGraphicsWaitNextFrame(context, 0, &schedule);
-
- const auto ident_mat = android::dvr::mat4::Identity();
- const float* ident_mats[] = { ident_mat.data(), ident_mat.data() };
- GLuint late_latch_buffer_id = 0;
- int ret = dvrBeginRenderFrameLateLatch(context, 0, schedule.vsync_count, 2,
- ident_mats, ident_mats, ident_mats,
- &late_latch_buffer_id);
- EXPECT_EQ(0, ret);
- for (int eye = 0; eye < 2; ++eye) {
- if (eye == 0)
- glViewport(0, 0, surface_width / 2, surface_height);
- else
- glViewport(surface_width / 2, 0, surface_width / 2, surface_height);
-
- glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
- shader.Use();
-
- // Bind late latch pose matrix buffer.
- glBindBufferRange(
- GL_UNIFORM_BUFFER, POSE_BINDING, late_latch_buffer_id,
- offsetof(DvrGraphicsLateLatchData, view_proj_matrix[eye]),
- 16 * sizeof(float));
-
- // TODO(jbates): use transform feedback here to grab the vertex output
- // and verify that it received late-latch pose data. Combine this with
- // mocked pose data to verify that late-latching is working.
- glDrawArrays(GL_POINTS, 0, 4);
- }
- dvrPresent(context);
- }
-
- glFinish();
- dvrGraphicsContextDestroy(context);
-}
-
-TEST(SensorAppTests, EdsWithoutLateLatch) {
- int surface_width = 0, surface_height = 0;
- DvrGraphicsContext* context = CreateContext(&surface_width, &surface_height);
- ASSERT_NE(nullptr, context);
- DvrPose* client = dvrPoseCreate();
- ASSERT_NE(nullptr, client);
-
- for (int i = 0; i < 5; ++i) {
- DvrFrameSchedule schedule;
- dvrGraphicsWaitNextFrame(context, 0, &schedule);
- DvrPoseAsync pose;
- int ret = dvrPoseGet(client, schedule.vsync_count, &pose);
- ASSERT_EQ(0, ret);
-
- dvrBeginRenderFrameEds(context, pose.orientation, pose.translation);
- for (int eye = 0; eye < 2; ++eye) {
- if (eye == 0)
- glViewport(0, 0, surface_width / 2, surface_height);
- else
- glViewport(surface_width / 2, 0, surface_width / 2, surface_height);
-
- glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
- EXPECT_EQ(0, ret);
- }
- dvrPresent(context);
- }
-
- dvrPoseDestroy(client);
- dvrGraphicsContextDestroy(context);
-}
diff --git a/libs/vr/libvrflinger/Android.bp b/libs/vr/libvrflinger/Android.bp
index dc81c60..6218e8e 100644
--- a/libs/vr/libvrflinger/Android.bp
+++ b/libs/vr/libvrflinger/Android.bp
@@ -30,7 +30,6 @@
"libhwcomposer-command-buffer",
"libbufferhub",
"libbufferhubqueue",
- "libeds",
"libdisplay",
"libdvrcommon",
"libdvrgraphics",
diff --git a/libs/vr/libvrflinger/hardware_composer.cpp b/libs/vr/libvrflinger/hardware_composer.cpp
index 079d6fd..4c08284 100644
--- a/libs/vr/libvrflinger/hardware_composer.cpp
+++ b/libs/vr/libvrflinger/hardware_composer.cpp
@@ -49,10 +49,6 @@
// Offset before vsync to submit frames to hardware composer.
constexpr int64_t kFramePostOffsetNs = 4000000; // 4ms
-constexpr size_t kDefaultDisplayConfigCount = 32;
-
-constexpr float kMetersPerInch = 0.0254f;
-
const char kBacklightBrightnessSysFile[] =
"/sys/class/leds/lcd-backlight/brightness";
@@ -65,23 +61,6 @@
const char kRightEyeOffsetProperty[] = "dvr.right_eye_offset_ns";
-// Returns our best guess for the time the compositor will spend rendering the
-// next frame.
-int64_t GuessFrameTime(int compositor_visible_layer_count) {
- // The cost of asynchronous EDS and lens warp is currently measured at 2.5ms
- // for one layer and 7ms for two layers, but guess a higher frame time to
- // account for CPU overhead. This guess is only used before we've measured the
- // actual time to render a frame for the current compositor configuration.
- switch (compositor_visible_layer_count) {
- case 0:
- return 500000; // .5ms
- case 1:
- return 5000000; // 5ms
- default:
- return 10500000; // 10.5ms
- }
-}
-
// Get time offset from a vsync to when the pose for that vsync should be
// predicted out to. For example, if scanout gets halfway through the frame
// at the halfway point between vsyncs, then this could be half the period.
@@ -241,14 +220,6 @@
}
void HardwareComposer::OnPostThreadResumed() {
- constexpr int format = HAL_PIXEL_FORMAT_RGBA_8888;
- constexpr int usage =
- GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER;
-
- framebuffer_target_ = std::make_shared<IonBuffer>(
- native_display_metrics_.width, native_display_metrics_.height, format,
- usage);
-
hwc2_hidl_->resetCommands();
// Connect to pose service.
@@ -275,7 +246,6 @@
}
void HardwareComposer::OnPostThreadPaused() {
- framebuffer_target_.reset();
retire_fence_fds_.clear();
display_surfaces_.clear();
diff --git a/libs/vr/libvrflinger/hardware_composer.h b/libs/vr/libvrflinger/hardware_composer.h
index 5a75f42..20327a3 100644
--- a/libs/vr/libvrflinger/hardware_composer.h
+++ b/libs/vr/libvrflinger/hardware_composer.h
@@ -383,9 +383,6 @@
// Transform required to get from native to logical display orientation.
HWC::Transform display_transform_ = HWC::Transform::None;
- // Buffer for the background layer required by hardware composer.
- std::shared_ptr<IonBuffer> framebuffer_target_;
-
// Pending surface list. Set by the display service when DirectSurfaces are
// added, removed, or change visibility. Written by the message dispatch
// thread and read by the post thread.
diff --git a/libs/vr/libvrsensor/Android.bp b/libs/vr/libvrsensor/Android.bp
index d59182e..3588b5e 100644
--- a/libs/vr/libvrsensor/Android.bp
+++ b/libs/vr/libvrsensor/Android.bp
@@ -66,7 +66,6 @@
"libgmock_main",
"libgmock",
"libdisplay",
- "libeds",
"libvrsensor",
"libdvrgraphics",
] + staticLibraries,
diff --git a/opengl/libs/EGL/eglApi.cpp b/opengl/libs/EGL/eglApi.cpp
index 9de15d0..10f4e66 100644
--- a/opengl/libs/EGL/eglApi.cpp
+++ b/opengl/libs/EGL/eglApi.cpp
@@ -829,6 +829,42 @@
egl_tls_t::setContext(EGL_NO_CONTEXT);
}
} else {
+ // Force return to current context for drivers that cannot handle errors
+ EGLBoolean restore_result = EGL_FALSE;
+
+ // get a reference to the old current objects
+ ContextRef _c2(dp.get(), cur_c);
+ SurfaceRef _d2(dp.get(), cur_c->draw);
+ SurfaceRef _r2(dp.get(), cur_c->read);
+
+ if (cur_c == NULL) {
+ restore_result = dp->makeCurrent(c, cur_c,
+ EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT,
+ EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ } else {
+ c = cur_c;
+ impl_ctx = c->context;
+ impl_draw = EGL_NO_SURFACE;
+ if (cur_c->draw != EGL_NO_SURFACE) {
+ d = get_surface(cur_c->draw);
+ impl_draw = d->surface;
+ }
+ impl_read = EGL_NO_SURFACE;
+ if (cur_c->read != EGL_NO_SURFACE) {
+ r = get_surface(cur_c->read);
+ impl_read = r->surface;
+ }
+ restore_result = dp->makeCurrent(c, cur_c,
+ cur_c->draw, cur_c->read, cur_c->context,
+ impl_draw, impl_read, impl_ctx);
+ }
+ if (restore_result == EGL_TRUE) {
+ _c2.acquire();
+ _r2.acquire();
+ _d2.acquire();
+ } else {
+ ALOGE("Could not restore original EGL context");
+ }
// this will ALOGE the error
egl_connection_t* const cnx = &gEGLImpl;
result = setError(cnx->egl.eglGetError(), (EGLBoolean)EGL_FALSE);
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index c5bbeee..d60768c 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -1250,7 +1250,7 @@
}
// Check maximum delay for the sensor.
- nsecs_t maxDelayNs = sensor->getSensor().getMaxDelay() * 1000;
+ nsecs_t maxDelayNs = sensor->getSensor().getMaxDelay() * 1000LL;
if (maxDelayNs > 0 && (samplingPeriodNs > maxDelayNs)) {
samplingPeriodNs = maxDelayNs;
}
@@ -1511,4 +1511,3 @@
}
}; // namespace android
-
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index d33d370..06a0765 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -697,7 +697,16 @@
mName.string(), z, to_string(error).c_str(),
static_cast<int32_t>(error));
- error = hwcLayer->setInfo(s.type, s.appId);
+ int type = s.type;
+ int appId = s.appId;
+ sp<Layer> parent = mParent.promote();
+ if (parent.get()) {
+ auto& parentState = parent->getDrawingState();
+ type = parentState.type;
+ appId = parentState.appId;
+ }
+
+ error = hwcLayer->setInfo(type, appId);
ALOGE_IF(error != HWC2::Error::None, "[%s] Failed to set info (%d)",
mName.string(), static_cast<int32_t>(error));
#else
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 45cac6f..29a8292 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1116,9 +1116,14 @@
void SurfaceFlinger::resyncWithRateLimit() {
static constexpr nsecs_t kIgnoreDelay = ms2ns(500);
- if (systemTime() - mLastSwapTime > kIgnoreDelay) {
+
+ // No explicit locking is needed here since EventThread holds a lock while calling this method
+ static nsecs_t sLastResyncAttempted = 0;
+ const nsecs_t now = systemTime();
+ if (now - sLastResyncAttempted > kIgnoreDelay) {
resyncToHardwareVsync(false);
}
+ sLastResyncAttempted = now;
}
void SurfaceFlinger::onVSyncReceived(HWComposer* composer, int32_t type,
@@ -1262,7 +1267,6 @@
}
}
mDisplays.clear();
- initializeDisplays();
}
void SurfaceFlinger::updateVrFlinger() {
@@ -1309,6 +1313,12 @@
// parts of this class rely on the primary display always being available.
createDefaultDisplayDevice();
+ // Reset the timing values to account for the period of the swapped in HWC
+ const auto& activeConfig = mHwc->getActiveConfig(HWC_DISPLAY_PRIMARY);
+ const nsecs_t period = activeConfig->getVsyncPeriod();
+ mAnimFrameTracker.setDisplayRefreshPeriod(period);
+ setCompositorTimingSnapped(0, period, 0);
+
android_atomic_or(1, &mRepaintEverything);
setTransactionFlags(eDisplayTransactionNeeded);
}
diff --git a/services/vr/bufferhubd/buffer_hub.cpp b/services/vr/bufferhubd/buffer_hub.cpp
index debcc73..d27f274 100644
--- a/services/vr/bufferhubd/buffer_hub.cpp
+++ b/services/vr/bufferhubd/buffer_hub.cpp
@@ -73,7 +73,7 @@
} else {
std::string dimensions = std::to_string(info.width) + "x" +
std::to_string(info.height) + "x" +
- std::to_string(info.slice_count);
+ std::to_string(info.layer_count);
stream << std::setw(14) << dimensions;
}
stream << " ";
@@ -120,7 +120,7 @@
} else {
std::string dimensions = std::to_string(info.width) + "x" +
std::to_string(info.height) + "x" +
- std::to_string(info.slice_count);
+ std::to_string(info.layer_count);
stream << std::setw(14) << dimensions;
}
stream << " ";
@@ -242,15 +242,13 @@
Status<void> BufferHubService::OnCreateBuffer(Message& message, uint32_t width,
uint32_t height, uint32_t format,
uint64_t usage,
- size_t meta_size_bytes,
- size_t slice_count) {
+ size_t meta_size_bytes) {
// Use the producer channel id as the global buffer id.
const int buffer_id = message.GetChannelId();
ALOGD_IF(TRACE,
"BufferHubService::OnCreateBuffer: buffer_id=%d width=%u height=%u "
- "format=%u usage=%" PRIx64 " meta_size_bytes=%zu slice_count=%zu",
- buffer_id, width, height, format, usage, meta_size_bytes,
- slice_count);
+ "format=%u usage=%" PRIx64 " meta_size_bytes=%zu",
+ buffer_id, width, height, format, usage, meta_size_bytes);
// See if this channel is already attached to a buffer.
if (const auto channel = message.GetChannel<BufferHubChannel>()) {
@@ -258,9 +256,10 @@
buffer_id);
return ErrorStatus(EALREADY);
}
-
- auto status = ProducerChannel::Create(this, buffer_id, width, height, format,
- usage, meta_size_bytes, slice_count);
+ const uint32_t kDefaultLayerCount = 1;
+ auto status = ProducerChannel::Create(this, buffer_id, width, height,
+ kDefaultLayerCount, format, usage,
+ meta_size_bytes);
if (status) {
message.SetChannel(status.take());
return {};
@@ -274,14 +273,15 @@
Status<void> BufferHubService::OnCreatePersistentBuffer(
Message& message, const std::string& name, int user_id, int group_id,
uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
- size_t meta_size_bytes, size_t slice_count) {
+ size_t meta_size_bytes) {
+ const uint32_t kDefaultLayerCount = 1;
const int channel_id = message.GetChannelId();
ALOGD_IF(TRACE,
"BufferHubService::OnCreatePersistentBuffer: channel_id=%d name=%s "
"user_id=%d group_id=%d width=%u height=%u format=%u "
- "usage=%" PRIx64 " meta_size_bytes=%zu slice_count=%zu",
+ "usage=%" PRIx64 " meta_size_bytes=%zu",
channel_id, name.c_str(), user_id, group_id, width, height, format,
- usage, meta_size_bytes, slice_count);
+ usage, meta_size_bytes);
// See if this channel is already attached to a buffer.
if (const auto channel = message.GetChannel<BufferHubChannel>()) {
@@ -302,8 +302,8 @@
"not have permission to access named buffer: name=%s euid=%d egid=%d",
name.c_str(), euid, euid);
return ErrorStatus(EPERM);
- } else if (!buffer->CheckParameters(width, height, format, usage,
- meta_size_bytes, slice_count)) {
+ } else if (!buffer->CheckParameters(width, height, kDefaultLayerCount,
+ format, usage, meta_size_bytes)) {
ALOGE(
"BufferHubService::OnCreatePersistentBuffer: Requested an existing "
"buffer with different parameters: name=%s",
@@ -321,9 +321,9 @@
return {};
}
} else {
- auto status =
- ProducerChannel::Create(this, channel_id, width, height, format, usage,
- meta_size_bytes, slice_count);
+ auto status = ProducerChannel::Create(this, channel_id, width, height,
+ kDefaultLayerCount, format, usage,
+ meta_size_bytes);
if (!status) {
ALOGE("BufferHubService::OnCreateBuffer: Failed to create producer!!");
return status.error_status();
diff --git a/services/vr/bufferhubd/buffer_hub.h b/services/vr/bufferhubd/buffer_hub.h
index 817b01e..3bc2635 100644
--- a/services/vr/bufferhubd/buffer_hub.h
+++ b/services/vr/bufferhubd/buffer_hub.h
@@ -50,9 +50,9 @@
// Data field for buffer producer.
uint32_t width = 0;
uint32_t height = 0;
+ uint32_t layer_count = 0;
uint32_t format = 0;
uint64_t usage = 0;
- size_t slice_count = 0;
std::string name;
// Data filed for producer queue.
@@ -60,16 +60,15 @@
UsagePolicy usage_policy{0, 0, 0, 0};
BufferInfo(int id, size_t consumer_count, uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage, size_t slice_count,
- const std::string& name)
+ uint32_t layer_count, uint32_t format, uint64_t usage, const std::string& name)
: id(id),
type(kProducerType),
consumer_count(consumer_count),
width(width),
height(height),
+ layer_count(layer_count),
format(format),
usage(usage),
- slice_count(slice_count),
name(name) {}
BufferInfo(int id, size_t consumer_count, size_t capacity,
@@ -158,12 +157,13 @@
pdx::Status<void> OnCreateBuffer(pdx::Message& message, uint32_t width,
uint32_t height, uint32_t format,
- uint64_t usage, size_t meta_size_bytes,
- size_t slice_count);
- pdx::Status<void> OnCreatePersistentBuffer(
- pdx::Message& message, const std::string& name, int user_id, int group_id,
- uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
- size_t meta_size_bytes, size_t slice_count);
+ uint64_t usage, size_t meta_size_bytes);
+ pdx::Status<void> OnCreatePersistentBuffer(pdx::Message& message,
+ const std::string& name,
+ int user_id, int group_id,
+ uint32_t width, uint32_t height,
+ uint32_t format, uint64_t usage,
+ size_t meta_size_bytes);
pdx::Status<void> OnGetPersistentBuffer(pdx::Message& message,
const std::string& name);
pdx::Status<QueueInfo> OnCreateProducerQueue(pdx::Message& message,
diff --git a/services/vr/bufferhubd/consumer_channel.cpp b/services/vr/bufferhubd/consumer_channel.cpp
index 311f5c6..08b2790 100644
--- a/services/vr/bufferhubd/consumer_channel.cpp
+++ b/services/vr/bufferhubd/consumer_channel.cpp
@@ -75,11 +75,6 @@
*producer, &ProducerChannel::OnGetBuffer, message);
return true;
- case BufferHubRPC::GetBuffers::Opcode:
- DispatchRemoteMethod<BufferHubRPC::GetBuffers>(
- *producer, &ProducerChannel::OnGetBuffers, message);
- return true;
-
case BufferHubRPC::NewConsumer::Opcode:
DispatchRemoteMethod<BufferHubRPC::NewConsumer>(
*producer, &ProducerChannel::OnNewConsumer, message);
diff --git a/services/vr/bufferhubd/producer_channel.cpp b/services/vr/bufferhubd/producer_channel.cpp
index 398aa12..b9984a0 100644
--- a/services/vr/bufferhubd/producer_channel.cpp
+++ b/services/vr/bufferhubd/producer_channel.cpp
@@ -26,23 +26,20 @@
ProducerChannel::ProducerChannel(BufferHubService* service, int channel_id,
uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage,
- size_t meta_size_bytes, size_t slice_count,
+ uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t meta_size_bytes,
int* error)
: BufferHubChannel(service, channel_id, channel_id, kProducerType),
pending_consumers_(0),
- slices_(std::max(static_cast<size_t>(1), slice_count)),
producer_owns_(true),
meta_size_bytes_(meta_size_bytes),
meta_(meta_size_bytes ? new uint8_t[meta_size_bytes] : nullptr) {
- for (auto& ion_buffer : slices_) {
- const int ret = ion_buffer.Alloc(width, height, format, usage);
- if (ret < 0) {
- ALOGE("ProducerChannel::ProducerChannel: Failed to allocate buffer: %s",
- strerror(-ret));
- *error = ret;
- return;
- }
+ const int ret = buffer_.Alloc(width, height, layer_count, format, usage);
+ if (ret < 0) {
+ ALOGE("ProducerChannel::ProducerChannel: Failed to allocate buffer: %s",
+ strerror(-ret));
+ *error = ret;
+ return;
}
// Success.
@@ -51,12 +48,12 @@
Status<std::shared_ptr<ProducerChannel>> ProducerChannel::Create(
BufferHubService* service, int channel_id, uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage, size_t meta_size_bytes,
- size_t slice_count) {
+ uint32_t layer_count, uint32_t format, uint64_t usage,
+ size_t meta_size_bytes) {
int error;
std::shared_ptr<ProducerChannel> producer(
- new ProducerChannel(service, channel_id, width, height, format, usage,
- meta_size_bytes, slice_count, &error));
+ new ProducerChannel(service, channel_id, width, height, layer_count,
+ format, usage, meta_size_bytes, &error));
if (error < 0)
return ErrorStatus(-error);
else
@@ -72,9 +69,9 @@
}
BufferHubChannel::BufferInfo ProducerChannel::GetBufferInfo() const {
- return BufferInfo(buffer_id(), consumer_channels_.size(), slices_[0].width(),
- slices_[0].height(), slices_[0].format(),
- slices_[0].usage(), slices_.size(), name_);
+ return BufferInfo(buffer_id(), consumer_channels_.size(), buffer_.width(),
+ buffer_.height(), buffer_.layer_count(), buffer_.format(),
+ buffer_.usage(), name_);
}
void ProducerChannel::HandleImpulse(Message& message) {
@@ -94,11 +91,6 @@
*this, &ProducerChannel::OnGetBuffer, message);
return true;
- case BufferHubRPC::GetBuffers::Opcode:
- DispatchRemoteMethod<BufferHubRPC::GetBuffers>(
- *this, &ProducerChannel::OnGetBuffers, message);
- return true;
-
case BufferHubRPC::NewConsumer::Opcode:
DispatchRemoteMethod<BufferHubRPC::NewConsumer>(
*this, &ProducerChannel::OnNewConsumer, message);
@@ -130,24 +122,10 @@
}
Status<NativeBufferHandle<BorrowedHandle>> ProducerChannel::OnGetBuffer(
- Message& message, unsigned index) {
+ Message& message) {
ATRACE_NAME("ProducerChannel::OnGetBuffer");
ALOGD_IF(TRACE, "ProducerChannel::OnGetBuffer: buffer=%d", buffer_id());
- if (index < slices_.size()) {
- return {NativeBufferHandle<BorrowedHandle>(slices_[index], buffer_id())};
- } else {
- return ErrorStatus(EINVAL);
- }
-}
-
-Status<std::vector<NativeBufferHandle<BorrowedHandle>>>
-ProducerChannel::OnGetBuffers(Message&) {
- ATRACE_NAME("ProducerChannel::OnGetBuffers");
- ALOGD_IF(TRACE, "ProducerChannel::OnGetBuffers: buffer_id=%d", buffer_id());
- std::vector<NativeBufferHandle<BorrowedHandle>> buffer_handles;
- for (const auto& buffer : slices_)
- buffer_handles.emplace_back(buffer, buffer_id());
- return {std::move(buffer_handles)};
+ return {NativeBufferHandle<BorrowedHandle>(buffer_, buffer_id())};
}
Status<RemoteChannelHandle> ProducerChannel::CreateConsumer(Message& message) {
@@ -371,12 +349,11 @@
// Returns true if the given parameters match the underlying buffer parameters.
bool ProducerChannel::CheckParameters(uint32_t width, uint32_t height,
- uint32_t format, uint64_t usage,
- size_t meta_size_bytes,
- size_t slice_count) {
- return slices_.size() == slice_count && meta_size_bytes == meta_size_bytes_ &&
- slices_[0].width() == width && slices_[0].height() == height &&
- slices_[0].format() == format && slices_[0].usage() == usage;
+ uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t meta_size_bytes) {
+ return meta_size_bytes == meta_size_bytes_ && buffer_.width() == width &&
+ buffer_.height() == height && buffer_.layer_count() == layer_count &&
+ buffer_.format() == format && buffer_.usage() == usage;
}
} // namespace dvr
diff --git a/services/vr/bufferhubd/producer_channel.h b/services/vr/bufferhubd/producer_channel.h
index 6de619d..5ada478 100644
--- a/services/vr/bufferhubd/producer_channel.h
+++ b/services/vr/bufferhubd/producer_channel.h
@@ -32,8 +32,8 @@
static pdx::Status<std::shared_ptr<ProducerChannel>> Create(
BufferHubService* service, int channel_id, uint32_t width,
- uint32_t height, uint32_t format, uint64_t usage, size_t meta_size_bytes,
- size_t slice_count);
+ uint32_t height, uint32_t layer_count, uint32_t format, uint64_t usage,
+ size_t meta_size_bytes);
~ProducerChannel() override;
@@ -42,10 +42,7 @@
BufferInfo GetBufferInfo() const override;
- pdx::Status<NativeBufferHandle<BorrowedHandle>> OnGetBuffer(Message& message,
- unsigned index);
- pdx::Status<std::vector<NativeBufferHandle<BorrowedHandle>>> OnGetBuffers(
- Message& message);
+ pdx::Status<NativeBufferHandle<BorrowedHandle>> OnGetBuffer(Message& message);
pdx::Status<RemoteChannelHandle> CreateConsumer(Message& message);
pdx::Status<RemoteChannelHandle> OnNewConsumer(Message& message);
@@ -61,9 +58,8 @@
void RemoveConsumer(ConsumerChannel* channel);
bool CheckAccess(int euid, int egid);
- bool CheckParameters(uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t meta_size_bytes,
- size_t slice_count);
+ bool CheckParameters(uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage, size_t meta_size_bytes);
pdx::Status<void> OnProducerMakePersistent(Message& message,
const std::string& name,
@@ -76,7 +72,7 @@
// zero then the producer can re-acquire ownership.
int pending_consumers_;
- std::vector<IonBuffer> slices_;
+ IonBuffer buffer_;
bool producer_owns_;
LocalFence post_fence_;
@@ -95,8 +91,8 @@
std::string name_;
ProducerChannel(BufferHubService* service, int channel, uint32_t width,
- uint32_t height, uint32_t format, uint64_t usage,
- size_t meta_size_bytes, size_t slice_count, int* error);
+ uint32_t height, uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t meta_size_bytes, int* error);
pdx::Status<void> OnProducerPost(
Message& message, LocalFence acquire_fence,
diff --git a/services/vr/bufferhubd/producer_queue_channel.cpp b/services/vr/bufferhubd/producer_queue_channel.cpp
index 843277e..886e621 100644
--- a/services/vr/bufferhubd/producer_queue_channel.cpp
+++ b/services/vr/bufferhubd/producer_queue_channel.cpp
@@ -39,14 +39,12 @@
const UsagePolicy& usage_policy) {
// Configuration between |usage_deny_set_mask| and |usage_deny_clear_mask|
// should be mutually exclusive.
- if ((usage_policy.usage_deny_set_mask &
- usage_policy.usage_deny_clear_mask)) {
+ if ((usage_policy.usage_deny_set_mask & usage_policy.usage_deny_clear_mask)) {
ALOGE(
"BufferHubService::OnCreateProducerQueue: illegal usage mask "
"configuration: usage_deny_set_mask=%" PRIx64
" usage_deny_clear_mask=%" PRIx64,
- usage_policy.usage_deny_set_mask,
- usage_policy.usage_deny_clear_mask);
+ usage_policy.usage_deny_set_mask, usage_policy.usage_deny_clear_mask);
return ErrorStatus(EINVAL);
}
@@ -141,8 +139,8 @@
Status<std::vector<std::pair<RemoteChannelHandle, size_t>>>
ProducerQueueChannel::OnProducerQueueAllocateBuffers(
- Message& message, uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count, size_t buffer_count) {
+ Message& message, uint32_t width, uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage, size_t buffer_count) {
ATRACE_NAME("ProducerQueueChannel::OnProducerQueueAllocateBuffers");
ALOGD_IF(TRACE,
"ProducerQueueChannel::OnProducerQueueAllocateBuffers: "
@@ -176,8 +174,8 @@
(usage & ~usage_policy_.usage_clear_mask) | usage_policy_.usage_set_mask;
for (size_t i = 0; i < buffer_count; i++) {
- auto status = AllocateBuffer(message, width, height, format,
- effective_usage, slice_count);
+ auto status = AllocateBuffer(message, width, height, layer_count, format,
+ effective_usage);
if (!status) {
ALOGE(
"ProducerQueueChannel::OnProducerQueueAllocateBuffers: Failed to "
@@ -192,8 +190,8 @@
Status<std::pair<RemoteChannelHandle, size_t>>
ProducerQueueChannel::AllocateBuffer(Message& message, uint32_t width,
- uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count) {
+ uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage) {
ATRACE_NAME("ProducerQueueChannel::AllocateBuffer");
ALOGD_IF(TRACE,
"ProducerQueueChannel::AllocateBuffer: producer_channel_id=%d",
@@ -218,13 +216,13 @@
ALOGD_IF(TRACE,
"ProducerQueueChannel::AllocateBuffer: buffer_id=%d width=%u "
- "height=%u format=%u usage=%" PRIx64 " slice_count=%zu",
- buffer_id, width, height, format, usage, slice_count);
+ "height=%u layer_count=%u format=%u usage=%" PRIx64,
+ buffer_id, width, height, layer_count, format, usage);
auto buffer_handle = status.take();
auto producer_channel_status =
- ProducerChannel::Create(service(), buffer_id, width, height, format,
- usage, meta_size_bytes_, slice_count);
+ ProducerChannel::Create(service(), buffer_id, width, height, layer_count,
+ format, usage, meta_size_bytes_);
if (!producer_channel_status) {
ALOGE(
"ProducerQueueChannel::AllocateBuffer: Failed to create producer "
diff --git a/services/vr/bufferhubd/producer_queue_channel.h b/services/vr/bufferhubd/producer_queue_channel.h
index 13c9ddc..28c74cd 100644
--- a/services/vr/bufferhubd/producer_queue_channel.h
+++ b/services/vr/bufferhubd/producer_queue_channel.h
@@ -34,8 +34,8 @@
// handle this as if a new producer is created through kOpCreateBuffer.
pdx::Status<std::vector<std::pair<pdx::RemoteChannelHandle, size_t>>>
OnProducerQueueAllocateBuffers(pdx::Message& message, uint32_t width,
- uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count,
+ uint32_t height, uint32_t layer_count,
+ uint32_t format, uint64_t usage,
size_t buffer_count);
// Detach a BufferHubProducer indicated by |slot|. Note that the buffer must
@@ -57,8 +57,8 @@
// Returns the remote channel handle and the slot number for the newly
// allocated buffer.
pdx::Status<std::pair<pdx::RemoteChannelHandle, size_t>> AllocateBuffer(
- pdx::Message& message, uint32_t width, uint32_t height, uint32_t format,
- uint64_t usage, size_t slice_count);
+ pdx::Message& message, uint32_t width, uint32_t height,
+ uint32_t layer_count, uint32_t format, uint64_t usage);
// Size of the meta data associated with all the buffers allocated from the
// queue. Now we assume the metadata size is immutable once the queue is
diff --git a/services/vr/sensord/Android.mk b/services/vr/sensord/Android.mk
deleted file mode 100644
index 638c9a8..0000000
--- a/services/vr/sensord/Android.mk
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-SENSORD_EXTEND ?= libsensordextensionstub
-
-sourceFiles := \
- pose_service.cpp \
- sensord.cpp \
- sensor_fusion.cpp \
- sensor_hal_thread.cpp \
- sensor_ndk_thread.cpp \
- sensor_service.cpp \
- sensor_thread.cpp \
-
-includeFiles += \
- $(LOCAL_PATH)/include
-
-staticLibraries := \
- libdvrcommon \
- libvrsensor \
- libperformance \
- libbufferhub \
- libpdx_default_transport \
- libposepredictor \
-
-sharedLibraries := \
- libandroid \
- libbase \
- libbinder \
- libcutils \
- liblog \
- libhardware \
- libutils \
- libui \
- $(SENSORD_EXTEND) \
-
-cFlags := -DLOG_TAG=\"sensord\" \
- -DTRACE=0
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(sourceFiles)
-LOCAL_CFLAGS := $(cFlags)
-LOCAL_STATIC_LIBRARIES := $(staticLibraries)
-LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE := sensord
-LOCAL_C_INCLUDES := $(includeFiles)
-LOCAL_C_INCLUDES += \
- $(call local-generated-sources-dir)/proto/frameworks/native/services/vr/sensord
-LOCAL_INIT_RC := sensord.rc
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_STATIC_LIBRARIES := $(staticLibraries)
-LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
-LOCAL_SRC_FILES := test/poselatencytest.cpp
-LOCAL_MODULE := poselatencytest
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libsensordextensionstub
-LOCAL_SRC_FILES := sensord_extension.cpp
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/vr/sensord/pose_service.cpp b/services/vr/sensord/pose_service.cpp
deleted file mode 100644
index 75423bb..0000000
--- a/services/vr/sensord/pose_service.cpp
+++ /dev/null
@@ -1,649 +0,0 @@
-#define ATRACE_TAG ATRACE_TAG_INPUT
-#include "pose_service.h"
-
-#include <dlfcn.h>
-#include <errno.h>
-#include <time.h>
-
-#include <array>
-#include <cmath>
-#include <cstdint>
-#include <sstream>
-#include <type_traits>
-
-#include <cutils/properties.h>
-#include <cutils/trace.h>
-#include <dvr/performance_client_api.h>
-#include <dvr/pose_client.h>
-#include <hardware/sensors.h>
-#include <log/log.h>
-#include <pdx/default_transport/service_endpoint.h>
-#include <private/dvr/benchmark.h>
-#include <private/dvr/clock_ns.h>
-#include <private/dvr/platform_defines.h>
-#include <private/dvr/pose-ipc.h>
-#include <private/dvr/sensor_constants.h>
-#include <utils/Trace.h>
-
-using android::pdx::LocalChannelHandle;
-using android::pdx::default_transport::Endpoint;
-using android::pdx::Status;
-
-namespace android {
-namespace dvr {
-
-using Vector3d = vec3d;
-using Rotationd = quatd;
-using AngleAxisd = Eigen::AngleAxis<double>;
-
-namespace {
-// Wait a few seconds before checking if we need to disable sensors.
-static constexpr int64_t kSensorTimeoutNs = 5000000000ll;
-
-static constexpr float kTwoPi = 2.0 * M_PI;
-static constexpr float kDegToRad = M_PI / 180.f;
-
-// Head model code data.
-static constexpr float kDefaultNeckHorizontalOffset = 0.080f; // meters
-static constexpr float kDefaultNeckVerticalOffset = 0.075f; // meters
-
-static constexpr char kDisablePosePredictionProp[] =
- "persist.dvr.disable_predict";
-
-// Device type property for controlling classes of behavior that differ
-// between devices. If unset, defaults to kOrientationTypeSmartphone.
-static constexpr char kOrientationTypeProp[] = "ro.dvr.orientation_type";
-static constexpr char kEnableSensorRecordProp[] = "dvr.enable_6dof_recording";
-static constexpr char kEnableSensorPlayProp[] = "dvr.enable_6dof_playback";
-static constexpr char kEnableSensorPlayIdProp[] = "dvr.6dof_playback_id";
-static constexpr char kEnablePoseRecordProp[] = "dvr.enable_pose_recording";
-static constexpr char kPredictorTypeProp[] = "dvr.predictor_type";
-
-// Persistent buffer names.
-static constexpr char kPoseRingBufferName[] = "PoseService:RingBuffer";
-
-static constexpr int kDatasetIdLength = 36;
-static constexpr char kDatasetIdChars[] = "0123456789abcdef-";
-
-static constexpr int kLatencyWindowSize = 200;
-
-// These are the flags used by BufferProducer::CreatePersistentUncachedBlob,
-// plus PRIVATE_ADSP_HEAP to allow access from the DSP.
-static constexpr int kPoseRingBufferFlags =
- GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_RARELY |
- GRALLOC_USAGE_PRIVATE_UNCACHED | GRALLOC_USAGE_PRIVATE_ADSP_HEAP;
-
-std::string GetPoseModeString(DvrPoseMode mode) {
- switch (mode) {
- case DVR_POSE_MODE_6DOF:
- return "DVR_POSE_MODE_6DOF";
- case DVR_POSE_MODE_3DOF:
- return "DVR_POSE_MODE_3DOF";
- case DVR_POSE_MODE_MOCK_FROZEN:
- return "DVR_POSE_MODE_MOCK_FROZEN";
- case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
- return "DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW";
- case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
- return "DVR_POSE_MODE_MOCK_HEAD_TURN_FAST";
- case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
- return "DVR_POSE_MODE_MOCK_ROTATE_SLOW";
- case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
- return "DVR_POSE_MODE_MOCK_ROTATE_MEDIUM";
- case DVR_POSE_MODE_MOCK_ROTATE_FAST:
- return "DVR_POSE_MODE_MOCK_ROTATE_FAST";
- case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE:
- return "DVR_POSE_MODE_MOCK_CIRCLE_STRAFE";
- default:
- return "Unknown pose mode";
- }
-}
-
-} // namespace
-
-PoseService::PoseService(SensorThread* sensor_thread)
- : BASE("PoseService", Endpoint::Create(DVR_POSE_SERVICE_CLIENT)),
- sensor_thread_(sensor_thread),
- last_sensor_usage_time_ns_(0),
- watchdog_shutdown_(false),
- sensors_on_(false),
- accelerometer_index_(-1),
- gyroscope_index_(-1),
- pose_mode_(DVR_POSE_MODE_6DOF),
- mapped_pose_buffer_(nullptr),
- vsync_count_(0),
- photon_timestamp_(0),
- // Will be updated by external service, but start with a non-zero value:
- display_period_ns_(16000000),
- sensor_latency_(kLatencyWindowSize) {
- last_known_pose_ = {
- .orientation = {1.0f, 0.0f, 0.0f, 0.0f},
- .translation = {0.0f, 0.0f, 0.0f, 0.0f},
- .angular_velocity = {0.0f, 0.0f, 0.0f, 0.0f},
- .velocity = {0.0f, 0.0f, 0.0f, 0.0f},
- .timestamp_ns = 0,
- .flags = DVR_POSE_FLAG_HEAD,
- .pad = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- };
-
- switch (property_get_int32(kOrientationTypeProp, kOrientationTypePortrait)) {
- case kOrientationTypeLandscape:
- device_orientation_type_ = kOrientationTypeLandscape;
- break;
- default:
- device_orientation_type_ = kOrientationTypePortrait;
- break;
- }
-
- ring_buffer_ =
- BufferProducer::Create(kPoseRingBufferName, 0, 0, kPoseRingBufferFlags,
- sizeof(DvrPoseRingBuffer));
- if (!ring_buffer_) {
- ALOGE("PoseService::PoseService: Failed to create/get pose ring buffer!");
- return;
- }
-
- void* addr = nullptr;
- int ret =
- ring_buffer_->GetBlobReadWritePointer(sizeof(DvrPoseRingBuffer), &addr);
- if (ret < 0) {
- ALOGE("PoseService::PoseService: Failed to map pose ring buffer: %s",
- strerror(-ret));
- return;
- }
- memset(addr, 0, sizeof(DvrPoseRingBuffer));
- mapped_pose_buffer_ = static_cast<DvrPoseRingBuffer*>(addr);
- addr = nullptr;
-
- for (int i = 0; i < sensor_thread->GetSensorCount(); ++i) {
- if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_ACCELEROMETER)
- accelerometer_index_ = i;
- if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_GYROSCOPE_UNCALIBRATED)
- gyroscope_index_ = i;
- }
- // If we failed to find the uncalibrated gyroscope, use the regular one.
- if (gyroscope_index_ < 0) {
- ALOGW("PoseService was unable to find uncalibrated gyroscope");
- for (int i = 0; i < sensor_thread->GetSensorCount(); ++i) {
- ALOGI("Type %d", sensor_thread->GetSensorType(i));
- if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_GYROSCOPE)
- gyroscope_index_ = i;
- }
- }
-
- if (accelerometer_index_ < 0) {
- ALOGE("PoseService was unable to find accelerometer");
- }
- if (gyroscope_index_ < 0) {
- ALOGE("PoseService was unable to find gyroscope");
- }
-
- {
- std::lock_guard<std::mutex> lock(mutex_);
- KickSensorWatchDogThread();
- }
-
- // Read the persistent dvr flags before using them in SetPoseMode.
- enable_pose_prediction_ =
- property_get_bool(kDisablePosePredictionProp, 0) == 0;
-
- enable_sensor_recording_ = property_get_bool(kEnableSensorRecordProp, 0) == 1;
-
- enable_sensor_playback_ = property_get_bool(kEnableSensorPlayProp, 0) == 1;
-
- if (enable_sensor_playback_) {
- char dataset_id[PROPERTY_VALUE_MAX];
- property_get(kEnableSensorPlayIdProp, dataset_id, "");
- sensor_playback_id_ = std::string(dataset_id);
-
- if (sensor_playback_id_.length() != kDatasetIdLength ||
- sensor_playback_id_.find_first_not_of(kDatasetIdChars) !=
- std::string::npos) {
- ALOGE("Error: invalid playback id %s", sensor_playback_id_.c_str());
- sensor_playback_id_ = "";
- enable_sensor_playback_ = false;
- } else {
- ALOGI("Playback id %s", sensor_playback_id_.c_str());
- }
- }
-
- switch (property_get_int32(kPredictorTypeProp, 0)) {
- case 1:
- pose_predictor_ = posepredictor::Predictor::Create(
- posepredictor::PredictorType::Quadric);
- default:
- pose_predictor_ = posepredictor::Predictor::Create(
- posepredictor::PredictorType::Linear);
- }
-
- enable_pose_recording_ = property_get_bool(kEnablePoseRecordProp, 0) == 1;
-
- SetPoseMode(DVR_POSE_MODE_6DOF);
-}
-
-PoseService::~PoseService() {
- if (watchdog_thread_.get_id() != std::thread::id()) {
- {
- std::lock_guard<std::mutex> guard(mutex_);
- watchdog_shutdown_ = true;
- watchdog_condition_.notify_one();
- }
- watchdog_thread_.join();
- }
-}
-
-void PoseService::KickSensorWatchDogThread() {
- // This method is called every frame while rendering so we want to make sure
- // it is very light weight with synchronization.
- // TODO(jbates) For better performance, we can consider a lock-free atomic
- // solution instead of locking this mutex.
-
- // Update the usage time. The watchdog thread will poll this value to know
- // when to disable sensors.
- last_sensor_usage_time_ns_ = GetSystemClockNs();
-
- // If sensors are still on, there's nothing else to do.
- if (sensors_on_)
- return;
-
- // Enable sensors.
- ALOGI("Start using sensors.");
- sensors_on_ = true;
- if (accelerometer_index_ >= 0) {
- sensor_thread_->StartUsingSensor(accelerometer_index_);
- }
- if (gyroscope_index_ >= 0) {
- sensor_thread_->StartUsingSensor(gyroscope_index_);
- }
-
- // Tell the thread to wake up to disable the sensors when no longer needed.
- watchdog_condition_.notify_one();
-
- if (watchdog_thread_.get_id() == std::thread::id()) {
- // The sensor watchdog thread runs while sensors are in use. When no APIs
- // have requested sensors beyond a threshold (5 seconds), sensors are
- // disabled.
- watchdog_thread_ = std::thread([this] {
- std::unique_lock<std::mutex> lock(mutex_);
- while (!watchdog_shutdown_) {
- int64_t remaining_sensor_time_ns =
- last_sensor_usage_time_ns_ + kSensorTimeoutNs - GetSystemClockNs();
-
- if (remaining_sensor_time_ns > 0) {
- // Wait for the remaining usage time before checking again.
- watchdog_condition_.wait_for(
- lock, std::chrono::nanoseconds(remaining_sensor_time_ns));
- continue;
- }
-
- if (sensors_on_) {
- // Disable sensors.
- ALOGI("Stop using sensors.");
- sensors_on_ = false;
- if (accelerometer_index_ >= 0) {
- sensor_thread_->StopUsingSensor(accelerometer_index_);
- }
- if (gyroscope_index_ >= 0) {
- sensor_thread_->StopUsingSensor(gyroscope_index_);
- }
- }
-
- // Wait for sensors to be enabled again.
- watchdog_condition_.wait(lock);
- }
- });
- }
-}
-
-bool PoseService::IsInitialized() const {
- return BASE::IsInitialized() && ring_buffer_ && mapped_pose_buffer_;
-}
-
-void PoseService::WriteAsyncPoses(const Vector3d& start_t_head,
- const Rotationd& start_q_head,
- int64_t pose_timestamp) {
- if (enable_external_pose_) {
- return;
- }
-
- // If playing back data, the timestamps are different enough from the
- // current time that prediction doesn't work. This hack pretends that
- // there was one nanosecond of latency between the sensors and here.
- if (enable_sensor_playback_)
- pose_timestamp = GetSystemClockNs() - 1;
-
- // Feed the sample to the predictor
- AddPredictorPose(pose_predictor_.get(), start_t_head, start_q_head,
- pose_timestamp, &last_known_pose_);
-
- // Store one extra value, because the application is working on the next
- // frame and expects the minimum count from that frame on.
- for (uint32_t i = 0; i < kPoseAsyncBufferMinFutureCount + 1; ++i) {
- int64_t target_time = photon_timestamp_ + i * display_period_ns_;
-
- // TODO(jbates, cwolfe) For the DSP code, we may still want poses even when
- // the vsyncs are not ticking up. But it's important not to update the pose
- // data that's in the past so that applications have the most accurate
- // estimate of the last frame's *actual* pose, so that they can update
- // simulations and calculate collisions, etc.
- if (target_time < pose_timestamp) {
- // Already in the past, do not update this head pose slot.
- continue;
- }
-
- // Write to the actual shared memory ring buffer.
- uint32_t index = ((vsync_count_ + i) & kPoseAsyncBufferIndexMask);
-
- // Make a pose prediction
- if (enable_pose_prediction_) {
- PredictPose(pose_predictor_.get(), target_time,
- target_time + right_eye_photon_offset_ns_,
- mapped_pose_buffer_->ring + index);
- } else {
- mapped_pose_buffer_->ring[index] = last_known_pose_;
- }
- }
-}
-
-void PoseService::UpdatePoseMode() {
- ALOGI_IF(TRACE, "UpdatePoseMode: %f %f %f", last_known_pose_.translation[0],
- last_known_pose_.translation[1], last_known_pose_.translation[2]);
-
- const int64_t current_time_ns = GetSystemClockNs();
-
- const PoseState pose_state = sensor_fusion_.GetLatestPoseState();
-
- switch (pose_mode_) {
- case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
- case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
- case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
- case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
- case DVR_POSE_MODE_MOCK_ROTATE_FAST:
- case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE: {
- // Calculate a pose based on monotic system time.
- const Vector3d y_axis(0., 1., 0.);
- double time_s = current_time_ns / 1e9;
-
- // Generate fake yaw data.
- float yaw = 0.0f;
- Vector3d head_trans(0.0, 0.0, 0.0);
- switch (pose_mode_) {
- default:
- case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
- // Pan across 120 degrees in 15 seconds.
- yaw = std::cos(kTwoPi * time_s / 15.0) * 60.0 * kDegToRad;
- break;
- case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
- // Pan across 120 degrees in 4 seconds.
- yaw = std::cos(kTwoPi * time_s / 4.0) * 60.0 * kDegToRad;
- break;
- case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
- // Rotate 5 degrees per second.
- yaw = std::fmod(time_s * 5.0 * kDegToRad, kTwoPi);
- break;
- case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
- // Rotate 30 degrees per second.
- yaw = std::fmod(time_s * 30.0 * kDegToRad, kTwoPi);
- break;
- case DVR_POSE_MODE_MOCK_ROTATE_FAST:
- // Rotate 90 degrees per second.
- yaw = std::fmod(time_s * 90.0 * kDegToRad, kTwoPi);
- break;
- case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE:
- // Circle strafe around origin at distance of 3 meters.
- yaw = std::fmod(time_s * 30.0 * kDegToRad, kTwoPi);
- head_trans += 3.0 * Vector3d(sin(yaw), 0.0, cos(yaw));
- break;
- }
-
- // Calculate the simulated head rotation in an absolute "head" space.
- // This space is not related to start space and doesn't need a
- // reference.
- Rotationd head_rotation_in_head_space(AngleAxisd(yaw, y_axis));
-
- WriteAsyncPoses(head_trans, head_rotation_in_head_space, current_time_ns);
- break;
- }
- case DVR_POSE_MODE_MOCK_FROZEN: {
- // Even when frozen, we still provide a current timestamp, because
- // consumers may rely on it being monotonic.
-
- Rotationd start_from_head_rotation(
- frozen_state_.head_from_start_rotation.w,
- frozen_state_.head_from_start_rotation.x,
- frozen_state_.head_from_start_rotation.y,
- frozen_state_.head_from_start_rotation.z);
- Vector3d head_from_start_translation(
- frozen_state_.head_from_start_translation.x,
- frozen_state_.head_from_start_translation.y,
- frozen_state_.head_from_start_translation.z);
-
- WriteAsyncPoses(head_from_start_translation, start_from_head_rotation,
- current_time_ns);
- break;
- }
- case DVR_POSE_MODE_3DOF: {
- // Sensor fusion provides IMU-space data, transform to world space.
-
- // Constants to perform IMU orientation adjustments. Note that these
- // calculations will be optimized out in a release build.
- constexpr double k90DegInRad = 90.0 * M_PI / 180.0;
- const Vector3d kVecAxisX(1.0, 0.0, 0.0);
- const Vector3d kVecAxisY(0.0, 1.0, 0.0);
- const Vector3d kVecAxisZ(0.0, 0.0, 1.0);
- const Rotationd kRotX90(AngleAxisd(k90DegInRad, kVecAxisX));
-
- Rotationd start_from_head_rotation;
- if (device_orientation_type_ == kOrientationTypeLandscape) {
- const Rotationd kPostRotation =
- kRotX90 * Rotationd(AngleAxisd(-k90DegInRad, kVecAxisY));
- start_from_head_rotation =
- (pose_state.sensor_from_start_rotation * kPostRotation).inverse();
- } else if (device_orientation_type_ == kOrientationTypeLandscape180) {
- const Rotationd kPreRotation =
- Rotationd(AngleAxisd(k90DegInRad * 2.0, kVecAxisY)) *
- Rotationd(AngleAxisd(k90DegInRad * 2.0, kVecAxisZ));
- const Rotationd kPostRotation = kRotX90;
- start_from_head_rotation =
- (kPreRotation *
- pose_state.sensor_from_start_rotation * kPostRotation)
- .inverse();
- } else {
- const Rotationd kPreRotation =
- Rotationd(AngleAxisd(k90DegInRad, kVecAxisZ));
- const Rotationd kPostRotation = kRotX90;
- start_from_head_rotation =
- (kPreRotation * pose_state.sensor_from_start_rotation *
- kPostRotation)
- .inverse();
- }
- start_from_head_rotation.normalize();
-
- // Neck / head model code procedure for when no 6dof is available.
- // To apply the neck model, first translate the head pose to the new
- // center of eyes, then rotate around the origin (the original head
- // pos).
- Vector3d position =
- start_from_head_rotation * Vector3d(0.0, kDefaultNeckVerticalOffset,
- -kDefaultNeckHorizontalOffset);
-
- // Update the current latency model.
- if (pose_state.timestamp_ns != 0) {
- sensor_latency_.AddLatency(GetSystemClockNs() -
- pose_state.timestamp_ns);
- }
-
- // Update the timestamp with the expected latency.
- WriteAsyncPoses(
- position, start_from_head_rotation,
- pose_state.timestamp_ns + sensor_latency_.CurrentLatencyEstimate());
- break;
- }
- default:
- case DVR_POSE_MODE_6DOF:
- ALOGE("ERROR: invalid pose mode");
- break;
- }
-}
-
-pdx::Status<void> PoseService::HandleMessage(pdx::Message& msg) {
- pdx::Status<void> ret;
- const pdx::MessageInfo& info = msg.GetInfo();
- switch (info.op) {
- case DVR_POSE_NOTIFY_VSYNC: {
- std::lock_guard<std::mutex> guard(mutex_);
-
- // Kick the sensor thread, because we are still rendering.
- KickSensorWatchDogThread();
-
- const struct iovec data[] = {
- {.iov_base = &vsync_count_, .iov_len = sizeof(vsync_count_)},
- {.iov_base = &photon_timestamp_,
- .iov_len = sizeof(photon_timestamp_)},
- {.iov_base = &display_period_ns_,
- .iov_len = sizeof(display_period_ns_)},
- {.iov_base = &right_eye_photon_offset_ns_,
- .iov_len = sizeof(right_eye_photon_offset_ns_)},
- };
- ret = msg.ReadVectorAll(data);
- if (ret && !enable_external_pose_) {
- mapped_pose_buffer_->vsync_count = vsync_count_;
- }
-
- // TODO(jbates, eieio): make this async, no need to reply.
- REPLY_MESSAGE(msg, ret, error);
- }
- case DVR_POSE_POLL: {
- ATRACE_NAME("pose_poll");
- std::lock_guard<std::mutex> guard(mutex_);
-
- DvrPoseState client_state;
- client_state = {
- .head_from_start_rotation = {last_known_pose_.orientation[0],
- last_known_pose_.orientation[1],
- last_known_pose_.orientation[2],
- last_known_pose_.orientation[3]},
- .head_from_start_translation = {last_known_pose_.translation[0],
- last_known_pose_.translation[1],
- last_known_pose_.translation[2]},
- .timestamp_ns = static_cast<uint64_t>(last_known_pose_.timestamp_ns),
- .sensor_from_start_rotation_velocity = {
- last_known_pose_.angular_velocity[0],
- last_known_pose_.angular_velocity[1],
- last_known_pose_.angular_velocity[2]}};
-
- Btrace("Sensor data received",
- static_cast<int64_t>(client_state.timestamp_ns));
-
- Btrace("Pose polled");
-
- ret = msg.WriteAll(&client_state, sizeof(client_state));
- REPLY_MESSAGE(msg, ret, error);
- }
- case DVR_POSE_FREEZE: {
- {
- std::lock_guard<std::mutex> guard(mutex_);
-
- DvrPoseState frozen_state;
- ret = msg.ReadAll(&frozen_state, sizeof(frozen_state));
- if (!ret) {
- REPLY_ERROR(msg, ret.error(), error);
- }
- frozen_state_ = frozen_state;
- }
- SetPoseMode(DVR_POSE_MODE_MOCK_FROZEN);
- REPLY_MESSAGE(msg, ret, error);
- }
- case DVR_POSE_SET_MODE: {
- int mode;
- {
- std::lock_guard<std::mutex> guard(mutex_);
- ret = msg.ReadAll(&mode, sizeof(mode));
- if (!ret) {
- REPLY_ERROR(msg, ret.error(), error);
- }
- if (mode < 0 || mode >= DVR_POSE_MODE_COUNT) {
- REPLY_ERROR(msg, EINVAL, error);
- }
- }
- SetPoseMode(DvrPoseMode(mode));
- REPLY_MESSAGE(msg, ret, error);
- }
- case DVR_POSE_GET_MODE: {
- std::lock_guard<std::mutex> guard(mutex_);
- int mode = pose_mode_;
- ret = msg.WriteAll(&mode, sizeof(mode));
- REPLY_MESSAGE(msg, ret, error);
- }
- case DVR_POSE_GET_RING_BUFFER: {
- std::lock_guard<std::mutex> guard(mutex_);
-
- // Kick the sensor thread, because we have a new consumer.
- KickSensorWatchDogThread();
-
- Status<LocalChannelHandle> consumer_channel =
- ring_buffer_->CreateConsumer();
- REPLY_MESSAGE(msg, consumer_channel, error);
- }
- case DVR_POSE_GET_CONTROLLER_RING_BUFFER: {
- std::lock_guard<std::mutex> guard(mutex_);
- REPLY_ERROR(msg, EINVAL, error);
- }
- case DVR_POSE_LOG_CONTROLLER: {
- std::lock_guard<std::mutex> guard(mutex_);
- REPLY_ERROR(msg, EINVAL, error);
- }
- default:
- // Do not lock mutex_ here, because this may call the on*() handlers,
- // which will lock the mutex themselves.
- ret = Service::HandleMessage(msg);
- break;
- }
-error:
- return ret;
-}
-
-std::string PoseService::DumpState(size_t /*max_length*/) {
- DvrPoseMode pose_mode;
- {
- std::lock_guard<std::mutex> guard(mutex_);
- pose_mode = pose_mode_;
- }
-
- std::ostringstream stream;
- stream << "Pose mode: " << GetPoseModeString(pose_mode);
- return stream.str();
-}
-
-void PoseService::HandleEvents(const sensors_event_t* begin_events,
- const sensors_event_t* end_events) {
- ATRACE_NAME("PoseService::HandleEvents");
- std::lock_guard<std::mutex> guard(mutex_);
-
- for (const sensors_event_t* event = begin_events; event != end_events;
- ++event) {
- if (event->type == SENSOR_TYPE_ACCELEROMETER) {
- sensor_fusion_.ProcessAccelerometerSample(
- event->acceleration.x, event->acceleration.y, event->acceleration.z,
- event->timestamp);
- } else if (event->type == SENSOR_TYPE_GYROSCOPE_UNCALIBRATED) {
- sensor_fusion_.ProcessGyroscopeSample(event->gyro.x, event->gyro.y,
- event->gyro.z, event->timestamp);
- }
- }
-
- UpdatePoseMode();
-}
-
-void PoseService::SetPoseMode(DvrPoseMode mode) {
- if (mode == DVR_POSE_MODE_6DOF) {
- // Only 3DoF is currently supported.
- mode = DVR_POSE_MODE_3DOF;
- }
-
- pose_mode_ = mode;
-
- sensor_thread_->SetPaused(false);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/pose_service.h b/services/vr/sensord/pose_service.h
deleted file mode 100644
index 7b7adec..0000000
--- a/services/vr/sensord/pose_service.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_POSE_SERVICE_H_
-#define ANDROID_DVR_SENSORD_POSE_SERVICE_H_
-
-#include <condition_variable>
-#include <forward_list>
-#include <mutex>
-#include <thread>
-#include <unordered_map>
-#include <vector>
-
-#include <dvr/pose_client.h>
-#include <pdx/service.h>
-#include <private/dvr/buffer_hub_client.h>
-#include <private/dvr/dvr_pose_predictor.h>
-#include <private/dvr/latency_model.h>
-#include <private/dvr/pose_client_internal.h>
-#include <private/dvr/ring_buffer.h>
-
-#include "sensor_fusion.h"
-#include "sensor_thread.h"
-
-namespace android {
-namespace dvr {
-
-// PoseService implements the HMD pose service over ServiceFS.
-class PoseService : public pdx::ServiceBase<PoseService> {
- public:
- ~PoseService() override;
-
- bool IsInitialized() const override;
- pdx::Status<void> HandleMessage(pdx::Message& msg) override;
- std::string DumpState(size_t max_length) override;
-
- // Handle events from the sensor HAL.
- // Safe to call concurrently with any other public member functions.
- void HandleEvents(const sensors_event_t* begin_events,
- const sensors_event_t* end_events);
-
- private:
- friend BASE;
-
- enum OrientationType {
- // Typical smartphone device (default).
- kOrientationTypePortrait = 1,
- // Landscape device.
- kOrientationTypeLandscape = 2,
- // 180 Landscape device.
- kOrientationTypeLandscape180 = 3,
- };
-
- // Initializes the service. Keeps a reference to sensor_thread, which must be
- // non-null.
- explicit PoseService(SensorThread* sensor_thread);
-
- // Kick the sensor watch dog thread which will robustly disable IMU usage
- // when there are no sensor data consumers.
- // The class mutex (mutex_) must be locked while calling this method.
- void KickSensorWatchDogThread();
-
- void UpdatePoseMode();
-
- // Update the async pose ring buffer with new pose data.
- // |start_t_head| Head position in start space.
- // |start_q_head| Head orientation quaternion in start space.
- // |pose_timestamp| System timestamp of pose data in seconds.
- // |pose_delta_time| Elapsed time in seconds between this pose and the last.
- void WriteAsyncPoses(const Eigen::Vector3<double>& start_t_head,
- const Eigen::Quaternion<double>& start_q_head,
- int64_t pose_timestamp);
-
- // Set the pose mode.
- void SetPoseMode(DvrPoseMode mode);
-
- // The abstraction around the sensor data.
- SensorThread* sensor_thread_;
-
- // Protects access to all member variables.
- std::mutex mutex_;
-
- // Watchdog thread data. The watchdog thread will ensure that sensor access
- // is disabled when nothing has been consuming it for a while.
- int64_t last_sensor_usage_time_ns_;
- std::thread watchdog_thread_;
- std::condition_variable watchdog_condition_;
- bool watchdog_shutdown_;
- bool sensors_on_;
-
- // Indices for the accelerometer and gyroscope sensors, or -1 if the sensor
- // wasn't present on construction.
- int accelerometer_index_;
- int gyroscope_index_;
-
- // The sensor fusion algorithm and its state.
- SensorFusion sensor_fusion_;
-
- // Current pose mode.
- DvrPoseMode pose_mode_;
-
- // State which is sent if pose_mode_ is DVR_POSE_MODE_MOCK_FROZEN.
- DvrPoseState frozen_state_;
-
- // Last known pose.
- DvrPoseAsync last_known_pose_;
-
- // If this flag is true, the pose published includes a small prediction of
- // where it'll be when it's consumed.
- bool enable_pose_prediction_;
-
- // Flag to turn on recording of raw sensor data
- bool enable_sensor_recording_;
-
- // Flag to log pose to a file
- bool enable_pose_recording_;
-
- // Flag to turn on playback from a saved dataset instead of using live data.
- bool enable_sensor_playback_;
-
- std::string sensor_playback_id_;
-
- // External pose generation.
- bool enable_external_pose_ = false;
-
- // The predictor to extrapolate pose samples.
- std::unique_ptr<posepredictor::Predictor> pose_predictor_;
-
- // Pose ring buffer.
- std::shared_ptr<BufferProducer> ring_buffer_;
- // Temporary mapped ring buffer.
- DvrPoseRingBuffer* mapped_pose_buffer_;
- // Current vsync info, updated by displayd.
- uint32_t vsync_count_;
- int64_t photon_timestamp_;
- int64_t display_period_ns_;
- int64_t right_eye_photon_offset_ns_ = 0;
-
- // To model the measurement - arrival latency.
- LatencyModel sensor_latency_;
-
- // Type for controlling pose orientation calculation.
- OrientationType device_orientation_type_;
-
- PoseService(const PoseService&) = delete;
- void operator=(const PoseService&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_POSE_SERVICE_H_
diff --git a/services/vr/sensord/sensor_fusion.cpp b/services/vr/sensord/sensor_fusion.cpp
deleted file mode 100644
index 5663ae4..0000000
--- a/services/vr/sensord/sensor_fusion.cpp
+++ /dev/null
@@ -1,348 +0,0 @@
-#include "sensor_fusion.h"
-
-#include <algorithm>
-#include <cmath>
-
-#include <private/dvr/eigen.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-// --- start of added bits for porting to eigen
-
-// In general, we prefer to add wrappers for things like Inverse() to minimize
-// the changes to the imported code, so that merging in upstream changes becomes
-// simpler.
-
-inline Matrix3d Inverse(const Matrix3d& matrix) { return matrix.inverse(); }
-inline Matrix3d Transpose(const Matrix3d& matrix) { return matrix.transpose(); }
-inline Matrix3d RotationMatrixNH(const Rotationd& rotation) {
- return rotation.toRotationMatrix();
-}
-inline double Length(const Vector3d& vector) { return vector.norm(); }
-
-using uint64 = uint64_t;
-
-// --- end of added bits for porting to eigen
-
-static const double kFiniteDifferencingEpsilon = 1e-7;
-static const double kEpsilon = 1e-15;
-// Default gyroscope frequency. This corresponds to 200 Hz.
-static const double kDefaultGyroscopeTimestep_s = 0.005f;
-// Maximum time between gyroscope before we start limiting the integration.
-static const double kMaximumGyroscopeSampleDelay_s = 0.04f;
-// Compute a first-order exponential moving average of changes in accel norm per
-// frame.
-static const double kSmoothingFactor = 0.5;
-// Minimum and maximum values used for accelerometer noise covariance matrix.
-// The smaller the sigma value, the more weight is given to the accelerometer
-// signal.
-static const double kMinAccelNoiseSigma = 0.75;
-static const double kMaxAccelNoiseSigma = 7.0;
-// Initial value for the diagonal elements of the different covariance matrices.
-static const double kInitialStateCovarianceValue = 25.0;
-static const double kInitialProcessCovarianceValue = 1.0;
-// Maximum accelerometer norm change allowed before capping it covariance to a
-// large value.
-static const double kMaxAccelNormChange = 0.15;
-// Timestep IIR filtering coefficient.
-static const double kTimestepFilterCoeff = 0.95;
-// Minimum number of sample for timestep filtering.
-static const uint32_t kTimestepFilterMinSamples = 10;
-
-// Z direction in start space.
-static const Vector3d kCanonicalZDirection(0.0, 0.0, 1.0);
-
-// Computes a axis angle rotation from the input vector.
-// angle = norm(a)
-// axis = a.normalized()
-// If norm(a) == 0, it returns an identity rotation.
-static Rotationd RotationFromVector(const Vector3d& a) {
- const double norm_a = Length(a);
- if (norm_a < kEpsilon) {
- return Rotationd::Identity();
- }
- return Rotationd(AngleAxisd(norm_a, a / norm_a));
-}
-
-// --- start of functions ported from pose_prediction.cc
-
-namespace pose_prediction {
-
-// Returns a rotation matrix based on the integration of the gyroscope_value
-// over the timestep_s in seconds.
-// TODO(pfg): Document the space better here.
-//
-// @param gyroscope_value gyroscope sensor values.
-// @param timestep_s integration period in seconds.
-// @return Integration of the gyroscope value the rotation is from Start to
-// Sensor Space.
-Rotationd GetRotationFromGyroscope(const Vector3d& gyroscope_value,
- double timestep_s) {
- const double velocity = Length(gyroscope_value);
-
- // When there is no rotation data return an identity rotation.
- if (velocity < kEpsilon) {
- return Rotationd::Identity();
- }
- // Since the gyroscope_value is a start from sensor transformation we need to
- // invert it to have a sensor from start transformation, hence the minus sign.
- // For more info:
- // http://developer.android.com/guide/topics/sensors/sensors_motion.html#sensors-motion-gyro
- return Rotationd(AngleAxisd(-timestep_s * velocity,
- gyroscope_value / velocity));
-}
-
-} // namespace pose_prediction
-
-// --- end of functions ported from pose_prediction.cc
-
-} // namespace
-
-SensorFusion::SensorFusion()
- : execute_reset_with_next_accelerometer_sample_(false) {
- ResetState();
-}
-
-void SensorFusion::Reset() {
- execute_reset_with_next_accelerometer_sample_ = true;
-}
-
-void SensorFusion::ResetState() {
- current_state_.timestamp_ns = 0;
- current_state_.sensor_from_start_rotation = Rotationd::Identity();
- current_state_.sensor_from_start_rotation_velocity = Vector3d::Zero();
-
- current_accelerometer_timestamp_ns_ = 0;
-
- state_covariance_ = Matrix3d::Identity() * kInitialStateCovarianceValue;
- process_covariance_ = Matrix3d::Identity() * kInitialProcessCovarianceValue;
- accelerometer_measurement_covariance_ =
- Matrix3d::Identity() * kMinAccelNoiseSigma * kMinAccelNoiseSigma;
- innovation_covariance_.setIdentity();
-
- accelerometer_measurement_jacobian_ = Matrix3d::Zero();
- kalman_gain_ = Matrix3d::Zero();
- innovation_ = Vector3d::Zero();
- accelerometer_measurement_ = Vector3d::Zero();
- prediction_ = Vector3d::Zero();
- control_input_ = Vector3d::Zero();
- state_update_ = Vector3d::Zero();
-
- moving_average_accelerometer_norm_change_ = 0.0;
-
- is_timestep_filter_initialized_ = false;
- is_gyroscope_filter_valid_ = false;
- is_aligned_with_gravity_ = false;
-}
-
-// Here I am doing something wrong relative to time stamps. The state timestamps
-// always correspond to the gyrostamps because it would require additional
-// extrapolation if I wanted to do otherwise.
-// TODO(pfg): investigate about published an updated pose after accelerometer
-// data was used for filtering.
-PoseState SensorFusion::GetLatestPoseState() const {
- std::unique_lock<std::mutex> lock(mutex_);
- return current_state_;
-}
-
-void SensorFusion::ProcessGyroscopeSample(float v_x, float v_y, float v_z,
- uint64 timestamp_ns) {
- std::unique_lock<std::mutex> lock(mutex_);
-
- // Don't accept gyroscope sample when waiting for a reset.
- if (execute_reset_with_next_accelerometer_sample_) {
- return;
- }
-
- // Discard outdated samples.
- if (current_state_.timestamp_ns >= timestamp_ns) {
- // TODO(pfg): Investigate why this happens.
- return;
- }
-
- // Checks that we received at least one gyroscope sample in the past.
- if (current_state_.timestamp_ns != 0) {
- // TODO(pfg): roll this in filter gyroscope timestep function.
- double current_timestep_s =
- static_cast<double>(timestamp_ns - current_state_.timestamp_ns) * 1e-9;
- if (current_timestep_s > kMaximumGyroscopeSampleDelay_s) {
- if (is_gyroscope_filter_valid_) {
- // Replaces the delta timestamp by the filtered estimates of the delta
- // time.
- current_timestep_s = filtered_gyroscope_timestep_s_;
- } else {
- current_timestep_s = kDefaultGyroscopeTimestep_s;
- }
- } else {
- FilterGyroscopeTimestep(current_timestep_s);
- }
-
- // Only integrate after receiving a accelerometer sample.
- if (is_aligned_with_gravity_) {
- const Rotationd rotation_from_gyroscope =
- pose_prediction::GetRotationFromGyroscope(Vector3d(v_x, v_y, v_z),
- current_timestep_s);
- current_state_.sensor_from_start_rotation =
- rotation_from_gyroscope * current_state_.sensor_from_start_rotation;
- current_state_.sensor_from_start_rotation.normalize();
- UpdateStateCovariance(RotationMatrixNH(rotation_from_gyroscope));
- state_covariance_ =
- state_covariance_ +
- (process_covariance_ * (current_timestep_s * current_timestep_s));
- }
- }
-
- // Saves gyroscope event for future prediction.
- current_state_.timestamp_ns = timestamp_ns;
- current_state_.sensor_from_start_rotation_velocity = Vector3d(v_x, v_y, v_z);
-}
-
-// TODO(pfg): move to rotation object for the input.
-Vector3d SensorFusion::ComputeInnovation(const Rotationd& pose) {
- const Vector3d predicted_down_direction =
- RotationMatrixNH(pose) * kCanonicalZDirection;
-
- const Rotationd rotation = Rotationd::FromTwoVectors(
- predicted_down_direction, accelerometer_measurement_);
- AngleAxisd angle_axis(rotation);
- return angle_axis.axis() * angle_axis.angle();
-}
-
-void SensorFusion::ComputeMeasurementJacobian() {
- for (int dof = 0; dof < 3; dof++) {
- // TODO(pfg): Create this delta rotation in the constructor and used unitX..
- Vector3d delta = Vector3d::Zero();
- delta[dof] = kFiniteDifferencingEpsilon;
-
- const Rotationd epsilon_rotation = RotationFromVector(delta);
- const Vector3d delta_rotation = ComputeInnovation(
- epsilon_rotation * current_state_.sensor_from_start_rotation);
-
- const Vector3d col =
- (innovation_ - delta_rotation) / kFiniteDifferencingEpsilon;
- accelerometer_measurement_jacobian_(0, dof) = col[0];
- accelerometer_measurement_jacobian_(1, dof) = col[1];
- accelerometer_measurement_jacobian_(2, dof) = col[2];
- }
-}
-
-void SensorFusion::ProcessAccelerometerSample(float acc_x, float acc_y,
- float acc_z,
- uint64 timestamp_ns) {
- std::unique_lock<std::mutex> lock(mutex_);
-
- // Discard outdated samples.
- if (current_accelerometer_timestamp_ns_ >= timestamp_ns) {
- // TODO(pfg): Investigate why this happens.
- return;
- }
-
- // Call reset state if required.
- if (execute_reset_with_next_accelerometer_sample_.exchange(false)) {
- ResetState();
- }
-
- accelerometer_measurement_ = Vector3d(acc_x, acc_y, acc_z);
- current_accelerometer_timestamp_ns_ = timestamp_ns;
-
- if (!is_aligned_with_gravity_) {
- // This is the first accelerometer measurement so it initializes the
- // orientation estimate.
- current_state_.sensor_from_start_rotation = Rotationd::FromTwoVectors(
- kCanonicalZDirection, accelerometer_measurement_);
- is_aligned_with_gravity_ = true;
-
- previous_accelerometer_norm_ = Length(accelerometer_measurement_);
- return;
- }
-
- UpdateMeasurementCovariance();
-
- innovation_ = ComputeInnovation(current_state_.sensor_from_start_rotation);
- ComputeMeasurementJacobian();
-
- // S = H * P * H' + R
- innovation_covariance_ = accelerometer_measurement_jacobian_ *
- state_covariance_ *
- Transpose(accelerometer_measurement_jacobian_) +
- accelerometer_measurement_covariance_;
-
- // K = P * H' * S^-1
- kalman_gain_ = state_covariance_ *
- Transpose(accelerometer_measurement_jacobian_) *
- Inverse(innovation_covariance_);
-
- // x_update = K*nu
- state_update_ = kalman_gain_ * innovation_;
-
- // P = (I - K * H) * P;
- state_covariance_ = (Matrix3d::Identity() -
- kalman_gain_ * accelerometer_measurement_jacobian_) *
- state_covariance_;
-
- // Updates pose and associate covariance matrix.
- const Rotationd rotation_from_state_update =
- RotationFromVector(state_update_);
-
- current_state_.sensor_from_start_rotation =
- rotation_from_state_update * current_state_.sensor_from_start_rotation;
- UpdateStateCovariance(RotationMatrixNH(rotation_from_state_update));
-}
-
-void SensorFusion::UpdateStateCovariance(const Matrix3d& motion_update) {
- state_covariance_ =
- motion_update * state_covariance_ * Transpose(motion_update);
-}
-
-void SensorFusion::FilterGyroscopeTimestep(double gyroscope_timestep_s) {
- if (!is_timestep_filter_initialized_) {
- // Initializes the filter.
- filtered_gyroscope_timestep_s_ = gyroscope_timestep_s;
- num_gyroscope_timestep_samples_ = 1;
- is_timestep_filter_initialized_ = true;
- return;
- }
-
- // Computes the IIR filter response.
- filtered_gyroscope_timestep_s_ =
- kTimestepFilterCoeff * filtered_gyroscope_timestep_s_ +
- (1 - kTimestepFilterCoeff) * gyroscope_timestep_s;
- ++num_gyroscope_timestep_samples_;
-
- if (num_gyroscope_timestep_samples_ > kTimestepFilterMinSamples) {
- is_gyroscope_filter_valid_ = true;
- }
-}
-
-void SensorFusion::UpdateMeasurementCovariance() {
- const double current_accelerometer_norm = Length(accelerometer_measurement_);
- // Norm change between current and previous accel readings.
- const double current_accelerometer_norm_change =
- std::abs(current_accelerometer_norm - previous_accelerometer_norm_);
- previous_accelerometer_norm_ = current_accelerometer_norm;
-
- moving_average_accelerometer_norm_change_ =
- kSmoothingFactor * current_accelerometer_norm_change +
- (1. - kSmoothingFactor) * moving_average_accelerometer_norm_change_;
-
- // If we hit the accel norm change threshold, we use the maximum noise sigma
- // for the accel covariance. For anything below that, we use a linear
- // combination between min and max sigma values.
- const double norm_change_ratio =
- moving_average_accelerometer_norm_change_ / kMaxAccelNormChange;
- const double accelerometer_noise_sigma = std::min(
- kMaxAccelNoiseSigma,
- kMinAccelNoiseSigma +
- norm_change_ratio * (kMaxAccelNoiseSigma - kMinAccelNoiseSigma));
-
- // Updates the accel covariance matrix with the new sigma value.
- accelerometer_measurement_covariance_ = Matrix3d::Identity() *
- accelerometer_noise_sigma *
- accelerometer_noise_sigma;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/sensor_fusion.h b/services/vr/sensord/sensor_fusion.h
deleted file mode 100644
index 0ceae21..0000000
--- a/services/vr/sensord/sensor_fusion.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
-#define ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
-
-#include <atomic>
-#include <cstdlib>
-#include <mutex>
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-using Matrix3d = Eigen::Matrix<double, 3, 3>;
-using Rotationd = quatd;
-using Vector3d = vec3d;
-using AngleAxisd = Eigen::AngleAxisd;
-
-// Ported from GVR's pose_state.h.
-// Stores a 3dof pose plus derivatives. This can be used for prediction.
-struct PoseState {
- // Time in nanoseconds for the current pose.
- uint64_t timestamp_ns;
-
- // Rotation from Sensor Space to Start Space.
- Rotationd sensor_from_start_rotation;
-
- // First derivative of the rotation.
- // TODO(pfg): currently storing gyro data, switch to first derivative instead.
- Vector3d sensor_from_start_rotation_velocity;
-};
-
-// Sensor fusion class that implements an Extended Kalman Filter (EKF) to
-// estimate a 3D rotation from a gyroscope and and accelerometer.
-// This system only has one state, the pose. It does not estimate any velocity
-// or acceleration.
-//
-// To learn more about Kalman filtering one can read this article which is a
-// good introduction: http://en.wikipedia.org/wiki/Kalman_filter
-//
-// Start Space is :
-// z is up.
-// y is forward based on the first sensor data.
-// x = y \times z
-// Sensor Space follows the android specification {@link
-// http://developer.android.com/guide/topics/sensors/sensors_overview.html#sensors-coords}
-// See http://go/vr-coords for definitions of Start Space and Sensor Space.
-//
-// This is a port from GVR's SensorFusion code (See
-// https://cs/vr/gvr/sensors/sensor_fusion.h)
-// which in turn is a port from java of OrientationEKF (See
-// https://cs/java/com/google/vr/cardboard/vrtoolkit/vrtoolkit/src/main/java/com/google/vrtoolkit/cardboard/sensors/internal/OrientationEKF.java)
-class SensorFusion {
- public:
- SensorFusion();
- SensorFusion(const SensorFusion&) = delete;
- void operator=(const SensorFusion&) = delete;
-
- // Resets the state of the sensor fusion. It sets the velocity for
- // prediction to zero. The reset will happen with the next
- // accelerometer sample. Gyroscope sample will be discarded until a new
- // accelerometer sample arrives.
- void Reset();
-
- // Gets the PoseState representing the latest pose and derivatives at a
- // particular timestamp as estimated by SensorFusion.
- PoseState GetLatestPoseState() const;
-
- // Processes one gyroscope sample event. This updates the pose of the system
- // and the prediction model. The gyroscope data is assumed to be in axis angle
- // form. Angle = ||v|| and Axis = v / ||v||, with v = [v_x, v_y, v_z]^T.
- //
- // @param v_x velocity in x.
- // @param v_y velocity in y.
- // @param v_z velocity in z.
- // @param timestamp_ns gyroscope event timestamp in nanosecond.
- void ProcessGyroscopeSample(float v_x, float v_y, float v_z,
- uint64_t timestamp_ns);
-
- // Processes one accelerometer sample event. This updates the pose of the
- // system. If the Accelerometer norm changes too much between sample it is not
- // trusted as much.
- //
- // @param acc_x accelerometer data in x.
- // @param acc_y accelerometer data in y.
- // @param acc_z accelerometer data in z.
- // @param timestamp_ns accelerometer event timestamp in nanosecond.
- void ProcessAccelerometerSample(float acc_x, float acc_y, float acc_z,
- uint64_t timestamp_ns);
-
- private:
- // Estimates the average timestep between gyroscope event.
- void FilterGyroscopeTimestep(double gyroscope_timestep);
-
- // Updates the state covariance with an incremental motion. It changes the
- // space of the quadric.
- void UpdateStateCovariance(const Matrix3d& motion_update);
-
- // Computes the innovation vector of the Kalman based on the input pose.
- // It uses the latest measurement vector (i.e. accelerometer data), which must
- // be set prior to calling this function.
- Vector3d ComputeInnovation(const Rotationd& pose);
-
- // This computes the measurement_jacobian_ via numerical differentiation based
- // on the current value of sensor_from_start_rotation_.
- void ComputeMeasurementJacobian();
-
- // Updates the accelerometer covariance matrix.
- //
- // This looks at the norm of recent accelerometer readings. If it has changed
- // significantly, it means the phone receives additional acceleration than
- // just gravity, and so the down vector information gravity signal is noisier.
- //
- // TODO(dcoz,pfg): this function is very simple, we probably need something
- // more elaborated here once we have proper regression testing.
- void UpdateMeasurementCovariance();
-
- // Reset all internal states. This is not thread safe. Lock should be acquired
- // outside of it. This function is called in ProcessAccelerometerSample.
- void ResetState();
-
- // Current transformation from Sensor Space to Start Space.
- // x_sensor = sensor_from_start_rotation_ * x_start;
- PoseState current_state_;
-
- // Filtering of the gyroscope timestep started?
- bool is_timestep_filter_initialized_;
- // Filtered gyroscope timestep valid?
- bool is_gyroscope_filter_valid_;
- // Sensor fusion currently aligned with gravity? After initialization
- // it will requires a couple of accelerometer data for the system to get
- // aligned.
- bool is_aligned_with_gravity_;
-
- // Covariance of Kalman filter state (P in common formulation).
- Matrix3d state_covariance_;
- // Covariance of the process noise (Q in common formulation).
- Matrix3d process_covariance_;
- // Covariance of the accelerometer measurement (R in common formulation).
- Matrix3d accelerometer_measurement_covariance_;
- // Covariance of innovation (S in common formulation).
- Matrix3d innovation_covariance_;
- // Jacobian of the measurements (H in common formulation).
- Matrix3d accelerometer_measurement_jacobian_;
- // Gain of the Kalman filter (K in common formulation).
- Matrix3d kalman_gain_;
- // Parameter update a.k.a. innovation vector. (\nu in common formulation).
- Vector3d innovation_;
- // Measurement vector (z in common formulation).
- Vector3d accelerometer_measurement_;
- // Current prediction vector (g in common formulation).
- Vector3d prediction_;
- // Control input, currently this is only the gyroscope data (\mu in common
- // formulation).
- Vector3d control_input_;
- // Update of the state vector. (x in common formulation).
- Vector3d state_update_;
-
- // Time of the last accelerometer processed event.
- uint64_t current_accelerometer_timestamp_ns_;
-
- // Estimates of the timestep between gyroscope event in seconds.
- double filtered_gyroscope_timestep_s_;
- // Number of timestep samples processed so far by the filter.
- uint32_t num_gyroscope_timestep_samples_;
- // Norm of the accelerometer for the previous measurement.
- double previous_accelerometer_norm_;
- // Moving average of the accelerometer norm changes. It is computed for every
- // sensor datum.
- double moving_average_accelerometer_norm_change_;
-
- // Flag indicating if a state reset should be executed with the next
- // accelerometer sample.
- std::atomic<bool> execute_reset_with_next_accelerometer_sample_;
-
- mutable std::mutex mutex_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
diff --git a/services/vr/sensord/sensor_hal_thread.cpp b/services/vr/sensord/sensor_hal_thread.cpp
deleted file mode 100644
index c321d4f..0000000
--- a/services/vr/sensord/sensor_hal_thread.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-#include "sensor_hal_thread.h"
-
-#include <dvr/performance_client_api.h>
-#include <log/log.h>
-
-namespace android {
-namespace dvr {
-
-SensorHalThread::SensorHalThread(bool* out_success)
- : shutting_down_(false),
- paused_(false),
- sensor_module_(nullptr),
- sensor_device_(nullptr),
- sensor_list_(nullptr) {
- // Assume failure; we will change this to true on success.
- *out_success = false;
-
- // TODO(segal): module & device should be singletons.
- int32_t err = hw_get_module_by_class(SENSORS_HARDWARE_MODULE_ID, "platform",
- (hw_module_t const**)&sensor_module_);
-
- if (err) {
- ALOGE("couldn't load %s module (%s)", SENSORS_HARDWARE_MODULE_ID,
- strerror(-err));
- return;
- }
-
- err = sensors_open_1(&sensor_module_->common, &sensor_device_);
- if (err) {
- ALOGE("couldn't open device for module %s (%s)", SENSORS_HARDWARE_MODULE_ID,
- strerror(-err));
- return;
- }
-
- const int sensor_count =
- sensor_module_->get_sensors_list(sensor_module_, &sensor_list_);
-
- // Deactivate all of the sensors initially.
- sensor_user_count_.resize(sensor_count, 0);
- for (int i = 0; i < sensor_count; ++i) {
- err = sensor_device_->activate(
- reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
- sensor_list_[i].handle, 0);
-
- if (err) {
- ALOGE("failed to deactivate sensor %d (%s)", i, strerror(-err));
- return;
- }
- }
-
- // At this point, we've successfully initialized everything.
- *out_success = true;
-}
-
-SensorHalThread::~SensorHalThread() {
- {
- std::unique_lock<std::mutex> lock(mutex_);
- shutting_down_ = true;
- condition_.notify_one();
- }
-
- // Implicitly joins *thread_ if it's running.
-}
-
-void SensorHalThread::StartPolling(const EventConsumer& consumer) {
- if (thread_) {
- ALOGE("SensorHalThread::Start() called but thread is already running!");
- return;
- }
-
- thread_.reset(new std::thread([this, consumer] {
- const int priority_error = dvrSetSchedulerClass(0, "sensors:high");
- LOG_ALWAYS_FATAL_IF(
- priority_error < 0,
- "SensorHalTread::StartPolling: Failed to set scheduler class: %s",
- strerror(-priority_error));
-
- for (;;) {
- for (;;) {
- std::unique_lock<std::mutex> lock(mutex_);
- if (shutting_down_)
- return;
- if (!paused_)
- break;
- condition_.wait(lock);
- }
- const int kMaxEvents = 100;
- sensors_event_t events[kMaxEvents];
- ssize_t event_count = 0;
- do {
- if (sensor_device_) {
- event_count = sensor_device_->poll(
- reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
- events, kMaxEvents);
- } else {
- // When there is no sensor_device_, we still call the consumer at
- // regular intervals in case mock poses are in use. Note that this
- // will never be the case for production devices, but this helps
- // during bringup.
- usleep(5000);
- }
- } while (event_count == -EINTR);
- if (event_count == kMaxEvents)
- ALOGI("max events (%d) reached", kMaxEvents);
-
- if (event_count >= 0) {
- consumer(events, events + event_count);
- } else {
- ALOGE(
- "SensorHalThread::StartPolling: Error while polling sensor: %s "
- "(%zd)",
- strerror(-event_count), -event_count);
- }
- }
- }));
-}
-
-void SensorHalThread::SetPaused(bool is_paused) {
- std::unique_lock<std::mutex> lock(mutex_);
- paused_ = is_paused;
- condition_.notify_one();
-}
-
-void SensorHalThread::StartUsingSensor(const int sensor_index) {
- if (sensor_index < 0 || sensor_index >= GetSensorCount()) {
- ALOGE("StartUsingSensor(): sensor index %d out of range [0, %d)",
- sensor_index, GetSensorCount());
- return;
- }
-
- std::lock_guard<std::mutex> guard(user_count_mutex_);
- if (sensor_user_count_[sensor_index]++ == 0) {
- sensor_device_->activate(
- reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
- sensor_list_[sensor_index].handle, 1);
- sensor_device_->setDelay(
- reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
- sensor_list_[sensor_index].handle, 0);
- }
-}
-
-void SensorHalThread::StopUsingSensor(const int sensor_index) {
- if (sensor_index < 0 || sensor_index >= GetSensorCount()) {
- ALOGE("StopUsingSensor(): sensor index %d out of range [0, %d)",
- sensor_index, GetSensorCount());
- return;
- }
-
- std::lock_guard<std::mutex> guard(user_count_mutex_);
- if (--sensor_user_count_[sensor_index] == 0) {
- sensor_device_->activate(
- reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
- sensor_list_[sensor_index].handle, 0);
- }
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/sensor_hal_thread.h b/services/vr/sensord/sensor_hal_thread.h
deleted file mode 100644
index 9220757..0000000
--- a/services/vr/sensord/sensor_hal_thread.h
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
-#define ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
-
-#include <hardware/sensors.h>
-
-#include <atomic>
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-#include "sensor_thread.h"
-
-namespace android {
-namespace dvr {
-
-// Manages initialization and polling of the sensor HAL. Polling is performed
-// continuously on a thread that passes events along to an arbitrary consumer.
-// All const member functions are thread-safe; otherwise, thread safety is noted
-// for each function.
-class SensorHalThread : public SensorThread {
- public:
- // Initializes the sensor HAL, but does not yet start polling (see Start()
- // below). Sets *out_success to true on success; otherwise, sets *out_success
- // to false and logs an error.
- explicit SensorHalThread(bool* out_success);
-
- // Tells the polling thread to shut down if it's running, and waits for it to
- // complete its polling loop.
- ~SensorHalThread() override;
-
- // Begins polling on the thread. The provided consumer will be notified of
- // events. Event notification occurs on the polling thread.
- // Calling Start() more than once on an instance of SensorHalThread is
- // invalid.
- void StartPolling(const EventConsumer& consumer) override;
-
- // Set whether the sensor polling thread is paused or not. This is useful
- // while we need to support both 3DoF and 6DoF codepaths. This 3DoF codepath
- // must be paused while the 6DoF codepath is using the IMU event stream.
- void SetPaused(bool is_paused) override;
-
- // Increase the number of users of the given sensor by one. Activates the
- // sensor if it wasn't already active.
- // Safe to call concurrently with any other functions in this class.
- void StartUsingSensor(int sensor_index) override;
-
- // Decrease the number of users of the given sensor by one. Deactivates the
- // sensor if its usage count has dropped to zero.
- // Safe to call concurrently with any other functions in this class.
- void StopUsingSensor(int sensor_index) override;
-
- // The number of sensors that are available. Returns a negative number if
- // initialization failed.
- int GetSensorCount() const override {
- return static_cast<int>(sensor_user_count_.size());
- }
-
- // The underlying sensor HAL data structure for the sensor at the given index.
- int GetSensorType(int index) const override {
- return sensor_list_[index].type;
- }
-
- private:
- // The actual thread on which we consume events.
- std::unique_ptr<std::thread> thread_;
-
- // Mutex for access to shutting_down_ and paused_ members.
- std::mutex mutex_;
-
- // Condition for signaling pause/unpause to the thread.
- std::condition_variable condition_;
-
- // If this member is set to true, the thread will stop running at its next
- // iteration. Only set with the mutex held and signal condition_ when changed.
- bool shutting_down_;
-
- // If this member is set to true, the thread will pause at its next
- // iteration. Only set with the mutex held and signal condition_ when changed.
- bool paused_;
-
- // HAL access
- struct sensors_module_t* sensor_module_;
- sensors_poll_device_1_t* sensor_device_;
-
- // Contiguous array of available sensors, owned by the sensor HAL.
- const sensor_t* sensor_list_;
-
- // Mutex that protects access to sensor_user_count_.data().
- std::mutex user_count_mutex_;
-
- // A count of how many users each sensor has. Protected by user_count_mutex.
- std::vector<int> sensor_user_count_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
diff --git a/services/vr/sensord/sensor_ndk_thread.cpp b/services/vr/sensord/sensor_ndk_thread.cpp
deleted file mode 100644
index 9c3abbc..0000000
--- a/services/vr/sensord/sensor_ndk_thread.cpp
+++ /dev/null
@@ -1,269 +0,0 @@
-#include "sensor_ndk_thread.h"
-
-#include <dvr/performance_client_api.h>
-#include <log/log.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-static constexpr int kLooperIdUser = 5;
-} // namespace
-
-SensorNdkThread::SensorNdkThread(bool* out_success)
- : shutting_down_(false),
- paused_(true),
- thread_started_(false),
- initialization_result_(false),
- looper_(nullptr),
- sensor_manager_(nullptr),
- event_queue_(nullptr),
- sensor_list_(nullptr),
- sensor_count_(0) {
- // Assume failure; we will change this to true on success.
- *out_success = false;
-
- // These structs are the same, but sanity check the sizes.
- static_assert(sizeof(sensors_event_t) == sizeof(ASensorEvent),
- "Error: sizeof(sensors_event_t) != sizeof(ASensorEvent)");
-
- thread_.reset(new std::thread([this] {
- const int priority_error = dvrSetSchedulerClass(0, "sensors:high");
- LOG_ALWAYS_FATAL_IF(
- priority_error < 0,
- "SensorHalTread::StartPolling: Failed to set scheduler class: %s",
- strerror(-priority_error));
-
- // Start ALooper and initialize sensor access.
- {
- std::unique_lock<std::mutex> lock(mutex_);
- InitializeSensors();
- thread_started_ = true;
- init_condition_.notify_one();
- // Continue on failure - the loop below will periodically retry.
- }
-
- EventConsumer consumer;
- for (;;) {
- for (;;) {
- std::unique_lock<std::mutex> lock(mutex_);
- UpdateSensorUse();
- if (!consumer)
- consumer = consumer_;
- if (shutting_down_)
- return;
- if (!paused_)
- break;
- condition_.wait(lock);
- }
-
- constexpr int kMaxEvents = 100;
- sensors_event_t events[kMaxEvents];
- ssize_t event_count = 0;
- if (initialization_result_) {
- int poll_fd, poll_events;
- void* poll_source;
- // Poll for events.
- int ident = ALooper_pollAll(-1, &poll_fd, &poll_events, &poll_source);
-
- if (ident != kLooperIdUser)
- continue;
-
- ASensorEvent* event = reinterpret_cast<ASensorEvent*>(&events[0]);
- event_count =
- ASensorEventQueue_getEvents(event_queue_, event, kMaxEvents);
-
- if (event_count == 0) {
- ALOGE("Detected sensor service failure, restarting sensors");
- // This happens when sensorservice has died and restarted. To avoid
- // spinning we need to restart the sensor access.
- DestroySensors();
- }
- } else {
- // When there is no sensor_device_, we still call the consumer at
- // regular intervals in case mock poses are in use. Note that this
- // will never be the case for production devices, but this helps
- // during bringup.
- usleep(5000);
- }
- if (event_count == kMaxEvents)
- ALOGI("max events (%d) reached", kMaxEvents);
-
- if (event_count >= 0) {
- consumer(events, events + event_count);
- } else {
- ALOGE(
- "SensorNdkThread::StartPolling: Error while polling sensor: %s "
- "(%zd)",
- strerror(-event_count), -event_count);
- }
- }
-
- // About to exit sensor thread, destroy sensor objects.
- DestroySensors();
- }));
-
- // Wait for thread to startup and initialize sensors so that we know whether
- // it succeeded.
- {
- std::unique_lock<std::mutex> lock(mutex_);
- while (!thread_started_)
- init_condition_.wait(lock);
- }
-
- // At this point, we've successfully initialized everything.
- // The NDK sensor thread will continue to retry on error, so assume success here.
- *out_success = true;
-}
-
-SensorNdkThread::~SensorNdkThread() {
- {
- if (looper_)
- ALooper_wake(looper_);
- std::unique_lock<std::mutex> lock(mutex_);
- shutting_down_ = true;
- condition_.notify_one();
- }
-
- thread_->join();
-}
-
-bool SensorNdkThread::InitializeSensors() {
- looper_ = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
- if (!looper_) {
- ALOGE("Failed to create ALooper.");
- return false;
- }
-
- // Prepare to monitor accelerometer
- sensor_manager_ = ASensorManager_getInstanceForPackage(nullptr);
- if (!sensor_manager_) {
- ALOGE("Failed to create ASensorManager.");
- return false;
- }
-
- event_queue_ = ASensorManager_createEventQueue(
- sensor_manager_, looper_, kLooperIdUser, nullptr, nullptr);
- if (!event_queue_) {
- ALOGE("Failed to create sensor EventQueue.");
- return false;
- }
-
- sensor_count_ = ASensorManager_getSensorList(sensor_manager_, &sensor_list_);
- ALOGI("Sensor count %d", sensor_count_);
-
- sensor_user_count_.resize(sensor_count_, 0);
-
- // To recover from sensorservice restart, enable the sensors that are already
- // requested.
- for (size_t sensor_index = 0; sensor_index < sensor_user_count_.size();
- ++sensor_index) {
- if (sensor_user_count_[sensor_index] > 0) {
- int result = ASensorEventQueue_registerSensor(
- event_queue_, sensor_list_[sensor_index], 0, 0);
- ALOGE_IF(result < 0, "ASensorEventQueue_registerSensor failed: %d",
- result);
- }
- }
-
- initialization_result_ = true;
- return true;
-}
-
-void SensorNdkThread::DestroySensors() {
- if (!event_queue_)
- return;
- for (size_t sensor_index = 0; sensor_index < sensor_user_count_.size();
- ++sensor_index) {
- if (sensor_user_count_[sensor_index] > 0) {
- ASensorEventQueue_disableSensor(event_queue_, sensor_list_[sensor_index]);
- }
- }
- ASensorManager_destroyEventQueue(sensor_manager_, event_queue_);
- event_queue_ = nullptr;
- initialization_result_ = false;
-}
-
-void SensorNdkThread::UpdateSensorUse() {
- if (!initialization_result_) {
- // Sleep for 1 second to avoid spinning during system instability.
- usleep(1000 * 1000);
- InitializeSensors();
- if (!initialization_result_)
- return;
- }
-
- if (!enable_sensors_.empty()) {
- for (int sensor_index : enable_sensors_) {
- if (sensor_user_count_[sensor_index]++ == 0) {
- int result = ASensorEventQueue_registerSensor(
- event_queue_, sensor_list_[sensor_index], 0, 0);
- ALOGE_IF(result < 0, "ASensorEventQueue_registerSensor failed: %d",
- result);
- }
- }
- enable_sensors_.clear();
- }
-
- if (!disable_sensors_.empty()) {
- for (int sensor_index : disable_sensors_) {
- if (--sensor_user_count_[sensor_index] == 0) {
- int result = ASensorEventQueue_disableSensor(
- event_queue_, sensor_list_[sensor_index]);
- ALOGE_IF(result < 0, "ASensorEventQueue_disableSensor failed: %d",
- result);
- }
- }
- disable_sensors_.clear();
- }
-}
-
-void SensorNdkThread::StartPolling(const EventConsumer& consumer) {
- {
- std::unique_lock<std::mutex> lock(mutex_);
- if (consumer_) {
- ALOGE("Already started sensor thread.");
- return;
- }
- consumer_ = consumer;
- }
- SetPaused(false);
-}
-
-void SensorNdkThread::SetPaused(bool is_paused) {
- std::unique_lock<std::mutex> lock(mutex_);
- // SetPaused may be called before we have StartPolling, make sure we have
- // an event consumer. Otherwise we defer until StartPolling is called.
- if (!consumer_)
- return;
- paused_ = is_paused;
- condition_.notify_one();
- ALooper_wake(looper_);
-}
-
-void SensorNdkThread::StartUsingSensor(const int sensor_index) {
- std::unique_lock<std::mutex> lock(mutex_);
- if (sensor_index < 0 || sensor_index >= sensor_count_) {
- ALOGE("StartUsingSensor(): sensor index %d out of range [0, %d)",
- sensor_index, sensor_count_);
- return;
- }
-
- enable_sensors_.push_back(sensor_index);
- ALooper_wake(looper_);
-}
-
-void SensorNdkThread::StopUsingSensor(const int sensor_index) {
- std::unique_lock<std::mutex> lock(mutex_);
- if (sensor_index < 0 || sensor_index >= sensor_count_) {
- ALOGE("StopUsingSensor(): sensor index %d out of range [0, %d)",
- sensor_index, sensor_count_);
- return;
- }
-
- disable_sensors_.push_back(sensor_index);
- ALooper_wake(looper_);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/sensor_ndk_thread.h b/services/vr/sensord/sensor_ndk_thread.h
deleted file mode 100644
index eb3cf9d..0000000
--- a/services/vr/sensord/sensor_ndk_thread.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
-#define ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
-
-#include <android/sensor.h>
-#include <hardware/sensors.h>
-
-#include <atomic>
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-#include "sensor_thread.h"
-
-namespace android {
-namespace dvr {
-
-// Manages initialization and polling of the sensor data. Polling is performed
-// continuously on a thread that passes events along to an arbitrary consumer.
-// All const member functions are thread-safe; otherwise, thread safety is noted
-// for each function.
-class SensorNdkThread : public SensorThread {
- public:
- // Initializes the sensor access, but does not yet start polling (see Start()
- // below). Sets *out_success to true on success; otherwise, sets *out_success
- // to false and logs an error.
- explicit SensorNdkThread(bool* out_success);
-
- // Tells the polling thread to shut down if it's running, and waits for it to
- // complete its polling loop.
- ~SensorNdkThread() override;
-
- // Begins polling on the thread. The provided consumer will be notified of
- // events. Event notification occurs on the polling thread.
- // Calling Start() more than once on an instance of SensorNdkThread is
- // invalid.
- void StartPolling(const EventConsumer& consumer) override;
-
- // Set whether the sensor polling thread is paused or not. This is useful
- // while we need to support both 3DoF and 6DoF codepaths. This 3DoF codepath
- // must be paused while the 6DoF codepath is using the IMU event stream.
- void SetPaused(bool is_paused) override;
-
- // Increase the number of users of the given sensor by one. Activates the
- // sensor if it wasn't already active.
- // Safe to call concurrently with any other functions in this class.
- void StartUsingSensor(int sensor_index) override;
-
- // Decrease the number of users of the given sensor by one. Deactivates the
- // sensor if its usage count has dropped to zero.
- // Safe to call concurrently with any other functions in this class.
- void StopUsingSensor(int sensor_index) override;
-
- // The number of sensors that are available. Returns a negative number if
- // initialization failed.
- int GetSensorCount() const override { return sensor_count_; }
-
- // The underlying sensor HAL data structure for the sensor at the given index.
- int GetSensorType(int index) const override {
- return ASensor_getType(sensor_list_[index]);
- }
-
- private:
- // Initialize ALooper and sensor access on the thread.
- // Returns true on success, false on failure.
- bool InitializeSensors();
-
- // Destroy sensor access.
- void DestroySensors();
-
- // Start or stop requested sensors from the thread. Class mutex must already
- // be locked.
- void UpdateSensorUse();
-
- // The actual thread on which we consume events.
- std::unique_ptr<std::thread> thread_;
-
- // Mutex for access to shutting_down_ and paused_ members.
- std::mutex mutex_;
-
- // Condition for signaling pause/unpause to the thread.
- std::condition_variable condition_;
-
- // Condition for signaling thread initialization.
- std::condition_variable init_condition_;
-
- // If this member is set to true, the thread will stop running at its next
- // iteration. Only set with the mutex held and signal condition_ when changed.
- bool shutting_down_;
-
- // If this member is set to true, the thread will pause at its next
- // iteration. Only set with the mutex held and signal condition_ when changed.
- bool paused_;
-
- // Thread start hand shake to verify that sensor initialization succeeded.
- bool thread_started_;
-
- // Initialization result (true for success).
- bool initialization_result_;
-
- // The callback.
- EventConsumer consumer_;
-
- // Sensor access
- ALooper* looper_;
- ASensorManager* sensor_manager_;
- ASensorEventQueue* event_queue_;
-
- // Sensor list from NDK.
- ASensorList sensor_list_;
- int sensor_count_;
-
- // Requests to the sensor thread to enable or disable given sensors.
- std::vector<int> enable_sensors_;
- std::vector<int> disable_sensors_;
-
- // A count of how many users each sensor has. Protected by user_count_mutex.
- std::vector<int> sensor_user_count_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
diff --git a/services/vr/sensord/sensor_service.cpp b/services/vr/sensord/sensor_service.cpp
deleted file mode 100644
index a182a26..0000000
--- a/services/vr/sensord/sensor_service.cpp
+++ /dev/null
@@ -1,184 +0,0 @@
-#include "sensor_service.h"
-
-#include <hardware/sensors.h>
-#include <log/log.h>
-#include <pdx/default_transport/service_endpoint.h>
-#include <poll.h>
-#include <private/dvr/sensor-ipc.h>
-#include <time.h>
-
-using android::pdx::default_transport::Endpoint;
-
-namespace android {
-namespace dvr {
-
-SensorService::SensorService(SensorThread* sensor_thread)
- : BASE("SensorService", Endpoint::Create(DVR_SENSOR_SERVICE_CLIENT)),
- sensor_thread_(sensor_thread) {
- sensor_clients_.resize(sensor_thread_->GetSensorCount());
-
- for (int i = 0; i < sensor_thread_->GetSensorCount(); ++i)
- type_to_sensor_[sensor_thread_->GetSensorType(i)] = i;
-}
-
-std::shared_ptr<pdx::Channel> SensorService::OnChannelOpen(pdx::Message& msg) {
- std::lock_guard<std::mutex> guard(mutex_);
-
- const pdx::MessageInfo& info = msg.GetInfo();
-
- std::shared_ptr<SensorClient> client(
- new SensorClient(*this, info.pid, info.cid));
- AddClient(client);
- return client;
-}
-
-void SensorService::OnChannelClose(pdx::Message& /*msg*/,
- const std::shared_ptr<pdx::Channel>& chan) {
- std::lock_guard<std::mutex> guard(mutex_);
-
- auto client = std::static_pointer_cast<SensorClient>(chan);
- if (!client) {
- ALOGW("WARNING: SensorClient was NULL!\n");
- return;
- }
- RemoveClient(client);
-}
-
-void SensorService::AddClient(const std::shared_ptr<SensorClient>& client) {
- clients_.push_front(client);
-}
-
-void SensorService::RemoveClient(const std::shared_ptr<SensorClient>& client) {
- // First remove it from the clients associated with its sensor, if any.
- RemoveSensorClient(client.get());
-
- // Finally, remove it from the list of clients we're aware of, and decrease
- // its reference count.
- clients_.remove(client);
-}
-
-void SensorService::RemoveSensorClient(SensorClient* client) {
- if (!client->has_sensor())
- return;
-
- std::forward_list<SensorClient*>& sensor_clients =
- sensor_clients_[client->sensor()];
- sensor_clients.remove(client);
- sensor_thread_->StopUsingSensor(client->sensor());
-
- client->unset_sensor();
-}
-
-pdx::Status<void> SensorService::HandleMessage(pdx::Message& msg) {
- pdx::Status<void> ret;
- const pdx::MessageInfo& info = msg.GetInfo();
- switch (info.op) {
- case DVR_SENSOR_START: {
- std::lock_guard<std::mutex> guard(mutex_);
- // Associate this channel with the indicated sensor,
- // unless it already has an association. In that case,
- // fail.
- auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
- if (client->has_sensor())
- REPLY_ERROR(msg, EINVAL, error);
- int sensor_type;
- if (!msg.ReadAll(&sensor_type, sizeof(sensor_type)))
- REPLY_ERROR(msg, EIO, error);
-
- // Find the sensor of the requested type.
- if (type_to_sensor_.find(sensor_type) == type_to_sensor_.end())
- REPLY_ERROR(msg, EINVAL, error);
- const int sensor_index = type_to_sensor_[sensor_type];
-
- sensor_clients_[sensor_index].push_front(client.get());
- client->set_sensor(sensor_index);
- sensor_thread_->StartUsingSensor(sensor_index);
-
- REPLY_SUCCESS(msg, 0, error);
- }
- case DVR_SENSOR_STOP: {
- std::lock_guard<std::mutex> guard(mutex_);
- auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
- if (!client->has_sensor())
- REPLY_ERROR(msg, EINVAL, error);
- RemoveSensorClient(client.get());
- REPLY_SUCCESS(msg, 0, error);
- }
- case DVR_SENSOR_POLL: {
- std::lock_guard<std::mutex> guard(mutex_);
- auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
-
- // Package up the events we've got for this client. Number of
- // events, followed by 0 or more sensor events, popped from
- // this client's queue until it's empty.
- int num_events = client->EventCount();
- sensors_event_t out_buffer[num_events];
- client->WriteEvents(out_buffer);
- struct iovec svec[] = {
- {.iov_base = &num_events, .iov_len = sizeof(num_events)},
- {.iov_base = out_buffer,
- .iov_len = num_events * sizeof(sensors_event_t)},
- };
- ret = msg.WriteVectorAll(svec, 2);
- if (!ret) {
- REPLY_ERROR(msg, EIO, error);
- }
- REPLY_SUCCESS(msg, 0, error);
- }
- default:
- // Do not lock mutex_ here, because this may call the on*() handlers,
- // which will lock the mutex themselves.
- ret = Service::HandleMessage(msg);
- break;
- }
-error:
- return ret;
-}
-
-void SensorService::EnqueueEvents(const sensors_event_t* begin_events,
- const sensors_event_t* end_events) {
- std::lock_guard<std::mutex> guard(mutex_);
-
- // Put the sensor values we got in the circular queue for each client that
- // cares about the given event.
- for (const sensors_event_t* event = begin_events; event != end_events;
- ++event) {
- const int sensor_index = type_to_sensor_[event->type];
- for (const auto& client : sensor_clients_[sensor_index]) {
- client->EnqueueEvent(*event);
- }
- }
-}
-
-void SensorClient::WriteEvents(sensors_event_t* buffer) {
- while (!event_queue_.Empty()) {
- *buffer = *(event_queue_.Top());
- event_queue_.Pop();
- ++buffer;
- }
-}
-
-void SensorClient::CircularQ::Push(const sensors_event_t& event) {
- if (count_ != 0 && head_ == tail_) {
- Pop(); // If we're full, throw away the oldest event.
- }
- events_[head_] = event;
- head_ = (head_ + 1) % kCqSize;
- ++count_;
-}
-
-const sensors_event_t* SensorClient::CircularQ::Top() const {
- if (count_ == 0)
- return nullptr;
- return &events_[tail_];
-}
-
-void SensorClient::CircularQ::Pop() {
- if (count_ == 0)
- return;
- tail_ = (tail_ + 1) % kCqSize;
- --count_;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/sensor_service.h b/services/vr/sensord/sensor_service.h
deleted file mode 100644
index 6ea470b..0000000
--- a/services/vr/sensord/sensor_service.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
-#define ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
-
-#include <forward_list>
-#include <unordered_map>
-#include <vector>
-
-#include <pdx/service.h>
-#include <pthread.h>
-
-#include "sensor_thread.h"
-
-namespace android {
-namespace dvr {
-
-class SensorClient;
-
-/*
- * SensorService implements the sensor service over ServiceFS.
- * The sensor service provides an interface to one sensor over
- * each channel.
- */
-class SensorService : public pdx::ServiceBase<SensorService> {
- public:
- pdx::Status<void> HandleMessage(pdx::Message& msg) override;
- std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& msg) override;
- void OnChannelClose(pdx::Message& msg,
- const std::shared_ptr<pdx::Channel>& chan) override;
-
- // Enqueue the events in [begin_events, end_events) onto any clients that care
- // about them.
- // Safe to call concurrently with any other public member functions.
- void EnqueueEvents(const sensors_event_t* begin_events,
- const sensors_event_t* end_events);
-
- private:
- friend BASE;
-
- // Initializes the service. Keeps a reference to sensor_thread, which must be
- // non-null.
- explicit SensorService(SensorThread* sensor_thread);
-
- // The abstraction around the sensor HAL.
- SensorThread* sensor_thread_;
-
- // All of the clients we are connected to. This is the one place in this class
- // where we keep the SensorClient instances alive using shared_ptr instances.
- std::forward_list<std::shared_ptr<SensorClient>> clients_;
-
- // Map types back to sensor indexes.
- std::unordered_map<int, int> type_to_sensor_;
- // For each sensor, the list of clients that are connected to it.
- // Every entry in here must also be in clients_, so that its reference count
- // remains positive.
- std::vector<std::forward_list<SensorClient*>> sensor_clients_;
-
- // Protects access to all member variables.
- std::mutex mutex_;
-
- // None of the following functions is thread-safe; callers must lock mutex_
- // before calling one.
- void AddClient(const std::shared_ptr<SensorClient>& client);
- void RemoveClient(const std::shared_ptr<SensorClient>& client);
- // Dissociate the indicated client from its sensor, if it has one; otherwise
- // do nothing.
- void RemoveSensorClient(SensorClient* client);
-
- SensorService(const SensorService&) = delete;
- void operator=(const SensorService&) = delete;
-};
-
-/*
- * SensorClient manages the service-side per-client context for each client
- * using the service.
- */
-class SensorClient : public pdx::Channel {
- public:
- SensorClient(SensorService& /*service*/, int /*pid*/, int /*cid*/)
- : sensor_index_(-1), has_sensor_index_(false) {}
-
- bool has_sensor() const { return has_sensor_index_; }
- int sensor() const { return sensor_index_; }
- void set_sensor(int sensor) {
- sensor_index_ = sensor;
- has_sensor_index_ = true;
- }
- void unset_sensor() {
- sensor_index_ = -1;
- has_sensor_index_ = false;
- }
-
- int EventCount() const { return event_queue_.Count(); }
-
- // Push an event onto our queue.
- void EnqueueEvent(const sensors_event_t& event) { event_queue_.Push(event); }
-
- // Write all the events in our queue (and clear it) to the supplied
- // buffer. Buffer must be large enough.
- void WriteEvents(sensors_event_t* buffer);
-
- private:
- SensorClient(const SensorClient&) = delete;
- SensorClient& operator=(const SensorClient&) = delete;
-
- int sensor_index_ = -1;
- bool has_sensor_index_ = false;
- // Circular queue holds as-yet-unasked-for events for the sensor associated
- // with this client.
- class CircularQ {
- public:
- static const int kCqSize = 10;
- CircularQ() : head_(0), tail_(0), count_(0) {}
- ~CircularQ() {}
- void Push(const sensors_event_t& event);
- const sensors_event_t* Top() const;
- void Pop();
- bool Empty() const { return count_ == 0; }
- int Count() const { return count_; }
-
- private:
- sensors_event_t events_[kCqSize];
- int head_ = 0;
- int tail_ = 0;
- int count_ = 0;
- };
- CircularQ event_queue_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
diff --git a/services/vr/sensord/sensor_thread.cpp b/services/vr/sensord/sensor_thread.cpp
deleted file mode 100644
index 01e4e7e..0000000
--- a/services/vr/sensord/sensor_thread.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "sensor_thread.h"
-
-namespace android {
-namespace dvr {
-
-SensorThread::~SensorThread() {}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/sensord/sensor_thread.h b/services/vr/sensord/sensor_thread.h
deleted file mode 100644
index 46aba17..0000000
--- a/services/vr/sensord/sensor_thread.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
-#define ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
-
-#include <hardware/sensors.h>
-
-#include <functional>
-
-namespace android {
-namespace dvr {
-
-// Manages initialization and polling of the sensor data. Polling is performed
-// continuously on a thread that passes events along to an arbitrary consumer.
-// All const member functions are thread-safe; otherwise, thread safety is noted
-// for each function.
-class SensorThread {
- public:
- // A function type that can be called to provide it with new events.
- // [events_begin, events_end) forms a contiguous array of events.
- using EventConsumer = std::function<void(const sensors_event_t* events_begin,
- const sensors_event_t* events_end)>;
-
- // Tells the polling thread to shut down if it's running, and waits for it to
- // complete its polling loop.
- virtual ~SensorThread();
-
- // Begins polling on the thread. The provided consumer will be notified of
- // events. Event notification occurs on the polling thread.
- // Calling Start() more than once on an instance of SensorThread is
- // invalid.
- virtual void StartPolling(const EventConsumer& consumer) = 0;
-
- // Set whether the sensor polling thread is paused or not. This is useful
- // while we need to support both 3DoF and 6DoF codepaths. This 3DoF codepath
- // must be paused while the 6DoF codepath is using the IMU event stream.
- virtual void SetPaused(bool is_paused) = 0;
-
- // Increase the number of users of the given sensor by one. Activates the
- // sensor if it wasn't already active.
- // Safe to call concurrently with any other functions in this class.
- virtual void StartUsingSensor(int sensor_index) = 0;
-
- // Decrease the number of users of the given sensor by one. Deactivates the
- // sensor if its usage count has dropped to zero.
- // Safe to call concurrently with any other functions in this class.
- virtual void StopUsingSensor(int sensor_index) = 0;
-
- // The number of sensors that are available. Returns a negative number if
- // initialization failed.
- virtual int GetSensorCount() const = 0;
-
- // Get the sensor type for the sensor at the given index.
- virtual int GetSensorType(int index) const = 0;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
diff --git a/services/vr/sensord/sensord.cpp b/services/vr/sensord/sensord.cpp
deleted file mode 100644
index db39152..0000000
--- a/services/vr/sensord/sensord.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-#define LOG_TAG "sensord"
-
-#include <string.h>
-
-#include <binder/ProcessState.h>
-
-#include <dvr/performance_client_api.h>
-#include <pdx/default_transport/service_dispatcher.h>
-#include <private/dvr/pose-ipc.h>
-#include <private/dvr/sensor-ipc.h>
-
-#include "pose_service.h"
-#include "sensor_hal_thread.h"
-#include "sensor_ndk_thread.h"
-#include "sensor_service.h"
-#include "sensor_thread.h"
-#include "sensord_extension.h"
-
-using android::dvr::PoseService;
-using android::dvr::SensorHalThread;
-using android::dvr::SensorNdkThread;
-using android::dvr::SensorService;
-using android::dvr::SensorThread;
-using android::pdx::Service;
-using android::pdx::ServiceDispatcher;
-using android::dvr::SensordExtension;
-
-int main(int, char**) {
- ALOGI("Starting up...");
-
- SensordExtension::run();
-
- // We need to be able to create endpoints with full perms.
- umask(0000);
-
- android::ProcessState::self()->startThreadPool();
-
- bool sensor_thread_succeeded = false;
-#ifdef SENSORD_USES_HAL
- std::unique_ptr<SensorThread> sensor_thread(
- new SensorHalThread(&sensor_thread_succeeded));
-#else
- std::unique_ptr<SensorThread> sensor_thread(
- new SensorNdkThread(&sensor_thread_succeeded));
-#endif
-
- if (!sensor_thread_succeeded) {
- ALOGE("ERROR: Failed to initialize SensorThread! No 3DoF!\n");
- }
-
- if (sensor_thread->GetSensorCount() == 0)
- ALOGW("No sensors found\n");
-
- auto sensor_service = SensorService::Create(sensor_thread.get());
- if (!sensor_service) {
- ALOGE("TERMINATING: failed to create SensorService!!!\n");
- return -1;
- }
-
- auto pose_service = PoseService::Create(sensor_thread.get());
- if (!pose_service) {
- ALOGE("TERMINATING: failed to create PoseService!!!\n");
- return -1;
- }
-
- std::unique_ptr<ServiceDispatcher> dispatcher =
- android::pdx::default_transport::ServiceDispatcher::Create();
- if (!dispatcher) {
- ALOGE("TERMINATING: failed to create ServiceDispatcher!!!\n");
- return -1;
- }
-
- dispatcher->AddService(sensor_service);
- dispatcher->AddService(pose_service);
-
- sensor_thread->StartPolling([sensor_service, pose_service](
- const sensors_event_t* events_begin, const sensors_event_t* events_end) {
- sensor_service->EnqueueEvents(events_begin, events_end);
- pose_service->HandleEvents(events_begin, events_end);
- });
-
- const int priority_error = dvrSetSchedulerClass(0, "sensors:low");
- LOG_ALWAYS_FATAL_IF(priority_error < 0,
- "SensorService: Failed to set scheduler class: %s",
- strerror(-priority_error));
-
- int ret = dispatcher->EnterDispatchLoop();
- ALOGI("Dispatch loop exited because: %s\n", strerror(-ret));
-
- return ret;
-}
diff --git a/services/vr/sensord/sensord.rc b/services/vr/sensord/sensord.rc
deleted file mode 100644
index 36cd377..0000000
--- a/services/vr/sensord/sensord.rc
+++ /dev/null
@@ -1,11 +0,0 @@
-on init
- mkdir /dev/socket/pdx/system/vr/pose 0775 system system
- mkdir /dev/socket/pdx/system/vr/sensors 0775 system system
-
-service sensord /system/bin/sensord
- class core
- user system
- group system camera sdcard_rw
- writepid /dev/cpuset/system/tasks
- socket pdx/system/vr/sensors/client stream 0666 system system
- socket pdx/system/vr/pose/client stream 0666 system system
diff --git a/services/vr/sensord/sensord_extension.cpp b/services/vr/sensord/sensord_extension.cpp
deleted file mode 100644
index 6cd7db3..0000000
--- a/services/vr/sensord/sensord_extension.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "sensord_extension.h"
-
-void android::dvr::SensordExtension::run() {
-}
diff --git a/services/vr/sensord/sensord_extension.h b/services/vr/sensord/sensord_extension.h
deleted file mode 100644
index e553eed..0000000
--- a/services/vr/sensord/sensord_extension.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef ANDROID_DVR_SENSORD_EXTENSION_H_
-#define ANDROID_DVR_SENSORD_EXTENSION_H_
-
-namespace android {
-namespace dvr {
-
-// Allows sensord to be extended with additional code.
-class SensordExtension {
- public:
- static void run();
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // ANDROID_DVR_SENSORD_EXTENSION_H_
diff --git a/services/vr/sensord/test/poselatencytest.cpp b/services/vr/sensord/test/poselatencytest.cpp
deleted file mode 100644
index 615fc75..0000000
--- a/services/vr/sensord/test/poselatencytest.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <dvr/pose_client.h>
-#include <inttypes.h>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <vector>
-
-// Creates a pose client and polls 30x for new data. Prints timestamp and
-// latency. Latency is calculated based on the difference between the
-// current clock and the timestamp from the Myriad, which has been synced
-// to QC time. Note that there is some clock drift and clocks are only sycned
-// when the FW is loaded.
-int main(int /*argc*/, char** /*argv*/) {
- DvrPose* pose_client = dvrPoseCreate();
- if (pose_client == nullptr) {
- printf("Unable to create pose client\n");
- return -1;
- }
-
- DvrPoseAsync last_state;
- DvrPoseAsync current_state;
- last_state.timestamp_ns = 0;
- current_state.timestamp_ns = 0;
-
- double avg_latency = 0;
- double min_latency = (float)UINT64_MAX;
- double max_latency = 0;
- double std = 0;
- std::vector<uint64_t> latency;
-
- int num_samples = 100;
- for (int i = 0; i < num_samples; ++i) {
- while (last_state.timestamp_ns == current_state.timestamp_ns) {
- uint32_t vsync_count = dvrPoseGetVsyncCount(pose_client);
- int err = dvrPoseGet(pose_client, vsync_count, ¤t_state);
- if (err) {
- printf("Error polling pose: %d\n", err);
- dvrPoseDestroy(pose_client);
- return err;
- }
- }
- struct timespec timespec;
- uint64_t timestamp, diff;
- clock_gettime(CLOCK_MONOTONIC, ×pec);
- timestamp =
- ((uint64_t)timespec.tv_sec * 1000000000) + (uint64_t)timespec.tv_nsec;
- if (timestamp < current_state.timestamp_ns) {
- printf("ERROR: excessive clock drift detected, reload FW to resync\n");
- return -1;
- }
- diff = timestamp - current_state.timestamp_ns;
- printf("%02d) ts = %" PRIu64 " time = %" PRIu64 "\n", i + 1,
- current_state.timestamp_ns, timestamp);
- printf("\tlatency: %" PRIu64 " ns (%" PRIu64 " us) (%" PRIu64 " ms)\n",
- diff, diff / 1000, diff / 1000000);
-
- avg_latency += diff;
- if (diff < min_latency) {
- min_latency = diff;
- }
- if (diff > max_latency) {
- max_latency = diff;
- }
- latency.push_back(diff);
-
- last_state = current_state;
- }
- avg_latency /= num_samples;
- for (unsigned int i = 0; i < latency.size(); i++) {
- std += pow(latency[i] - avg_latency, 2);
- }
- std /= latency.size();
- std = sqrt(std);
-
- printf("\n************************\n");
- printf("Avg latency = %lf ns (%lf us) (%lf ms)\n", avg_latency,
- avg_latency / 1000, avg_latency / 1000000);
- printf("Max latency = %lf ns (%lf us) (%lf ms)\n", max_latency,
- max_latency / 1000, max_latency / 1000000);
- printf("Min latency = %lf ns (%lf us) (%lf ms)\n", min_latency,
- min_latency / 1000, min_latency / 1000000);
- printf("Standard dev = %lf ns (%lf us) (%lf ms)\n", std, std / 1000,
- std / 1000000);
- printf("\n************************\n");
- return 0;
-}
diff --git a/services/vr/vr_window_manager/Android.bp b/services/vr/vr_window_manager/Android.bp
deleted file mode 100644
index d7ddba1..0000000
--- a/services/vr/vr_window_manager/Android.bp
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-native_src = [
- "application.cpp",
- "controller_mesh.cpp",
- "elbow_model.cpp",
- "hwc_callback.cpp",
- "reticle.cpp",
- "shell_view.cpp",
- "surface_flinger_view.cpp",
- "texture.cpp",
- "vr_window_manager.cpp",
- "vr_window_manager_binder.cpp",
- "aidl/android/service/vr/IVrWindowManager.aidl",
- "display_view.cpp",
-]
-
-static_libs = [
- "libdisplay",
- "libbufferhub",
- "libbufferhubqueue",
- "libeds",
- "libdvrgraphics",
- "libdvrcommon",
- "libhwcomposer-client",
- "libvrsensor",
- "libperformance",
- "libpdx_default_transport",
- "libcutils",
- "libvr_hwc-binder",
- "libvr_manager",
- "libvirtualtouchpadclient",
-]
-
-shared_libs = [
- "android.frameworks.vr.composer@1.0",
- "android.hardware.graphics.composer@2.1",
- "libbase",
- "libbinder",
- "libinput",
- "libhardware",
- "libhwbinder",
- "libsync",
- "libutils",
- "libgui",
- "libEGL",
- "libGLESv2",
- "libvulkan",
- "libsync",
- "libui",
- "libhidlbase",
- "libhidltransport",
- "liblog",
- "libvr_hwc-hal",
-]
-
-cc_binary {
- srcs: native_src,
- static_libs: static_libs,
- shared_libs: shared_libs,
- cflags: ["-DGL_GLEXT_PROTOTYPES", "-DEGL_EGLEXT_PROTOTYPES", "-DLOG_TAG=\"VrWindowManager\""],
- host_ldlibs: ["-llog"],
- name: "vr_wm",
- tags: ["optional"],
- init_rc: ["vr_wm.rc"],
-}
-
-cmd_src = [
- "vr_wm_ctl.cpp",
- "aidl/android/service/vr/IVrWindowManager.aidl",
-]
-
-staticLibs = ["libcutils"]
-
-sharedLibs = [
- "libbase",
- "libbinder",
- "libutils",
-]
-
-cc_binary {
- srcs: cmd_src,
- static_libs: staticLibs,
- shared_libs: sharedLibs,
- cppflags: ["-std=c++11"],
- cflags: ["-DLOG_TAG=\"vrwmctl\""],
- host_ldlibs: ["-llog"],
- name: "vr_wm_ctl",
- tags: ["optional"],
-}
diff --git a/services/vr/vr_window_manager/aidl/android/service/vr/IVrWindowManager.aidl b/services/vr/vr_window_manager/aidl/android/service/vr/IVrWindowManager.aidl
deleted file mode 100644
index b16049f..0000000
--- a/services/vr/vr_window_manager/aidl/android/service/vr/IVrWindowManager.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright (c) 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.service.vr;
-
-/** @hide */
-interface IVrWindowManager {
- const String SERVICE_NAME = "vr_window_manager";
- void connectController(in FileDescriptor fd) = 0;
- void disconnectController() = 1;
- void enterVrMode() = 2;
- void exitVrMode() = 3;
- void setDebugMode(int mode) = 4;
- void set2DMode(int mode) = 5;
- void setRotation(int angle) = 6;
-}
-
diff --git a/services/vr/vr_window_manager/application.cpp b/services/vr/vr_window_manager/application.cpp
deleted file mode 100644
index 8b4460a..0000000
--- a/services/vr/vr_window_manager/application.cpp
+++ /dev/null
@@ -1,330 +0,0 @@
-#include "application.h"
-
-#include <inttypes.h>
-#include <EGL/egl.h>
-#include <GLES3/gl3.h>
-#include <binder/IServiceManager.h>
-#include <dvr/graphics.h>
-#include <dvr/performance_client_api.h>
-#include <dvr/pose_client.h>
-#include <gui/ISurfaceComposer.h>
-#include <hardware/hwcomposer_defs.h>
-#include <log/log.h>
-#include <private/dvr/graphics/vr_gl_extensions.h>
-
-#include <vector>
-
-namespace android {
-namespace dvr {
-
-Application::Application() {
- vr_mode_listener_ = new VrModeListener(this);
-}
-
-Application::~Application() {
- sp<IVrManager> vrManagerService = interface_cast<IVrManager>(
- defaultServiceManager()->getService(String16("vrmanager")));
- if (vrManagerService.get()) {
- vrManagerService->unregisterPersistentVrStateListener(vr_mode_listener_);
- }
-}
-
-int Application::Initialize() {
- dvrSetCpuPartition(0, "/application/performance");
-
- bool is_right_handed = true; // TODO: retrieve setting from system
- elbow_model_.Enable(ElbowModel::kDefaultNeckPosition, is_right_handed);
- last_frame_time_ = std::chrono::system_clock::now();
-
- sp<IVrManager> vrManagerService = interface_cast<IVrManager>(
- defaultServiceManager()->getService(String16("vrmanager")));
- if (vrManagerService.get()) {
- vrManagerService->registerPersistentVrStateListener(vr_mode_listener_);
- }
- return 0;
-}
-
-int Application::AllocateResources() {
- int surface_width = 0, surface_height = 0;
- DvrLensInfo lens_info = {};
- GLuint texture_id = 0;
- GLenum texture_target = 0;
- std::vector<DvrSurfaceParameter> surface_params = {
- DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
- DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
- DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &lens_info.inter_lens_meters),
- DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, &lens_info.left_fov),
- DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, &lens_info.right_fov),
- DVR_SURFACE_PARAMETER_OUT(SURFACE_TEXTURE_TARGET_TYPE, &texture_target),
- DVR_SURFACE_PARAMETER_OUT(SURFACE_TEXTURE_TARGET_ID, &texture_id),
- DVR_SURFACE_PARAMETER_IN(VISIBLE, 0),
- DVR_SURFACE_PARAMETER_IN(Z_ORDER, 1),
- DVR_SURFACE_PARAMETER_IN(GEOMETRY, DVR_SURFACE_GEOMETRY_SINGLE),
- DVR_SURFACE_PARAMETER_IN(ENABLE_LATE_LATCH, 0),
- DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, 0),
- DVR_SURFACE_PARAMETER_LIST_END,
- };
-
- int ret = dvrGraphicsContextCreate(surface_params.data(), &graphics_context_);
- if (ret)
- return ret;
-
- GLuint fbo = 0;
- GLuint depth_stencil_buffer = 0;
- GLuint samples = 1;
- glGenFramebuffers(1, &fbo);
- glBindFramebuffer(GL_FRAMEBUFFER, fbo);
- glFramebufferTexture2DMultisampleEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- texture_target, texture_id, 0, samples);
-
- glGenRenderbuffers(1, &depth_stencil_buffer);
- glBindRenderbuffer(GL_RENDERBUFFER, depth_stencil_buffer);
- glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples,
- GL_DEPTH_COMPONENT24, surface_width,
- surface_height);
-
- glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
- GL_RENDERBUFFER, depth_stencil_buffer);
-
- ALOGI("Surface size=%dx%d", surface_width, surface_height);
- pose_client_ = dvrPoseCreate();
- if (!pose_client_)
- return 1;
-
- vec2i eye_size(surface_width / 2, surface_height);
-
- eye_viewport_[0] = Range2i::FromSize(vec2i(0, 0), eye_size);
- eye_viewport_[1] = Range2i::FromSize(vec2i(surface_width / 2, 0), eye_size);
-
- eye_from_head_[0] = Eigen::Translation3f(
- vec3(lens_info.inter_lens_meters * 0.5f, 0.0f, 0.0f));
- eye_from_head_[1] = Eigen::Translation3f(
- vec3(-lens_info.inter_lens_meters * 0.5f, 0.0f, 0.0f));
-
- fov_[0] = FieldOfView(lens_info.left_fov[0], lens_info.left_fov[1],
- lens_info.left_fov[2], lens_info.left_fov[3]);
- fov_[1] = FieldOfView(lens_info.right_fov[0], lens_info.right_fov[1],
- lens_info.right_fov[2], lens_info.right_fov[3]);
-
- return 0;
-}
-
-void Application::DeallocateResources() {
- if (graphics_context_)
- dvrGraphicsContextDestroy(graphics_context_);
- graphics_context_ = nullptr;
-
- if (pose_client_)
- dvrPoseDestroy(pose_client_);
-
- initialized_ = false;
-}
-
-void Application::ProcessTasks(const std::vector<MainThreadTask>& tasks) {
- for (auto task : tasks) {
- switch (task) {
- case MainThreadTask::EnableDebugMode:
- if (!debug_mode_) {
- debug_mode_ = true;
- SetVisibility(debug_mode_);
- }
- break;
- case MainThreadTask::DisableDebugMode:
- if (debug_mode_) {
- debug_mode_ = false;
- SetVisibility(debug_mode_);
- }
- break;
- case MainThreadTask::EnteringVrMode:
- if (!initialized_) {
- LOG_ALWAYS_FATAL_IF(AllocateResources(),
- "Failed to allocate resources");
- }
- break;
- case MainThreadTask::ExitingVrMode:
- if (initialized_)
- DeallocateResources();
- break;
- case MainThreadTask::Show:
- if (!is_visible_)
- SetVisibility(true);
- break;
- }
- }
-}
-
-void Application::DrawFrame() {
- // Thread should block if we are invisible or not fully initialized.
- std::unique_lock<std::mutex> lock(mutex_);
- wake_up_init_and_render_.wait(lock, [this]() {
- return (is_visible_ && initialized_) || !main_thread_tasks_.empty();
- });
-
- // Process main thread tasks if there are any.
- std::vector<MainThreadTask> tasks;
- tasks.swap(main_thread_tasks_);
- lock.unlock();
-
- if (!tasks.empty())
- ProcessTasks(tasks);
-
- if (!initialized_)
- return;
-
- // TODO(steventhomas): If we're not visible, block until we are. For now we
- // throttle by calling dvrGraphicsWaitNextFrame.
- DvrFrameSchedule schedule;
- int status = dvrGraphicsWaitNextFrame(graphics_context_, 0, &schedule);
- if (status < 0) {
- ALOGE("Context lost, deallocating graphics resources");
- SetVisibility(false);
- DeallocateResources();
- }
-
- OnDrawFrame();
-
- if (is_visible_) {
- ProcessControllerInput();
-
- DvrPoseAsync pose;
- dvrPoseGet(pose_client_, schedule.vsync_count, &pose);
- last_pose_ = Posef(
- quat(pose.orientation[3], pose.orientation[0], pose.orientation[1],
- pose.orientation[2]),
- vec3(pose.translation[0], pose.translation[1], pose.translation[2]));
-
- std::chrono::time_point<std::chrono::system_clock> now =
- std::chrono::system_clock::now();
- double delta =
- std::chrono::duration<double>(now - last_frame_time_).count();
- last_frame_time_ = now;
-
- if (delta > 1.0f)
- delta = 0.05f;
-
- fade_value_ += delta / 0.25f;
- if (fade_value_ > 1.0f)
- fade_value_ = 1.0f;
-
- controller_position_ =
- elbow_model_.Update(delta, last_pose_.GetRotation(),
- controller_orientation_, should_recenter_);
-
- dvrBeginRenderFrameEds(graphics_context_, pose.orientation,
- pose.translation);
-
- glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
-
- mat4 head_matrix = last_pose_.GetObjectFromReferenceMatrix();
- glViewport(eye_viewport_[kLeftEye].GetMinPoint()[0],
- eye_viewport_[kLeftEye].GetMinPoint()[1],
- eye_viewport_[kLeftEye].GetSize()[0],
- eye_viewport_[kLeftEye].GetSize()[1]);
- DrawEye(kLeftEye, fov_[kLeftEye].GetProjectionMatrix(0.1f, 500.0f),
- eye_from_head_[kLeftEye], head_matrix);
-
- glViewport(eye_viewport_[kRightEye].GetMinPoint()[0],
- eye_viewport_[kRightEye].GetMinPoint()[1],
- eye_viewport_[kRightEye].GetSize()[0],
- eye_viewport_[kRightEye].GetSize()[1]);
- DrawEye(kRightEye, fov_[kRightEye].GetProjectionMatrix(0.1f, 500.0f),
- eye_from_head_[kRightEye], head_matrix);
-
- OnEndFrame();
-
- dvrPresent(graphics_context_);
- should_recenter_ = false;
- }
-}
-
-void Application::ProcessControllerInput() {
- if (controller_data_provider_) {
- shmem_controller_active_ = false;
- const void* data = controller_data_provider_->LockControllerData();
- // TODO(kpschoedel): define wire format.
- if (data) {
- struct wire_format {
- uint32_t version;
- uint32_t timestamph;
- uint32_t timestampl;
- uint32_t quat_count;
- float q[4];
- uint32_t buttonsh;
- uint32_t buttonsl;
- } __attribute__((__aligned__(32)));
- const wire_format* wire_data = static_cast<const wire_format*>(data);
- static uint64_t last_timestamp = 0;
- if (wire_data->version == 1) {
- shmem_controller_active_ = true;
- uint64_t timestamp =
- (((uint64_t)wire_data->timestamph) << 32) | wire_data->timestampl;
- if (timestamp == last_timestamp) {
- static uint64_t last_logged_timestamp = 0;
- if (last_logged_timestamp != last_timestamp) {
- last_logged_timestamp = last_timestamp;
- ALOGI("Controller shmem stale T=0x%" PRIX64, last_timestamp);
- }
- } else {
- last_timestamp = timestamp;
- controller_orientation_ = quat(wire_data->q[3], wire_data->q[0],
- wire_data->q[1], wire_data->q[2]);
- shmem_controller_buttons_ =
- (((uint64_t)wire_data->buttonsh) << 32) | wire_data->buttonsl;
- }
- } else if (wire_data->version == 0xFEEDFACE) {
- static bool logged_init = false;
- if (!logged_init) {
- logged_init = true;
- ALOGI("Controller shmem waiting for data");
- }
- }
- }
- controller_data_provider_->UnlockControllerData();
- if (shmem_controller_active_) {
- ALOGV("Controller shmem orientation: %f %f %f %f",
- controller_orientation_.x(), controller_orientation_.y(),
- controller_orientation_.z(), controller_orientation_.w());
- if (shmem_controller_buttons_) {
- ALOGV("Controller shmem buttons: %017" PRIX64,
- shmem_controller_buttons_);
- }
- }
- }
-}
-
-void Application::SetVisibility(bool visible) {
- if (visible && !initialized_) {
- if (AllocateResources())
- ALOGE("Failed to allocate resources");
- }
-
- bool changed = is_visible_ != visible;
- if (changed) {
- is_visible_ = visible;
- dvrGraphicsSurfaceSetVisible(graphics_context_, is_visible_);
- OnVisibilityChanged(is_visible_);
- }
-}
-
-void Application::OnVisibilityChanged(bool visible) {
- if (visible) {
- fade_value_ = 0;
- // We have been sleeping so to ensure correct deltas, reset the time.
- last_frame_time_ = std::chrono::system_clock::now();
- }
-}
-
-void Application::QueueTask(MainThreadTask task) {
- std::unique_lock<std::mutex> lock(mutex_);
- main_thread_tasks_.push_back(task);
- wake_up_init_and_render_.notify_one();
-}
-
-void Application::VrModeListener::onPersistentVrStateChanged(bool enabled) {
- if (!enabled)
- app_->QueueTask(MainThreadTask::ExitingVrMode);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/application.h b/services/vr/vr_window_manager/application.h
deleted file mode 100644
index ed99157..0000000
--- a/services/vr/vr_window_manager/application.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_APPLICATION_H_
-#define VR_WINDOW_MANAGER_APPLICATION_H_
-
-#include <memory>
-#include <private/dvr/types.h>
-#include <stdint.h>
-#include <vr/vr_manager/vr_manager.h>
-
-#include <chrono>
-#include <mutex>
-#include <vector>
-
-#include "controller_data_provider.h"
-#include "elbow_model.h"
-
-struct DvrGraphicsContext;
-struct DvrPose;
-
-namespace android {
-namespace dvr {
-
-class Application {
- public:
- Application();
- virtual ~Application();
-
- virtual int Initialize();
-
- virtual int AllocateResources();
- virtual void DeallocateResources();
-
- void DrawFrame();
-
- void SetControllerDataProvider(ControllerDataProvider* provider) {
- controller_data_provider_ = provider;
- }
-
- protected:
- enum class MainThreadTask {
- EnteringVrMode,
- ExitingVrMode,
- EnableDebugMode,
- DisableDebugMode,
- Show,
- };
-
- class VrModeListener : public BnPersistentVrStateCallbacks {
- public:
- VrModeListener(Application *app) : app_(app) {}
- void onPersistentVrStateChanged(bool enabled) override;
-
- private:
- Application *app_;
- };
-
- sp<VrModeListener> vr_mode_listener_;
- virtual void OnDrawFrame() = 0;
- virtual void DrawEye(EyeType eye, const mat4& perspective,
- const mat4& eye_matrix, const mat4& head_matrix) = 0;
- virtual void OnEndFrame() = 0;
-
- void SetVisibility(bool visible);
- virtual void OnVisibilityChanged(bool visible);
-
- void ProcessControllerInput();
-
- void ProcessTasks(const std::vector<MainThreadTask>& tasks);
-
- void QueueTask(MainThreadTask task);
-
- DvrGraphicsContext* graphics_context_ = nullptr;
- DvrPose* pose_client_ = nullptr;
-
- Range2i eye_viewport_[2];
- mat4 eye_from_head_[2];
- FieldOfView fov_[2];
- Posef last_pose_;
-
- quat controller_orientation_;
- bool shmem_controller_active_ = false;
- uint64_t shmem_controller_buttons_;
-
- // Used to center the scene when the shell becomes visible.
- bool should_recenter_ = true;
-
- bool is_visible_ = false;
- std::chrono::time_point<std::chrono::system_clock> visibility_button_press_;
- bool debug_mode_ = false;
-
- std::chrono::time_point<std::chrono::system_clock> last_frame_time_;
- vec3 controller_position_;
- ElbowModel elbow_model_;
-
- float fade_value_ = 0;
-
- std::mutex mutex_;
- std::condition_variable wake_up_init_and_render_;
- bool initialized_ = false;
- std::vector<MainThreadTask> main_thread_tasks_;
-
- // Controller data provider from shared memory buffer.
- ControllerDataProvider* controller_data_provider_ = nullptr;
-
- Application(const Application&) = delete;
- void operator=(const Application&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_APPLICATION_H_
diff --git a/services/vr/vr_window_manager/controller_data_provider.h b/services/vr/vr_window_manager/controller_data_provider.h
deleted file mode 100644
index bc1450c..0000000
--- a/services/vr/vr_window_manager/controller_data_provider.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_CONTROLLER_DATA_PROVIDER_H_
-#define VR_WINDOW_MANAGER_CONTROLLER_DATA_PROVIDER_H_
-
-namespace android {
-namespace dvr {
-
-class ControllerDataProvider {
- public:
- virtual ~ControllerDataProvider() {}
- // Returns data pointer or nullptr. If pointer is valid, call to
- // UnlockControllerData is required.
- virtual const void* LockControllerData() = 0;
- virtual void UnlockControllerData() = 0;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_CONTROLLER_DATA_PROVIDER_H_
\ No newline at end of file
diff --git a/services/vr/vr_window_manager/controller_mesh.cpp b/services/vr/vr_window_manager/controller_mesh.cpp
deleted file mode 100644
index c6095b1..0000000
--- a/services/vr/vr_window_manager/controller_mesh.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-#include "controller_mesh.h"
-
-namespace android {
-namespace dvr {
-
-const int kNumControllerMeshVertices = 60;
-
-// Vertices in position.xyz, normal.xyz, uv.xy oder.
-// Generated from an .obj mesh.
-const float kControllerMeshVertices[] = {
- 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 0, 0,
- 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
- 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
- 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 0, 0,
- 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
- 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 0, 1,
- 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
- -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
- -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
- 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
- -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
- 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
- -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
- -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
- -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
- -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
- -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
- -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
- -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
- -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
- -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
- -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
- -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
- -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
- -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
- -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
- -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
- -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
- -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
- -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
- -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
- -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
- -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
- -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
- -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
- -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
- -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
- 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
- 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
- -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
- 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
- -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
- 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
- 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
- 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
- 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
- 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
- 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
- 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
- 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
- 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
- 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
- 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
- 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
- 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
- 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 1, 0,
- 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 1, 1,
- 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
- 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 1, 1,
- 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
-
-};
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/controller_mesh.h b/services/vr/vr_window_manager/controller_mesh.h
deleted file mode 100644
index 88872c7..0000000
--- a/services/vr/vr_window_manager/controller_mesh.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
-#define VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
-
-namespace android {
-namespace dvr {
-
-extern const int kNumControllerMeshVertices;
-extern const float kControllerMeshVertices[];
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
diff --git a/services/vr/vr_window_manager/display_view.cpp b/services/vr/vr_window_manager/display_view.cpp
deleted file mode 100644
index 88768a0..0000000
--- a/services/vr/vr_window_manager/display_view.cpp
+++ /dev/null
@@ -1,458 +0,0 @@
-#include "display_view.h"
-
-#include "texture.h"
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-constexpr float kLayerScaleFactor = 3.0f;
-constexpr unsigned int kMaximumPendingFrames = 8;
-constexpr uint32_t kSystemId = 1000;
-
-// clang-format off
-const GLfloat kVertices[] = {
- -1, -1, 0,
- 1, -1, 0,
- -1, 1, 0,
- 1, 1, 0,
-};
-
-const GLfloat kTextureVertices[] = {
- 0, 1,
- 1, 1,
- 0, 0,
- 1, 0,
-};
-// clang-format on
-
-// Returns true if the given point is inside the given rect.
-bool IsInside(const vec2& pt, const vec2& tl, const vec2& br) {
- return pt.x() >= tl.x() && pt.x() <= br.x() && pt.y() >= tl.y() &&
- pt.y() <= br.y();
-}
-
-mat4 GetScalingMatrix(float width, float height) {
- float xscale = 1, yscale = 1;
- float ar = width / height;
- if (ar > 1)
- yscale = 1.0 / ar;
- else
- xscale = ar;
-
- xscale *= kLayerScaleFactor;
- yscale *= kLayerScaleFactor;
-
- return mat4(Eigen::Scaling<float>(xscale, yscale, 1.0));
-}
-
-// Helper function that applies the crop transform to the texture layer and
-// positions (and scales) the texture layer in the appropriate location in the
-// display space.
-mat4 GetLayerTransform(const TextureLayer& texture_layer, float display_width,
- float display_height) {
- // Map from vertex coordinates to [0, 1] coordinates:
- // 1) Flip y since in vertex coordinates (-1, -1) is at the bottom left and
- // in texture coordinates (0, 0) is at the top left.
- // 2) Translate by (1, 1) to map vertex coordinates to [0, 2] on x and y.
- // 3) Scale by 1 / 2 to map coordinates to [0, 1] on x and y.
- mat4 unit_space(Eigen::AlignedScaling3f(0.5f, 0.5f, 1.0f) *
- Eigen::Translation3f(1.0f, 1.0f, 0.0f) *
- Eigen::AlignedScaling3f(1.0f, -1.0f, 1.0f));
-
- mat4 texture_space(Eigen::AlignedScaling3f(
- texture_layer.texture->width(), texture_layer.texture->height(), 1.0f));
-
- // 1) Translate the layer to crop the left and top edge.
- // 2) Scale the layer such that the cropped right and bottom edges map outside
- // the exture region.
- float crop_width = texture_layer.crop.right - texture_layer.crop.left;
- float crop_height = texture_layer.crop.bottom - texture_layer.crop.top;
- mat4 texture_crop(Eigen::AlignedScaling3f(
- texture_layer.texture->width() / crop_width,
- texture_layer.texture->height() / crop_height, 1.0f) *
- Eigen::Translation3f(-texture_layer.crop.left,
- -texture_layer.crop.top, 0.0f));
-
- mat4 display_space(
- Eigen::AlignedScaling3f(display_width, display_height, 1.0f));
-
- // 1) Scale the texture to fit the display frame.
- // 2) Translate the texture in the display frame location.
- float display_frame_width =
- texture_layer.display_frame.right - texture_layer.display_frame.left;
- float display_frame_height =
- texture_layer.display_frame.bottom - texture_layer.display_frame.top;
- mat4 display_frame(
- Eigen::Translation3f(texture_layer.display_frame.left,
- texture_layer.display_frame.top, 0.0f) *
- Eigen::AlignedScaling3f(display_frame_width / display_width,
- display_frame_height / display_height, 1.0f));
-
- mat4 layer_transform = unit_space.inverse() * display_space.inverse() *
- display_frame * display_space *
- texture_space.inverse() * texture_crop *
- texture_space * unit_space;
- return layer_transform;
-}
-
-// Determine if ths frame should be shown or hidden.
-ViewMode CalculateVisibilityFromLayerConfig(const HwcCallback::Frame& frame,
- uint32_t* appid) {
- auto& layers = frame.layers();
-
- size_t index;
- // Skip all layers that we don't know about.
- for (index = 0; index < layers.size(); index++) {
- if (layers[index].type != 0xFFFFFFFF && layers[index].type != 0)
- break;
- }
-
- if (index == layers.size())
- return ViewMode::Hidden;
-
- if (layers[index].type != 1) {
- // We don't have a VR app layer? Abort.
- return ViewMode::Hidden;
- }
-
- if (layers[index].appid != *appid) {
- *appid = layers[index].appid;
- return ViewMode::App;
- }
-
- // This is the VR app, ignore it.
- index++;
-
- // Now, find a dim layer if it exists.
- // If it does, ignore any layers behind it for visibility determination.
- for (size_t i = index; i < layers.size(); i++) {
- if (layers[i].appid == HwcCallback::HwcLayer::kSurfaceFlingerLayer) {
- index = i + 1;
- }
- }
-
- // If any non-skipped layers exist now then we show, otherwise hide.
- for (size_t i = index; i < layers.size(); i++) {
- if (!layers[i].should_skip_layer())
- return ViewMode::VR;
- }
-
- return ViewMode::Hidden;
-}
-
-} // namespace
-
-DisplayView::DisplayView(uint32_t id, int touchpad_id)
- : id_(id), touchpad_id_(touchpad_id) {
- translate_ = Eigen::Translation3f(0, 0, -5.0f);
- ime_translate_ = mat4(Eigen::Translation3f(0.0f, -0.5f, 0.25f));
- ime_top_left_ = vec2(0, 0);
- ime_size_ = vec2(0, 0);
- rotation_ = mat4::Identity();
-}
-
-DisplayView::~DisplayView() {}
-
-void DisplayView::Recenter(const mat4& initial) {
- initial_head_matrix_ =
- initial * Eigen::AngleAxisf(M_PI * 0.5f, vec3::UnitZ());
-}
-
-void DisplayView::SetPrograms(ShaderProgram* program,
- ShaderProgram* overlay_program) {
- program_ = program;
- overlay_program_ = overlay_program;
-}
-
-void DisplayView::DrawEye(EyeType /* eye */, const mat4& perspective,
- const mat4& eye_matrix, const mat4& head_matrix,
- float fade_value) {
- scale_ = GetScalingMatrix(size_.x(), size_.y());
-
- DrawOverlays(perspective, eye_matrix, head_matrix, fade_value);
-}
-
-void DisplayView::AdvanceFrame() {
- if (!pending_frames_.empty()) {
- // Check if we should advance the frame.
- auto& frame = pending_frames_.front();
- if (frame.visibility == ViewMode::Hidden ||
- frame.frame->Finish() == HwcCallback::FrameStatus::kFinished) {
- current_frame_ = std::move(frame);
- pending_frames_.pop_front();
- }
- }
-}
-
-void DisplayView::OnDrawFrame(SurfaceFlingerView* surface_flinger_view,
- bool debug_mode) {
- textures_.clear();
- has_ime_ = false;
-
- if (!visible())
- return;
-
- surface_flinger_view->GetTextures(*current_frame_.frame.get(), &textures_,
- &ime_texture_, debug_mode,
- current_frame_.visibility == ViewMode::VR);
- has_ime_ = ime_texture_.texture != nullptr;
-}
-
-base::unique_fd DisplayView::OnFrame(std::unique_ptr<HwcCallback::Frame> frame,
- bool debug_mode, bool is_vr_active,
- bool* showing) {
- size_ = vec2(frame->display_width(), frame->display_height());
- uint32_t app = current_vr_app_;
- ViewMode visibility = CalculateVisibilityFromLayerConfig(*frame.get(), &app);
-
- if (visibility == ViewMode::Hidden && debug_mode)
- visibility = ViewMode::VR;
-
- if (frame->layers().empty()) {
- current_vr_app_ = 0;
- } else if (visibility == ViewMode::App) {
- // This is either a VR app switch or a 2D app launching.
- // If we can have VR apps, update if it's 0.
- if (!always_2d_ && is_vr_active && !use_2dmode_ && app != kSystemId) {
- visibility = ViewMode::Hidden;
- current_vr_app_ = app;
- }
- } else if ((use_2dmode_ || !is_vr_active) && app != 0 &&
- visibility == ViewMode::Hidden) {
- // This is the case for the VR app launching a 2D intent of itself on some
- // display.
- visibility = ViewMode::App;
- } else if (!current_vr_app_) {
- // The VR app is running.
- current_vr_app_ = app;
- }
-
- pending_frames_.emplace_back(std::move(frame), visibility);
-
- if (pending_frames_.size() > kMaximumPendingFrames) {
- pending_frames_.pop_front();
- }
-
- if (visibility == ViewMode::Hidden &&
- current_frame_.visibility == ViewMode::Hidden) {
- // Consume all frames while hidden.
- while (!pending_frames_.empty())
- AdvanceFrame();
- }
-
- // If we are showing ourselves the main thread is not processing anything,
- // so give it a kick.
- if (visibility != ViewMode::Hidden &&
- current_frame_.visibility == ViewMode::Hidden) {
- *showing = true;
- }
-
- return base::unique_fd(dup(release_fence_.get()));
-}
-
-bool DisplayView::IsHit(const vec3& view_location, const vec3& view_direction,
- vec3* hit_location, vec2* hit_location_in_window_coord,
- bool test_ime) {
- mat4 m = GetStandardTransform();
- if (test_ime)
- m = m * ime_translate_;
- mat4 inverse = (m * scale_).inverse();
- vec4 transformed_loc =
- inverse * vec4(view_location[0], view_location[1], view_location[2], 1);
- vec4 transformed_dir = inverse * vec4(view_direction[0], view_direction[1],
- view_direction[2], 0);
-
- if (transformed_dir.z() >= 0 || transformed_loc.z() <= 0)
- return false;
-
- float distance = -transformed_loc.z() / transformed_dir.z();
- vec4 transformed_hit_loc = transformed_loc + transformed_dir * distance;
- if (transformed_hit_loc.x() < -1 || transformed_hit_loc.x() > 1)
- return false;
- if (transformed_hit_loc.y() < -1 || transformed_hit_loc.y() > 1)
- return false;
-
- hit_location_in_window_coord->x() =
- (1 + transformed_hit_loc.x()) / 2 * size_.x();
- hit_location_in_window_coord->y() =
- (1 - transformed_hit_loc.y()) / 2 * size_.y();
-
- *hit_location = view_location + view_direction * distance;
- return true;
-}
-
-void DisplayView::DrawOverlays(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix, float fade_value) {
- if (textures_.empty())
- return;
-
- program_->Use();
- mat4 mvp = perspective * eye_matrix * head_matrix;
- GLint view_projection_location =
- glGetUniformLocation(program_->GetProgram(), "uViewProjection");
- glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
-
- GLint alpha_location = glGetUniformLocation(program_->GetProgram(), "uAlpha");
-
- GLint tex_location = glGetUniformLocation(program_->GetProgram(), "tex");
- glUniform1i(tex_location, 0);
- glActiveTexture(GL_TEXTURE0);
-
- for (const auto& texture_layer : textures_) {
- switch (texture_layer.blending) {
- case HWC2_BLEND_MODE_PREMULTIPLIED:
- glEnable(GL_BLEND);
- glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
- break;
- case HWC2_BLEND_MODE_COVERAGE:
- glEnable(GL_BLEND);
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
- break;
- default:
- break;
- }
-
- glUniform1f(alpha_location, fade_value * texture_layer.alpha);
-
- glBindTexture(GL_TEXTURE_2D, texture_layer.texture->id());
-
- mat4 layer_transform =
- GetLayerTransform(texture_layer, size_.x(), size_.y());
-
- mat4 transform = GetStandardTransform() * scale_ * layer_transform;
- DrawWithTransform(transform, *program_);
-
- glDisable(GL_BLEND);
- }
-
- if (has_ime_) {
- ime_top_left_ = vec2(static_cast<float>(ime_texture_.display_frame.left),
- static_cast<float>(ime_texture_.display_frame.top));
- ime_size_ = vec2(static_cast<float>(ime_texture_.display_frame.right -
- ime_texture_.display_frame.left),
- static_cast<float>(ime_texture_.display_frame.bottom -
- ime_texture_.display_frame.top));
-
- DrawDimOverlay(mvp, textures_[0], ime_top_left_, ime_top_left_ + ime_size_);
-
- DrawIme();
- }
-}
-
-void DisplayView::UpdateReleaseFence() {
- EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
- EGLSyncKHR sync =
- eglCreateSyncKHR(display, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
- if (sync != EGL_NO_SYNC_KHR) {
- // Need to flush in order to get the fence FD.
- glFlush();
- base::unique_fd fence(eglDupNativeFenceFDANDROID(display, sync));
- eglDestroySyncKHR(display, sync);
- release_fence_ = std::move(fence);
- } else {
- ALOGE("Failed to create sync fence");
- release_fence_ = base::unique_fd();
- }
-}
-
-mat4 DisplayView::GetStandardTransform() {
- mat4 m = initial_head_matrix_ * rotation_ * translate_;
- if (current_frame_.visibility == ViewMode::App)
- m *= Eigen::AngleAxisf(M_PI * -0.5f, vec3::UnitZ());
- return m;
-}
-
-void DisplayView::DrawIme() {
- program_->Use();
- glBindTexture(GL_TEXTURE_2D, ime_texture_.texture->id());
-
- mat4 layer_transform = GetLayerTransform(ime_texture_, size_.x(), size_.y());
-
- mat4 transform =
- GetStandardTransform() * ime_translate_ * scale_ * layer_transform;
-
- DrawWithTransform(transform, *program_);
-}
-
-void DisplayView::DrawDimOverlay(const mat4& mvp, const TextureLayer& layer,
- const vec2& top_left,
- const vec2& bottom_right) {
- overlay_program_->Use();
- glUniformMatrix4fv(
- glGetUniformLocation(overlay_program_->GetProgram(), "uViewProjection"),
- 1, 0, mvp.data());
- glUniform4f(glGetUniformLocation(overlay_program_->GetProgram(), "uCoords"),
- top_left.x() / size_.x(), top_left.y() / size_.y(),
- bottom_right.x() / size_.x(), bottom_right.y() / size_.y());
- glEnable(GL_BLEND);
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
- mat4 layer_transform = GetLayerTransform(layer, size_.x(), size_.y());
-
- mat4 transform = GetStandardTransform() * scale_ * layer_transform;
- DrawWithTransform(transform, *overlay_program_);
- glDisable(GL_BLEND);
-}
-
-void DisplayView::DrawWithTransform(const mat4& transform,
- const ShaderProgram& program) {
- GLint transform_location =
- glGetUniformLocation(program.GetProgram(), "uTransform");
- glUniformMatrix4fv(transform_location, 1, 0, transform.data());
-
- glEnableVertexAttribArray(0);
- glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, kVertices);
- glEnableVertexAttribArray(1);
- glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, kTextureVertices);
- glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
-}
-
-bool DisplayView::UpdateHitInfo(const vec3& view_location,
- const vec3& view_direction,
- vec3* hit_location) {
- bool is_hit = false;
- if (has_ime_) {
- // This will set allow_input_ and hit_location_in_window_coord_.
- is_hit = IsImeHit(view_location, view_direction, hit_location);
- } else {
- is_hit = IsHit(view_location, view_direction, hit_location,
- &hit_location_in_window_coord_, false);
- allow_input_ = is_hit;
- }
- return is_hit;
-}
-
-bool DisplayView::IsImeHit(const vec3& view_location,
- const vec3& view_direction, vec3* hit_location) {
- // First, check if the IME window is hit.
- bool is_hit = IsHit(view_location, view_direction, hit_location,
- &hit_location_in_window_coord_, true);
- if (is_hit) {
- // If it is, check if the window coordinate is in the IME region;
- // if so then we are done.
- if (IsInside(hit_location_in_window_coord_, ime_top_left_,
- ime_top_left_ + ime_size_)) {
- allow_input_ = true;
- return true;
- }
- }
-
- allow_input_ = false;
- // Check if we have hit the main window.
- is_hit = IsHit(view_location, view_direction, hit_location,
- &hit_location_in_window_coord_, false);
- if (is_hit) {
- // Only allow input if we are not hitting the region hidden by the IME.
- // Allowing input here would cause clicks on the main window to actually
- // be clicks on the IME.
- if (!IsInside(hit_location_in_window_coord_, ime_top_left_,
- ime_top_left_ + ime_size_)) {
- allow_input_ = true;
- }
- }
- return is_hit;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/display_view.h b/services/vr/vr_window_manager/display_view.h
deleted file mode 100644
index ad624c6..0000000
--- a/services/vr/vr_window_manager/display_view.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_DISPLAY_VIEW_H_
-#define VR_WINDOW_MANAGER_DISPLAY_VIEW_H_
-
-#include <private/dvr/graphics/mesh.h>
-#include <private/dvr/graphics/shader_program.h>
-
-#include "hwc_callback.h"
-#include "surface_flinger_view.h"
-
-namespace android {
-namespace dvr {
-
-enum class ViewMode {
- Hidden,
- VR,
- App,
-};
-
-class DisplayView {
- public:
- DisplayView(uint32_t id, int touchpad_id);
- ~DisplayView();
-
- // Calls to these 3 functions must be synchronized.
- base::unique_fd OnFrame(std::unique_ptr<HwcCallback::Frame> frame,
- bool debug_mode, bool is_vr_active, bool* showing);
- void AdvanceFrame();
- void UpdateReleaseFence();
-
- void OnDrawFrame(SurfaceFlingerView* surface_flinger_view, bool debug_mode);
- void DrawEye(EyeType eye, const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix, float fade_value);
-
- void Recenter(const mat4& initial);
-
- bool UpdateHitInfo(const vec3& view_location, const vec3& view_direction,
- vec3* hit_location);
-
- void SetPrograms(ShaderProgram* program, ShaderProgram* overlay_program);
-
- bool visible() const { return current_frame_.visibility != ViewMode::Hidden; }
- bool allow_input() const { return allow_input_; }
- const vec2& hit_location() const { return hit_location_in_window_coord_; }
- uint32_t id() const { return id_; }
- int touchpad_id() const { return touchpad_id_; }
- vec2 size() const { return size_; }
-
- void set_2dmode(bool mode) { use_2dmode_ = mode; }
- void set_always_2d(bool mode) { always_2d_ = mode; }
-
- void set_rotation(const mat4& rotation) { rotation_ = rotation; }
-
- private:
- bool IsHit(const vec3& view_location, const vec3& view_direction,
- vec3* hit_location, vec2* hit_location_in_window_coord,
- bool test_ime);
- bool IsImeHit(const vec3& view_location, const vec3& view_direction,
- vec3* hit_location);
- void DrawOverlays(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix, float fade_value);
- void DrawIme();
- void DrawDimOverlay(const mat4& mvp, const TextureLayer& layer,
- const vec2& top_left, const vec2& bottom_right);
- void DrawWithTransform(const mat4& transform, const ShaderProgram& program);
-
- // This is the rotated, translated but unscaled transform to apply everywhere.
- mat4 GetStandardTransform();
-
- uint32_t id_;
- int touchpad_id_;
-
- uint32_t current_vr_app_;
-
- ShaderProgram* program_;
- ShaderProgram* overlay_program_;
-
- mat4 initial_head_matrix_;
- mat4 scale_;
- mat4 translate_;
- mat4 ime_translate_;
- mat4 rotation_;
- vec2 size_;
-
- std::vector<TextureLayer> textures_;
- TextureLayer ime_texture_;
-
- bool allow_input_ = false;
- vec2 hit_location_in_window_coord_;
- vec2 ime_top_left_;
- vec2 ime_size_;
- bool has_ime_ = false;
- bool use_2dmode_ = false;
- bool always_2d_ = false;
-
- struct PendingFrame {
- PendingFrame() = default;
- PendingFrame(std::unique_ptr<HwcCallback::Frame>&& frame,
- ViewMode visibility)
- : frame(std::move(frame)), visibility(visibility) {}
- PendingFrame(PendingFrame&& r)
- : frame(std::move(r.frame)), visibility(r.visibility) {}
-
- void operator=(PendingFrame&& r) {
- frame.reset(r.frame.release());
- visibility = r.visibility;
- }
-
- std::unique_ptr<HwcCallback::Frame> frame;
- ViewMode visibility = ViewMode::Hidden;
- };
- std::deque<PendingFrame> pending_frames_;
- PendingFrame current_frame_;
- base::unique_fd release_fence_;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_DISPLAY_VIEW_H_
diff --git a/services/vr/vr_window_manager/elbow_model.cpp b/services/vr/vr_window_manager/elbow_model.cpp
deleted file mode 100644
index 9543f17..0000000
--- a/services/vr/vr_window_manager/elbow_model.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-#include "elbow_model.h"
-
-#include <log/log.h>
-
-namespace android {
-namespace dvr {
-namespace {
-
-const vec3 kControllerForearm(0.0f, 0.0f, -0.25f);
-const vec3 kControllerPosition(0.0f, 0.0f, -0.05f);
-const vec3 kLeftElbowPosition(-0.195f, -0.5f, 0.075f);
-const vec3 kLeftArmExtension(0.13f, 0.14f, -0.08f);
-const vec3 kRightElbowPosition(0.195f, -0.5f, 0.075f);
-const vec3 kRightArmExtension(-0.13f, 0.14f, -0.08f);
-constexpr float kElbowBendRatio = 0.4f;
-constexpr float kCosMaxExtensionAngle =
- 0.87f; // Cos of 30 degrees (90-30 = 60)
-constexpr float kCosMinExtensionAngle = 0.12f; // Cos of 83 degrees (90-83 = 7)
-constexpr float kYAxisExtensionFraction = 0.4f;
-constexpr float kMinRotationSpeed = 0.61f; // 35 degrees in radians
-constexpr float kMinAngleDelta = 0.175f; // 10 degrees in radians
-
-float clamp(float v, float min, float max) {
- if (v < min)
- return min;
- if (v > max)
- return max;
- return v;
-}
-
-float NormalizeAngle(float angle) {
- if (angle > M_PI)
- angle = 2.0f * M_PI - angle;
- return angle;
-}
-
-} // namespace
-
-const vec3 ElbowModel::kDefaultNeckPosition = vec3(0, -0.075f, -0.080f);
-
-ElbowModel::ElbowModel() {}
-ElbowModel::~ElbowModel() {}
-
-void ElbowModel::Enable(const vec3& neck_position, bool right_handed) {
- enabled_ = true;
- neck_position_ = neck_position;
-
- if (right_handed) {
- elbow_position_ = kRightElbowPosition;
- arm_extension_ = kRightArmExtension;
- } else {
- elbow_position_ = kLeftElbowPosition;
- arm_extension_ = kLeftArmExtension;
- }
-
- ResetRoot();
-}
-
-void ElbowModel::Disable() { enabled_ = false; }
-
-vec3 ElbowModel::Update(float delta_t, const quat& hmd_orientation,
- const quat& controller_orientation, bool recenter) {
- if (!enabled_)
- return vec3::Zero();
-
- float heading_rad = GetHeading(hmd_orientation);
-
- quat y_rotation;
- y_rotation = Eigen::AngleAxis<float>(heading_rad, vec3::UnitY());
-
- // If the controller's angular velocity is above a certain amount, we can
- // assume torso rotation and move the elbow joint relative to the
- // camera orientation.
- float angle_delta = last_controller_.angularDistance(controller_orientation);
- float rot_speed = angle_delta / delta_t;
-
- if (recenter) {
- root_rot_ = y_rotation;
- } else if (rot_speed > kMinRotationSpeed) {
- root_rot_.slerp(angle_delta / kMinAngleDelta, y_rotation);
- }
-
- // Calculate angle (or really, cos thereof) between controller forward vector
- // and Y axis to determine extension amount.
- vec3 controller_forward_rotated = controller_orientation * -vec3::UnitZ();
- float dot_y = controller_forward_rotated.y();
- float amt_extension = clamp(dot_y - kCosMinExtensionAngle, 0, 1);
-
- // Remove the root rotation from the orientation reading--we'll add it back in
- // later.
- quat controller_rot = root_rot_.inverse() * controller_orientation;
- controller_forward_rotated = controller_rot * -vec3::UnitZ();
- quat rot_xy;
- rot_xy.setFromTwoVectors(-vec3::UnitZ(), controller_forward_rotated);
-
- // Fixing polar singularity
- float total_angle = NormalizeAngle(atan2f(rot_xy.norm(), rot_xy.w()) * 2.0f);
- float lerp_amount = (1.0f - powf(total_angle / M_PI, 6.0f)) *
- (1.0f - (kElbowBendRatio +
- (1.0f - kElbowBendRatio) *
- (amt_extension + kYAxisExtensionFraction)));
-
- // Calculate the relative rotations of the elbow and wrist joints.
- quat wrist_rot = quat::Identity();
- wrist_rot.slerp(lerp_amount, rot_xy);
- quat elbow_rot = wrist_rot.inverse() * rot_xy;
-
- last_controller_ = controller_orientation;
-
- vec3 position =
- root_rot_ *
- ((controller_root_offset_ + arm_extension_ * amt_extension) +
- elbow_rot * (kControllerForearm + wrist_rot * kControllerPosition));
-
- return position;
-}
-
-float ElbowModel::GetHeading(const quat& orientation) {
- vec3 gaze = orientation * -vec3::UnitZ();
-
- if (gaze.y() > 0.99)
- gaze = orientation * -vec3::UnitY();
- else if (gaze.y() < -0.99)
- gaze = orientation * vec3::UnitY();
-
- return atan2f(-gaze.x(), -gaze.z());
-}
-
-void ElbowModel::ResetRoot() {
- controller_root_offset_ = elbow_position_ + neck_position_;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/elbow_model.h b/services/vr/vr_window_manager/elbow_model.h
deleted file mode 100644
index a6d5ca9..0000000
--- a/services/vr/vr_window_manager/elbow_model.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_ELBOW_MODEL_H_
-#define VR_WINDOW_MANAGER_ELBOW_MODEL_H_
-
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-class ElbowModel {
- public:
- ElbowModel();
- ~ElbowModel();
-
- void Enable(const vec3& neck_position, bool right_handed);
- void Disable();
-
- vec3 Update(float delta_t, const quat& hmd_orientation,
- const quat& controller_orientation, bool recenter);
-
- static const vec3 kDefaultNeckPosition;
-
- private:
- ElbowModel(const ElbowModel&) = delete;
- void operator=(const ElbowModel&) = delete;
-
- void ResetRoot();
-
- float GetHeading(const quat& orientation);
-
- bool enabled_ = false;
-
- quat last_controller_ = quat::Identity();
-
- quat root_rot_ = quat::Identity();
-
- vec3 controller_root_offset_ = vec3::Zero();
- vec3 elbow_position_ = vec3::Zero();
- vec3 arm_extension_ = vec3::Zero();
- vec3 neck_position_ = vec3::Zero();
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_ELBOW_MODEL_H_
diff --git a/services/vr/vr_window_manager/hwc_callback.cpp b/services/vr/vr_window_manager/hwc_callback.cpp
deleted file mode 100644
index 28e97ff..0000000
--- a/services/vr/vr_window_manager/hwc_callback.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-#include "hwc_callback.h"
-
-#include <android-base/unique_fd.h>
-#include <log/log.h>
-#include <private/dvr/native_buffer.h>
-#include <sync/sync.h>
-#include <ui/GraphicBufferMapper.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-HwcCallback::FrameStatus GetFrameStatus(const HwcCallback::Frame& frame) {
- for (const auto& layer : frame.layers()) {
- // If there is no fence it means the buffer is already finished.
- if (layer.fence->isValid()) {
- status_t result = layer.fence->wait(0);
- if (result != OK) {
- if (result != -ETIME) {
- ALOGE("fence wait on buffer fence failed. status=%d (%s).",
- result, strerror(-result));
- return HwcCallback::FrameStatus::kError;
- }
- return HwcCallback::FrameStatus::kUnfinished;
- }
- }
- }
-
- return HwcCallback::FrameStatus::kFinished;
-}
-
-} // namespace
-
-void HwcCallback::HwcLayer::PrintLayer() {
- ALOGI("appid=%d, type=%d, alpha=%.2f, cursor=%dx%d, color=%02X%02X%02X%02X, "
- "crop=%.1f,%.1f,%.1f,%.1f, display=%d,%d,%d,%d, dataspace=%d, "
- "transform=%d", appid, type, alpha, cursor_x, cursor_y, color.r, color.g,
- color.b, color.a, crop.left, crop.top, crop.right, crop.bottom,
- display_frame.left, display_frame.right, display_frame.top,
- display_frame.bottom, dataspace, transform);
-}
-
-HwcCallback::HwcCallback(Client* client) : client_(client) {
-}
-
-HwcCallback::~HwcCallback() {
-}
-
-binder::Status HwcCallback::onNewFrame(
- const ParcelableComposerFrame& parcelable_frame,
- ParcelableUniqueFd* fence) {
- ComposerView::Frame frame = parcelable_frame.frame();
- std::vector<HwcLayer> hwc_frame(frame.layers.size());
- for (size_t i = 0; i < frame.layers.size(); ++i) {
- const ComposerView::ComposerLayer& layer = frame.layers[i];
- hwc_frame[i] = HwcLayer{
- .fence = layer.fence,
- .buffer = layer.buffer,
- .crop = layer.crop,
- .display_frame = layer.display_frame,
- .blending = static_cast<int32_t>(layer.blend_mode),
- .appid = layer.app_id,
- .type = static_cast<HwcLayer::LayerType>(layer.type),
- .alpha = layer.alpha,
- .cursor_x = layer.cursor_x,
- .cursor_y = layer.cursor_y,
- .color = layer.color,
- .dataspace = layer.dataspace,
- .transform = layer.transform,
- };
- }
-
- fence->set_fence(client_->OnFrame(std::make_unique<Frame>(
- std::move(hwc_frame), frame.display_id, frame.removed,
- frame.display_width, frame.display_height)));
- return binder::Status::ok();
-}
-
-HwcCallback::Frame::Frame(std::vector<HwcLayer>&& layers, uint32_t display_id,
- bool removed, int32_t display_width,
- int32_t display_height)
- : display_id_(display_id),
- removed_(removed),
- display_width_(display_width),
- display_height_(display_height),
- layers_(std::move(layers)) {}
-
-HwcCallback::FrameStatus HwcCallback::Frame::Finish() {
- if (status_ == FrameStatus::kUnfinished)
- status_ = GetFrameStatus(*this);
- return status_;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/hwc_callback.h b/services/vr/vr_window_manager/hwc_callback.h
deleted file mode 100644
index 259c4ac..0000000
--- a/services/vr/vr_window_manager/hwc_callback.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_HWC_CALLBACK_H_
-#define VR_WINDOW_MANAGER_HWC_CALLBACK_H_
-
-#include <android/dvr/BnVrComposerCallback.h>
-#include <android-base/unique_fd.h>
-#include <impl/vr_hwc.h>
-
-#include <deque>
-#include <functional>
-#include <mutex>
-#include <vector>
-
-namespace android {
-
-class Fence;
-class GraphicBuffer;
-
-namespace dvr {
-
-using Recti = ComposerView::ComposerLayer::Recti;
-using Rectf = ComposerView::ComposerLayer::Rectf;
-
-class HwcCallback : public BnVrComposerCallback {
- public:
- struct HwcLayer {
- enum LayerType : uint32_t {
- // These are from frameworks/base/core/java/android/view/WindowManager.java
- kSurfaceFlingerLayer = 0,
- kUndefinedWindow = ~0U,
- kFirstApplicationWindow = 1,
- kLastApplicationWindow = 99,
- kFirstSubWindow = 1000,
- kLastSubWindow = 1999,
- kFirstSystemWindow = 2000,
- kStatusBar = kFirstSystemWindow,
- kInputMethod = kFirstSystemWindow + 11,
- kNavigationBar = kFirstSystemWindow + 19,
- kLastSystemWindow = 2999,
- };
-
- bool should_skip_layer() const {
- switch (type) {
- // Always skip the following layer types
- case kNavigationBar:
- case kStatusBar:
- case kSurfaceFlingerLayer:
- case kUndefinedWindow:
- return true;
- default:
- return false;
- }
- }
-
- // This is a layer that provides some other functionality, eg dim layer.
- // We use this to determine the point at which layers are "on top".
- bool is_extra_layer() const {
- switch(type) {
- case kSurfaceFlingerLayer:
- case kUndefinedWindow:
- return true;
- default:
- return false;
- }
- }
-
- void PrintLayer();
-
- sp<Fence> fence;
- sp<GraphicBuffer> buffer;
- Rectf crop;
- Recti display_frame;
- int32_t blending;
- uint32_t appid;
- LayerType type;
- float alpha;
- int32_t cursor_x;
- int32_t cursor_y;
- IComposerClient::Color color;
- int32_t dataspace;
- int32_t transform;
- };
-
- enum class FrameStatus {
- kUnfinished,
- kFinished,
- kError
- };
-
- class Frame {
- public:
- Frame(std::vector<HwcLayer>&& layers, uint32_t display_id, bool removed,
- int32_t display_width, int32_t display_height);
-
- FrameStatus Finish();
- const std::vector<HwcLayer>& layers() const { return layers_; }
- uint32_t display_id() const { return display_id_; }
- bool removed() const { return removed_; }
- int32_t display_width() const { return display_width_; }
- int32_t display_height() const { return display_height_; }
-
- private:
- uint32_t display_id_;
- bool removed_;
- int32_t display_width_;
- int32_t display_height_;
- std::vector<HwcLayer> layers_;
- FrameStatus status_ = FrameStatus::kUnfinished;
- };
-
- class Client {
- public:
- virtual ~Client() {}
- virtual base::unique_fd OnFrame(std::unique_ptr<Frame>) = 0;
- };
-
- explicit HwcCallback(Client* client);
- ~HwcCallback() override;
-
- private:
- binder::Status onNewFrame(const ParcelableComposerFrame& frame,
- ParcelableUniqueFd* fence) override;
-
- Client *client_;
-
- HwcCallback(const HwcCallback&) = delete;
- void operator=(const HwcCallback&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_HWC_CALLBACK_H_
diff --git a/services/vr/vr_window_manager/proguard.flags b/services/vr/vr_window_manager/proguard.flags
deleted file mode 100644
index 7683d6e..0000000
--- a/services/vr/vr_window_manager/proguard.flags
+++ /dev/null
@@ -1,22 +0,0 @@
-# Don't obfuscate any NDK/SDK code. This makes the debugging of stack traces in
-# in release builds easier.
--keepnames class com.google.vr.ndk.** { *; }
--keepnames class com.google.vr.sdk.** { *; }
-
-# These are part of the SDK <-> VrCore interfaces for GVR.
--keepnames class com.google.vr.vrcore.library.api.** { *; }
-
-# These are part of the Java <-> native interfaces for GVR.
--keep class com.google.vr.** { native <methods>; }
-
--keep class com.google.vr.cardboard.annotations.UsedByNative
--keep @com.google.vr.cardboard.annotations.UsedByNative class *
--keepclassmembers class * {
- @com.google.vr.cardboard.annotations.UsedByNative *;
-}
-
--keep class com.google.vr.cardboard.UsedByNative
--keep @com.google.vr.cardboard.UsedByNative class *
--keepclassmembers class * {
- @com.google.vr.cardboard.UsedByNative *;
-}
diff --git a/services/vr/vr_window_manager/reticle.cpp b/services/vr/vr_window_manager/reticle.cpp
deleted file mode 100644
index cbd0caf..0000000
--- a/services/vr/vr_window_manager/reticle.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-#include "reticle.h"
-
-#include <GLES/gl.h>
-#include <GLES/glext.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-const std::string kVertexShader = SHADER0([]() {
- layout(location = 0) in vec4 aPosition;
- layout(location = 1) in vec4 aTexCoord;
- uniform mat4 uViewProjection;
- uniform mat4 uTransform;
-
- out vec2 vTexCoord;
- void main() {
- gl_Position = uViewProjection * uTransform * aPosition;
- vTexCoord = aTexCoord.xy;
- }
-});
-
-const std::string kFragmentShader = SHADER0([]() {
- precision mediump float;
-
- in vec2 vTexCoord;
- uniform vec3 uColor;
-
- out vec4 fragColor;
- void main() {
- float alpha = smoothstep(1.0, 0.0, length(vTexCoord));
- fragColor = vec4(uColor, alpha);
- }
-});
-
-} // namespace
-
-Reticle::Reticle() {}
-
-Reticle::~Reticle() {}
-
-bool Reticle::Initialize() {
- program_.Link(kVertexShader, kFragmentShader);
- if (!program_)
- return false;
-
- return true;
-}
-
-void Reticle::ShowAt(const mat4& hit_transform, const vec3& color) {
- transform_ = hit_transform;
- shown_ = true;
-
- GLint view_projection_location =
- glGetUniformLocation(program_.GetProgram(), "uColor");
- glProgramUniform3f(program_.GetProgram(), view_projection_location, color.x(),
- color.y(), color.z());
-}
-
-void Reticle::Draw(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix) {
- if (!shown_)
- return;
-
- glEnable(GL_BLEND);
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
-
- program_.Use();
-
- const float kRadius = 0.015;
- GLfloat vertices[] = {
- -kRadius, -kRadius, 0, kRadius, -kRadius, 0,
- -kRadius, kRadius, 0, kRadius, kRadius, 0,
- };
- GLfloat texture_vertices[] = {
- -1, 1, 1, 1, -1, -1, 1, -1,
- };
-
- mat4 mvp = perspective * eye_matrix * head_matrix;
- GLint view_projection_location =
- glGetUniformLocation(program_.GetProgram(), "uViewProjection");
- glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
-
- GLint transform_location =
- glGetUniformLocation(program_.GetProgram(), "uTransform");
- glUniformMatrix4fv(transform_location, 1, 0, transform_.data());
-
- glEnableVertexAttribArray(0);
- glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertices);
- glEnableVertexAttribArray(1);
- glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texture_vertices);
-
- glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
-
- glDisable(GL_BLEND);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/reticle.h b/services/vr/vr_window_manager/reticle.h
deleted file mode 100644
index d8522aa..0000000
--- a/services/vr/vr_window_manager/reticle.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_SHELL_RETICLE_H_
-#define VR_WINDOW_MANAGER_SHELL_RETICLE_H_
-
-#include <private/dvr/graphics/shader_program.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-class Reticle {
- public:
- Reticle();
- ~Reticle();
-
- bool Initialize();
-
- void ShowAt(const mat4& hit_transform, const vec3& color);
- void Hide() { shown_ = false; }
-
- void Draw(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix);
-
- private:
- bool shown_ = false;
- ShaderProgram program_;
- mat4 transform_;
-
- Reticle(const Reticle&) = delete;
- void operator=(const Reticle&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_SHELL_RETICLE_H_
diff --git a/services/vr/vr_window_manager/shell_view.cpp b/services/vr/vr_window_manager/shell_view.cpp
deleted file mode 100644
index abd0651..0000000
--- a/services/vr/vr_window_manager/shell_view.cpp
+++ /dev/null
@@ -1,535 +0,0 @@
-#include "shell_view.h"
-
-#include <EGL/eglext.h>
-#include <GLES3/gl3.h>
-#include <android/input.h>
-#include <binder/IServiceManager.h>
-#include <dvr/graphics.h>
-#include <hardware/hwcomposer2.h>
-#include <inttypes.h>
-#include <log/log.h>
-
-#include "controller_mesh.h"
-#include "texture.h"
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-constexpr uint32_t kPrimaryDisplayId = 1;
-
-const std::string kVertexShader = SHADER0([]() {
- layout(location = 0) in vec4 aPosition;
- layout(location = 1) in vec4 aTexCoord;
- uniform mat4 uViewProjection;
- uniform mat4 uTransform;
-
- out vec2 vTexCoord;
- void main() {
- gl_Position = uViewProjection * uTransform * aPosition;
- vTexCoord = aTexCoord.xy;
- }
-});
-
-const std::string kFragmentShader = SHADER0([]() {
- precision mediump float;
-
- in vec2 vTexCoord;
- uniform sampler2D tex;
- uniform float uAlpha;
-
- out vec4 fragColor;
- void main() {
- fragColor = texture(tex, vTexCoord);
- fragColor.a *= uAlpha;
- }
-});
-
-// This shader provides a dim layer in a given rect. This is intended
-// to indicate the non-interactive region.
-// Texture coordinates between [uCoords.xy, uCoords.zw] are dim, otherwise
-// transparent.
-const std::string kOverlayFragmentShader = SHADER0([]() {
- precision highp float;
-
- in vec2 vTexCoord;
- uniform sampler2D tex;
- uniform vec4 uCoords;
-
- out vec4 fragColor;
- void main() {
- vec4 color = vec4(0, 0, 0, 0);
- if (all(greaterThan(vTexCoord, uCoords.xy)) &&
- all(lessThan(vTexCoord, uCoords.zw))) {
- color = vec4(0, 0, 0, 0.5);
- }
- fragColor = color;
- }
-});
-
-const std::string kControllerFragmentShader = SHADER0([]() {
- precision mediump float;
-
- in vec2 vTexCoord;
-
- out vec4 fragColor;
- void main() { fragColor = vec4(0.8, 0.2, 0.2, 1.0); }
-});
-
-mat4 GetHorizontallyAlignedMatrixFromPose(const Posef& pose) {
- vec3 position = pose.GetPosition();
- quat view_quaternion = pose.GetRotation();
-
- vec3 z = vec3(view_quaternion * vec3(0.0f, 0.0f, 1.0f));
- vec3 y(0.0f, 1.0f, 0.0f);
- vec3 x = y.cross(z);
- x.normalize();
- y = z.cross(x);
-
- mat4 m;
- // clang-format off
- m(0, 0) = x[0]; m(0, 1) = y[0]; m(0, 2) = z[0]; m(0, 3) = position[0];
- m(1, 0) = x[1]; m(1, 1) = y[1]; m(1, 2) = z[1]; m(1, 3) = position[1];
- m(2, 0) = x[2]; m(2, 1) = y[2]; m(2, 2) = z[2]; m(2, 3) = position[2];
- m(3, 0) = 0.0f; m(3, 1) = 0.0f; m(3, 2) = 0.0f; m(3, 3) = 1.0f;
- // clang-format on
-
- return m;
-}
-
-int GetTouchIdForDisplay(uint32_t display) {
- return display == kPrimaryDisplayId ? DVR_VIRTUAL_TOUCHPAD_PRIMARY
- : DVR_VIRTUAL_TOUCHPAD_VIRTUAL;
-}
-
-} // namespace
-
-ShellView::ShellView() {}
-
-ShellView::~ShellView() {}
-
-int ShellView::Initialize() {
- int ret = Application::Initialize();
- if (ret)
- return ret;
-
- virtual_touchpad_.reset(dvrVirtualTouchpadCreate());
- const status_t touchpad_status =
- dvrVirtualTouchpadAttach(virtual_touchpad_.get());
- if (touchpad_status != OK) {
- ALOGE("Failed to connect to virtual touchpad");
- return touchpad_status;
- }
-
- surface_flinger_view_.reset(new SurfaceFlingerView);
- if (!surface_flinger_view_->Initialize(this))
- return 1;
-
- return 0;
-}
-
-int ShellView::AllocateResources() {
- int ret = Application::AllocateResources();
- if (ret)
- return ret;
-
- program_.reset(new ShaderProgram);
- program_->Link(kVertexShader, kFragmentShader);
- overlay_program_.reset(new ShaderProgram);
- overlay_program_->Link(kVertexShader, kOverlayFragmentShader);
- controller_program_.reset(new ShaderProgram);
- controller_program_->Link(kVertexShader, kControllerFragmentShader);
- if (!program_ || !overlay_program_ || !controller_program_)
- return 1;
-
- reticle_.reset(new Reticle());
- if (!reticle_->Initialize())
- return 1;
-
- controller_mesh_.reset(new Mesh<vec3, vec3, vec2>());
- controller_mesh_->SetVertices(kNumControllerMeshVertices,
- kControllerMeshVertices);
-
- for (auto& display : displays_)
- display->SetPrograms(program_.get(), overlay_program_.get());
-
- initialized_ = true;
-
- return 0;
-}
-
-void ShellView::DeallocateResources() {
- {
- std::unique_lock<std::mutex> l(display_frame_mutex_);
- removed_displays_.clear();
- new_displays_.clear();
- displays_.clear();
- }
-
- display_client_.reset();
- reticle_.reset();
- controller_mesh_.reset();
- program_.reset(new ShaderProgram);
- overlay_program_.reset(new ShaderProgram);
- controller_program_.reset(new ShaderProgram);
- Application::DeallocateResources();
-}
-
-void ShellView::EnableDebug(bool debug) {
- QueueTask(debug ? MainThreadTask::EnableDebugMode
- : MainThreadTask::DisableDebugMode);
-}
-
-void ShellView::VrMode(bool mode) {
- QueueTask(mode ? MainThreadTask::EnteringVrMode
- : MainThreadTask::ExitingVrMode);
-}
-
-void ShellView::dumpInternal(String8& result) {
- result.append("[shell]\n");
- result.appendFormat("initialized = %s\n", initialized_ ? "true" : "false");
- result.appendFormat("is_visible = %s\n", is_visible_ ? "true" : "false");
- result.appendFormat("debug_mode = %s\n\n", debug_mode_ ? "true" : "false");
-
- result.append("[displays]\n");
- result.appendFormat("count = %zu\n", displays_.size());
- for (size_t i = 0; i < displays_.size(); ++i) {
- result.appendFormat(" display_id = %" PRId32 "\n", displays_[i]->id());
- result.appendFormat(" size=%fx%f\n", displays_[i]->size().x(),
- displays_[i]->size().y());
- }
-
- result.append("\n");
-}
-
-void ShellView::Set2DMode(bool mode) {
- if (!displays_.empty())
- displays_[0]->set_2dmode(mode);
-}
-
-void ShellView::SetRotation(int angle) {
- mat4 m(Eigen::AngleAxisf(M_PI * -0.5f * angle, vec3::UnitZ()));
- for (auto& d : displays_)
- d->set_rotation(m);
-}
-
-void ShellView::OnDrawFrame() {
- bool visible = false;
-
- {
- std::unique_lock<std::mutex> l(display_frame_mutex_);
-
- // Move any new displays into the list.
- if (!new_displays_.empty()) {
- for (auto& display : new_displays_) {
- display->Recenter(GetHorizontallyAlignedMatrixFromPose(last_pose_));
- display->SetPrograms(program_.get(), overlay_program_.get());
- displays_.emplace_back(display.release());
- }
- new_displays_.clear();
- }
-
- // Remove any old displays from the list now.
- if (!removed_displays_.empty()) {
- for (auto& display : removed_displays_) {
- displays_.erase(std::find_if(
- displays_.begin(), displays_.end(),
- [display](auto& ptr) { return display == ptr.get(); }));
- }
- removed_displays_.clear();
- }
-
- for (auto& display : displays_) {
- display->AdvanceFrame();
- visible = visible || display->visible();
- }
- }
-
- if (!debug_mode_ && visible != is_visible_) {
- SetVisibility(visible);
- }
-
- for (auto& display : displays_) {
- display->OnDrawFrame(surface_flinger_view_.get(), debug_mode_);
- }
-}
-
-void ShellView::OnEndFrame() {
- std::unique_lock<std::mutex> l(display_frame_mutex_);
- for (auto& display : displays_) {
- display->UpdateReleaseFence();
- }
-}
-
-DisplayView* ShellView::FindOrCreateDisplay(uint32_t id) {
- for (auto& display : displays_) {
- if (display->id() == id) {
- return display.get();
- }
- }
-
- // It might be pending addition.
- for (auto& display : new_displays_) {
- if (display->id() == id) {
- return display.get();
- }
- }
-
- auto display = new DisplayView(id, GetTouchIdForDisplay(id));
- // Virtual displays only ever have 2D apps so force it.
- if (id != kPrimaryDisplayId)
- display->set_always_2d(true);
- new_displays_.emplace_back(display);
- return display;
-}
-
-base::unique_fd ShellView::OnFrame(std::unique_ptr<HwcCallback::Frame> frame) {
- std::unique_lock<std::mutex> l(display_frame_mutex_);
- DisplayView* display = FindOrCreateDisplay(frame->display_id());
-
- if (frame->removed()) {
- removed_displays_.push_back(display);
- return base::unique_fd();
- }
-
- bool showing = false;
-
- // This is a temporary fix for now. These APIs will be changed when everything
- // is moved into vrcore.
- // Do this on demand in case vr_flinger crashed and we are reconnecting.
- if (!display_client_.get()) {
- int error = 0;
- display_client_ = display::DisplayClient::Create(&error);
-
- if (error) {
- ALOGE("Could not connect to display service : %s(%d)", strerror(error),
- error);
- return base::unique_fd();
- }
- }
-
- // TODO(achaulk): change when moved into vrcore.
- auto status = display_client_->IsVrAppRunning();
- if (!status) {
- ALOGE("Failed to check VR running status: %s",
- status.GetErrorMessage().c_str());
- return base::unique_fd();
- }
- const bool vr_running = status.get();
-
- base::unique_fd fd(
- display->OnFrame(std::move(frame), debug_mode_, vr_running, &showing));
-
- if (showing)
- QueueTask(MainThreadTask::Show);
-
- return fd;
-}
-
-void ShellView::DrawEye(EyeType eye, const mat4& perspective,
- const mat4& eye_matrix, const mat4& head_matrix) {
- if (should_recenter_ && !displays_.empty()) {
- // Position the quad horizontally aligned in the direction the user
- // is facing, effectively taking out head roll.
- displays_[0]->Recenter(GetHorizontallyAlignedMatrixFromPose(last_pose_));
- }
-
- for (auto& display : displays_) {
- if (display->visible()) {
- display->DrawEye(eye, perspective, eye_matrix, head_matrix, fade_value_);
- }
- }
-
- // TODO(alexst): Replicate controller rendering from VR Home.
- // Current approach in the function below is a quick visualization.
- DrawController(perspective, eye_matrix, head_matrix);
-
- DrawReticle(perspective, eye_matrix, head_matrix);
-}
-
-void ShellView::OnVisibilityChanged(bool visible) {
- should_recenter_ = visible;
- Application::OnVisibilityChanged(visible);
-}
-
-bool ShellView::OnClick(bool down) {
- if (down) {
- if (!is_touching_ && active_display_ && active_display_->allow_input()) {
- is_touching_ = true;
- }
- } else {
- is_touching_ = false;
- }
- Touch();
- return true;
-}
-
-void ShellView::DrawReticle(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix) {
- reticle_->Hide();
-
- vec3 pointer_location = last_pose_.GetPosition();
- quat view_quaternion = last_pose_.GetRotation();
-
- if (shmem_controller_active_) {
- view_quaternion = controller_orientation_;
- vec4 controller_location = controller_translate_ * vec4(0, 0, 0, 1);
- pointer_location = vec3(controller_location.x(), controller_location.y(),
- controller_location.z());
-
- if (shmem_controller_active_) {
- uint64_t buttons = shmem_controller_buttons_;
- shmem_controller_buttons_ = 0;
- while (buttons) {
- switch (buttons & 0xF) {
- case 0x1:
- OnClick(false);
- break;
- case 0x3:
- OnTouchpadButton(false, AMOTION_EVENT_BUTTON_BACK);
- break;
- case 0x4:
- should_recenter_ = true;
- break;
- case 0x9:
- OnClick(true);
- break;
- case 0xB:
- OnTouchpadButton(true, AMOTION_EVENT_BUTTON_BACK);
- break;
- default:
- break;
- }
- buttons >>= 4;
- }
- }
- }
-
- vec3 hit_location;
- active_display_ =
- FindActiveDisplay(pointer_location, view_quaternion, &hit_location);
-
- if (active_display_) {
- reticle_->ShowAt(
- Eigen::Translation3f(hit_location) * view_quaternion.matrix(),
- active_display_->allow_input() ? vec3(1, 0, 0) : vec3(0, 0, 0));
- Touch();
- }
-
- reticle_->Draw(perspective, eye_matrix, head_matrix);
-}
-
-DisplayView* ShellView::FindActiveDisplay(const vec3& position,
- const quat& quaternion,
- vec3* hit_location) {
- vec3 direction = vec3(quaternion * vec3(0, 0, -1));
- vec3 temp_hit;
-
- DisplayView* best_display = nullptr;
- vec3 best_hit;
-
- auto is_better = [&best_hit, &position](DisplayView*, const vec3& hit) {
- return (hit - position).squaredNorm() < (best_hit - position).squaredNorm();
- };
-
- for (auto& display : displays_) {
- if (display->UpdateHitInfo(position, direction, &temp_hit)) {
- if (!best_display || is_better(display.get(), temp_hit)) {
- best_display = display.get();
- best_hit = temp_hit;
- }
- }
- }
-
- if (best_display)
- *hit_location = best_hit;
- return best_display;
-}
-
-void ShellView::DrawController(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix) {
- if (!shmem_controller_active_)
- return;
-
- controller_program_->Use();
- mat4 mvp = perspective * eye_matrix * head_matrix;
-
- GLint view_projection_location = glGetUniformLocation(
- controller_program_->GetProgram(), "uViewProjection");
- glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
-
- quat view_quaternion = controller_orientation_;
- view_quaternion.toRotationMatrix();
-
- vec3 world_pos = last_pose_.GetPosition() + controller_position_;
-
- controller_translate_ =
- Eigen::Translation3f(world_pos.x(), world_pos.y(), world_pos.z());
-
- mat4 transform = controller_translate_ * view_quaternion *
- mat4(Eigen::Scaling<float>(1, 1, 3.0));
- GLint transform_location =
- glGetUniformLocation(controller_program_->GetProgram(), "uTransform");
- glUniformMatrix4fv(transform_location, 1, 0, transform.data());
-
- controller_mesh_->Draw();
-}
-
-void ShellView::Touch() {
- if (!virtual_touchpad_) {
- ALOGE("missing virtual touchpad");
- return;
- }
-
- if (!active_display_)
- return;
-
- const vec2& hit_location = active_display_->hit_location();
- const vec2 size = active_display_->size();
-
- float x = hit_location.x() / size.x();
- float y = hit_location.y() / size.y();
-
- // Device is portrait, but in landscape when in VR.
- // Rotate touch input appropriately.
- const android::status_t status = dvrVirtualTouchpadTouch(
- virtual_touchpad_.get(), active_display_->touchpad_id(), x, y,
- is_touching_ ? 1.0f : 0.0f);
- if (status != OK) {
- ALOGE("touch failed: %d", status);
- }
-}
-
-bool ShellView::OnTouchpadButton(bool down, int button) {
- int buttons = touchpad_buttons_;
- if (down) {
- if (active_display_ && active_display_->allow_input()) {
- buttons |= button;
- }
- } else {
- buttons &= ~button;
- }
- if (buttons == touchpad_buttons_) {
- return true;
- }
- touchpad_buttons_ = buttons;
- if (!virtual_touchpad_) {
- ALOGE("missing virtual touchpad");
- return false;
- }
-
- if (!active_display_)
- return true;
-
- const android::status_t status = dvrVirtualTouchpadButtonState(
- virtual_touchpad_.get(), active_display_->touchpad_id(),
- touchpad_buttons_);
- if (status != OK) {
- ALOGE("touchpad button failed: %d %d", touchpad_buttons_, status);
- }
- return true;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/shell_view.h b/services/vr/vr_window_manager/shell_view.h
deleted file mode 100644
index 9b51600..0000000
--- a/services/vr/vr_window_manager/shell_view.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_SHELL_VIEW_H_
-#define VR_WINDOW_MANAGER_SHELL_VIEW_H_
-
-#include <dvr/virtual_touchpad_client.h>
-#include <private/dvr/display_client.h>
-#include <private/dvr/graphics/mesh.h>
-#include <private/dvr/graphics/shader_program.h>
-
-#include <deque>
-
-#include "application.h"
-#include "display_view.h"
-#include "reticle.h"
-#include "shell_view_binder_interface.h"
-#include "surface_flinger_view.h"
-
-namespace android {
-namespace dvr {
-
-class ShellView : public Application,
- public android::dvr::ShellViewBinderInterface,
- public HwcCallback::Client {
- public:
- ShellView();
- virtual ~ShellView();
-
- int Initialize() override;
-
- int AllocateResources() override;
- void DeallocateResources() override;
-
- // ShellViewBinderInterface:
- void EnableDebug(bool debug) override;
- void VrMode(bool mode) override;
- void dumpInternal(String8& result) override;
- void Set2DMode(bool mode) override;
- void SetRotation(int angle) override;
-
-
- protected:
- void DrawEye(EyeType eye, const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix) override;
- void OnDrawFrame() override;
- void OnEndFrame() override;
- void OnVisibilityChanged(bool visible) override;
-
- void DrawReticle(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix);
- void DrawController(const mat4& perspective, const mat4& eye_matrix,
- const mat4& head_matrix);
-
- void Touch();
- bool OnTouchpadButton(bool down, int button);
-
- bool OnClick(bool down);
-
- DisplayView* FindActiveDisplay(const vec3& position, const quat& quaternion,
- vec3* hit_location);
-
- // HwcCallback::Client:
- base::unique_fd OnFrame(std::unique_ptr<HwcCallback::Frame> frame) override;
- DisplayView* FindOrCreateDisplay(uint32_t id);
-
- std::unique_ptr<ShaderProgram> program_;
- std::unique_ptr<ShaderProgram> overlay_program_;
- std::unique_ptr<ShaderProgram> controller_program_;
-
- std::unique_ptr<SurfaceFlingerView> surface_flinger_view_;
- std::unique_ptr<Reticle> reticle_;
-
- std::unique_ptr<display::DisplayClient> display_client_;
-
- struct DvrVirtualTouchpadDeleter {
- void operator()(DvrVirtualTouchpad* p) {
- dvrVirtualTouchpadDetach(p);
- dvrVirtualTouchpadDestroy(p);
- }
- };
- std::unique_ptr<DvrVirtualTouchpad, DvrVirtualTouchpadDeleter>
- virtual_touchpad_;
-
- std::unique_ptr<Mesh<vec3, vec3, vec2>> controller_mesh_;
-
- bool is_touching_ = false;
- int touchpad_buttons_ = 0;
-
- std::mutex display_frame_mutex_;
-
- std::vector<std::unique_ptr<DisplayView>> displays_;
- std::vector<std::unique_ptr<DisplayView>> new_displays_;
- std::vector<DisplayView*> removed_displays_;
- DisplayView* active_display_ = nullptr;
-
- mat4 controller_translate_;
-
- ShellView(const ShellView&) = delete;
- void operator=(const ShellView&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_SHELL_VIEW_H_
diff --git a/services/vr/vr_window_manager/shell_view_binder_interface.h b/services/vr/vr_window_manager/shell_view_binder_interface.h
deleted file mode 100644
index c66e4a1..0000000
--- a/services/vr/vr_window_manager/shell_view_binder_interface.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_SHELL_VIEWBINDER_INTERFACE_H_
-#define VR_WINDOW_MANAGER_SHELL_VIEWBINDER_INTERFACE_H_
-
-namespace android {
-namespace dvr {
-
-class ShellViewBinderInterface {
- public:
- ShellViewBinderInterface() {};
- virtual ~ShellViewBinderInterface() {};
-
- virtual void EnableDebug(bool debug) = 0;
- virtual void VrMode(bool mode) = 0;
- virtual void dumpInternal(String8& result) = 0;
- virtual void Set2DMode(bool mode) = 0;
- virtual void SetRotation(int angle) = 0;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_SHELL_VIEWBINDER_INTERFACE_H_
diff --git a/services/vr/vr_window_manager/surface_flinger_view.cpp b/services/vr/vr_window_manager/surface_flinger_view.cpp
deleted file mode 100644
index b41de03..0000000
--- a/services/vr/vr_window_manager/surface_flinger_view.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-#include "surface_flinger_view.h"
-
-#include <android/dvr/IVrComposer.h>
-#include <binder/IServiceManager.h>
-#include <private/dvr/native_buffer.h>
-
-#include "hwc_callback.h"
-#include "texture.h"
-
-namespace android {
-namespace dvr {
-
-SurfaceFlingerView::SurfaceFlingerView() {}
-
-SurfaceFlingerView::~SurfaceFlingerView() {}
-
-bool SurfaceFlingerView::Initialize(HwcCallback::Client *client) {
- sp<IServiceManager> sm(defaultServiceManager());
- vr_composer_ = interface_cast<IVrComposer>(
- sm->getService(IVrComposer::SERVICE_NAME()));
-
- String8 service_name(IVrComposer::SERVICE_NAME().string());
- if (!vr_composer_.get()) {
- ALOGE("Faild to connect to %s", service_name.c_str());
- return false;
- }
-
- composer_callback_ = new HwcCallback(client);
- binder::Status status = vr_composer_->registerObserver(composer_callback_);
- if (!status.isOk()) {
- ALOGE("Failed to register observer with %s", service_name.c_str());
- return false;
- }
-
- return true;
-}
-
-bool SurfaceFlingerView::GetTextures(const HwcCallback::Frame& frame,
- std::vector<TextureLayer>* texture_layers,
- TextureLayer* ime_layer,
- bool debug, bool skip_first_layer) const {
- auto& layers = frame.layers();
- texture_layers->clear();
-
- size_t start = 0;
- // Skip the second layer if it is from the VR app.
- if (!debug && skip_first_layer) {
- start = 2;
- }
-
- for (size_t i = start; i < layers.size(); ++i) {
- if (!debug && layers[i].should_skip_layer())
- continue;
-
- std::unique_ptr<Texture> texture(new Texture());
- if (!texture->Initialize(layers[i].buffer->getNativeBuffer())) {
- ALOGE("Failed to create texture");
- texture_layers->clear();
- return false;
- }
-
- TextureLayer texture_layer = {
- std::move(texture), layers[i].crop, layers[i].display_frame,
- layers[i].blending, layers[i].alpha,
- };
- if (debug && layers[i].type == HwcCallback::HwcLayer::kInputMethod) {
- *ime_layer = std::move(texture_layer);
- } else {
- texture_layers->emplace_back(std::move(texture_layer));
- }
- }
-
- return true;
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/vr_window_manager/surface_flinger_view.h b/services/vr/vr_window_manager/surface_flinger_view.h
deleted file mode 100644
index 1bea38d..0000000
--- a/services/vr/vr_window_manager/surface_flinger_view.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
-#define APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
-
-#include <memory>
-
-#include "hwc_callback.h"
-
-namespace android {
-namespace dvr {
-
-class IDisplay;
-class IVrComposer;
-class Texture;
-
-struct TextureLayer {
- std::unique_ptr<Texture> texture;
- Rectf crop;
- Recti display_frame;
- int32_t blending;
- float alpha;
-};
-
-class SurfaceFlingerView {
- public:
- SurfaceFlingerView();
- ~SurfaceFlingerView();
-
- bool Initialize(HwcCallback::Client *client);
-
- bool GetTextures(const HwcCallback::Frame& layers,
- std::vector<TextureLayer>* texture_layers,
- TextureLayer* ime_layer, bool debug,
- bool skip_first_layer) const;
-
- private:
- sp<IVrComposer> vr_composer_;
- sp<HwcCallback> composer_callback_;
-
- SurfaceFlingerView(const SurfaceFlingerView&) = delete;
- void operator=(const SurfaceFlingerView&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
diff --git a/services/vr/vr_window_manager/texture.cpp b/services/vr/vr_window_manager/texture.cpp
deleted file mode 100644
index 2229efa..0000000
--- a/services/vr/vr_window_manager/texture.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-#include "texture.h"
-
-#include <GLES/glext.h>
-#include <log/log.h>
-#include <system/window.h>
-
-namespace android {
-namespace dvr {
-
-Texture::Texture() {}
-
-Texture::~Texture() {
- EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
- if (id_)
- glDeleteTextures(1, &id_);
- if (image_)
- eglDestroyImageKHR(display, image_);
-}
-
-bool Texture::Initialize(ANativeWindowBuffer* buffer) {
- width_ = buffer->width;
- height_ = buffer->height;
-
- EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
- image_ = eglCreateImageKHR(display, EGL_NO_CONTEXT,
- EGL_NATIVE_BUFFER_ANDROID, buffer, nullptr);
- if (!image_) {
- ALOGE("Failed to create eglImage");
- return false;
- }
-
- glGenTextures(1, &id_);
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_2D, id_);
- glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image_);
-
- return true;
-}
-
-} // namespace android
-} // namespace dvr
diff --git a/services/vr/vr_window_manager/texture.h b/services/vr/vr_window_manager/texture.h
deleted file mode 100644
index 9840f19..0000000
--- a/services/vr/vr_window_manager/texture.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_TEXTURE_H_
-#define VR_WINDOW_MANAGER_TEXTURE_H_
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-#include <GLES/gl.h>
-
-struct ANativeWindowBuffer;
-
-namespace android {
-namespace dvr {
-
-class Texture {
- public:
- explicit Texture();
- ~Texture();
-
- bool Initialize(ANativeWindowBuffer* buffer);
-
- GLuint id() const { return id_; }
- int width() const { return width_; }
- int height() const { return height_; }
-
- private:
- EGLImageKHR image_ = nullptr;
- GLuint id_ = 0;
- int width_ = 0;
- int height_ = 0;
-
- Texture(const Texture&) = delete;
- void operator=(const Texture&) = delete;
-};
-
-} // namespace dvr
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_TEXTURE_H_
diff --git a/services/vr/vr_window_manager/vr_window_manager.cpp b/services/vr/vr_window_manager/vr_window_manager.cpp
deleted file mode 100644
index dd2cba7..0000000
--- a/services/vr/vr_window_manager/vr_window_manager.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <hwbinder/IPCThreadState.h>
-#include <impl/vr_hwc.h>
-
-#include "shell_view.h"
-#include "vr_window_manager_binder.h"
-
-using namespace android;
-using namespace android::dvr;
-
-int main(int /* argc */, char** /* argv */) {
- android::ProcessState::self()->startThreadPool();
-
- // ShellView needs to be created after vr_hwcomposer.
- android::dvr::ShellView app;
- const int app_status = app.Initialize();
- LOG_ALWAYS_FATAL_IF(app_status != 0, "failed to initialize: %d", app_status);
-
- // Create vr_wm_binder.
- android::sp<android::service::vr::VrWindowManagerBinder> vr_wm_binder =
- new android::service::vr::VrWindowManagerBinder(app);
- const int status = vr_wm_binder->Initialize();
- LOG_ALWAYS_FATAL_IF(status != 0, "initialization failed: %d", status);
-
- android::sp<android::IServiceManager> sm(android::defaultServiceManager());
- const android::status_t vr_wm_binder_status = sm->addService(
- android::service::vr::VrWindowManagerBinder::SERVICE_NAME(),
- vr_wm_binder, false /*allowIsolated*/);
- LOG_ALWAYS_FATAL_IF(vr_wm_binder_status != android::OK,
- "vr_wm_binder service not added: %d",
- static_cast<int>(vr_wm_binder_status));
-
- app.SetControllerDataProvider(vr_wm_binder.get());
-
- android::hardware::ProcessState::self()->startThreadPool();
-
- while (true) {
- app.DrawFrame();
- }
-
- android::hardware::IPCThreadState::self()->joinThreadPool();
- android::IPCThreadState::self()->joinThreadPool();
-
- return 0;
-}
diff --git a/services/vr/vr_window_manager/vr_window_manager_binder.cpp b/services/vr/vr_window_manager/vr_window_manager_binder.cpp
deleted file mode 100644
index fdcb8b2..0000000
--- a/services/vr/vr_window_manager/vr_window_manager_binder.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-#include "vr_window_manager_binder.h"
-
-#include <inttypes.h>
-#include <sys/mman.h>
-
-#include <binder/IPCThreadState.h>
-#include <binder/PermissionCache.h>
-#include <binder/Status.h>
-#include <cutils/log.h>
-#include <private/android_filesystem_config.h>
-#include <utils/Errors.h>
-
-namespace android {
-namespace service {
-namespace vr {
-
-namespace {
-const String16 kDumpPermission("android.permission.DUMP");
-const String16 kSendMeControllerInputPermission(
- "android.permission.RESTRICTED_VR_ACCESS");
-} // anonymous namespace
-
-constexpr size_t AshmemControllerDataProvider::kRegionLength;
-
-status_t AshmemControllerDataProvider::Connect(const int in_fd) {
- if (in_fd < 0) {
- return BAD_VALUE;
- }
- if (fd_.get() >= 0) {
- // The VrCore is dead. Long live the VrCore.
- Disconnect();
- }
- void* const shared_region =
- ::mmap(nullptr, kRegionLength, PROT_READ, MAP_SHARED, in_fd, 0);
- if (shared_region == MAP_FAILED) {
- shared_region_ = nullptr;
- return NO_MEMORY;
- }
-
- errno = 0;
- const int fd = ::fcntl(in_fd, F_DUPFD_CLOEXEC, 0);
- if (fd < 0) {
- ::munmap(shared_region, kRegionLength);
- return -errno;
- }
- fd_.reset(fd);
- ALOGI("controller connected %d -> %d @ %p", in_fd, fd, shared_region);
-
- std::lock_guard<std::mutex> guard(mutex_);
- shared_region_ = shared_region;
- return OK;
-}
-
-status_t AshmemControllerDataProvider::Disconnect() {
- if (shared_region_ == nullptr || fd_.get() < 0) {
- return INVALID_OPERATION;
- }
- std::lock_guard<std::mutex> guard(mutex_);
- ::munmap(shared_region_, kRegionLength);
- shared_region_ = nullptr;
- fd_.reset();
- ALOGI("controller disconnected");
- return OK;
-}
-
-const void* AshmemControllerDataProvider::LockControllerData() {
- mutex_.lock();
- if (!shared_region_) {
- mutex_.unlock();
- return nullptr;
- }
- return shared_region_;
-}
-
-void AshmemControllerDataProvider::UnlockControllerData() { mutex_.unlock(); }
-
-void AshmemControllerDataProvider::dumpInternal(String8& result) {
- result.appendFormat("[controller]\nfd = %d\n", fd_.get());
- if (shared_region_) {
- int32_t* p = reinterpret_cast<int32_t*>(shared_region_);
- result.appendFormat("header = ");
- for (int i = 0; i < 8; ++i) {
- result.appendFormat("%c 0x%08" PRIX32, i ? ',' : '[', p[i]);
- }
- result.appendFormat(" ]\n\n");
- }
-}
-
-int VrWindowManagerBinder::Initialize() { return 0; }
-
-binder::Status VrWindowManagerBinder::connectController(
- const ::android::base::unique_fd& in_fd) {
- // TODO(kpschoedel): check permission
-#if 0
- int32_t pid, uid;
- if (!PermissionCache::checkCallingPermission(kSendMeControllerInputPermission,
- &pid, &uid)) {
- ALOGE("permission denied to pid=%" PRId32 " uid=%" PRId32, pid, uid);
- return binder::Status::fromStatusT(PERMISSION_DENIED);
- }
-#endif
- return binder::Status::fromStatusT(Connect(in_fd.get()));
-}
-
-binder::Status VrWindowManagerBinder::disconnectController() {
- // TODO(kpschoedel): check permission
-#if 0
- int32_t pid, uid;
- if (!PermissionCache::checkCallingPermission(kSendMeControllerInputPermission,
- &pid, &uid)) {
- ALOGE("permission denied to pid=%" PRId32 " uid=%" PRId32, pid, uid);
- return binder::Status::fromStatusT(PERMISSION_DENIED);
- }
-#endif
- return binder::Status::fromStatusT(Disconnect());
-}
-
-binder::Status VrWindowManagerBinder::enterVrMode() {
- // TODO(kpschoedel): check permission
- app_.VrMode(true);
- return binder::Status::ok();
-}
-
-binder::Status VrWindowManagerBinder::exitVrMode() {
- // TODO(kpschoedel): check permission
- app_.VrMode(false);
- return binder::Status::ok();
-}
-
-binder::Status VrWindowManagerBinder::setDebugMode(int32_t mode) {
- // TODO(kpschoedel): check permission
- app_.EnableDebug(static_cast<bool>(mode));
- return binder::Status::ok();
-}
-
-binder::Status VrWindowManagerBinder::set2DMode(int32_t mode) {
- app_.Set2DMode(static_cast<bool>(mode));
- return binder::Status::ok();
-}
-
-binder::Status VrWindowManagerBinder::setRotation(int32_t angle) {
- app_.SetRotation(angle);
- return binder::Status::ok();
-}
-
-status_t VrWindowManagerBinder::dump(
- int fd, const Vector<String16>& args [[gnu::unused]]) {
- String8 result;
- const android::IPCThreadState* ipc = android::IPCThreadState::self();
- const pid_t pid = ipc->getCallingPid();
- const uid_t uid = ipc->getCallingUid();
- if ((uid != AID_SHELL) &&
- !PermissionCache::checkPermission(kDumpPermission, pid, uid)) {
- result.appendFormat("Permission denial: can't dump " LOG_TAG
- " from pid=%d, uid=%d\n",
- pid, uid);
- } else {
- app_.dumpInternal(result);
- AshmemControllerDataProvider::dumpInternal(result);
- }
- write(fd, result.string(), result.size());
- return OK;
-}
-
-} // namespace vr
-} // namespace service
-} // namespace android
diff --git a/services/vr/vr_window_manager/vr_window_manager_binder.h b/services/vr/vr_window_manager/vr_window_manager_binder.h
deleted file mode 100644
index 9d0f0b2..0000000
--- a/services/vr/vr_window_manager/vr_window_manager_binder.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef VR_WINDOW_MANAGER_VR_WINDOW_MANAGER_BINDER_H_
-#define VR_WINDOW_MANAGER_VR_WINDOW_MANAGER_BINDER_H_
-
-#include <android/service/vr/BnVrWindowManager.h>
-
-#include <mutex>
-
-#include "controller_data_provider.h"
-#include "shell_view_binder_interface.h"
-
-namespace android {
-namespace service {
-namespace vr {
-
-class AshmemControllerDataProvider : public dvr::ControllerDataProvider {
- public:
- AshmemControllerDataProvider() {}
- virtual ~AshmemControllerDataProvider() {}
-
- status_t Connect(int fd);
- status_t Disconnect();
-
- // ControllerDataProvider:
- const void* LockControllerData() override;
- void UnlockControllerData() override;
-
- protected:
- void dumpInternal(String8& result);
-
- private:
- static constexpr size_t kRegionLength = 8192; // TODO(kpschoedel)
- ::android::base::unique_fd fd_;
-
- // Mutex for guarding shared_region_.
- std::mutex mutex_;
- void* shared_region_ = nullptr;
-
- AshmemControllerDataProvider(const AshmemControllerDataProvider&) = delete;
- void operator=(const AshmemControllerDataProvider&) = delete;
-};
-
-class VrWindowManagerBinder : public BnVrWindowManager,
- public AshmemControllerDataProvider {
- public:
- VrWindowManagerBinder(android::dvr::ShellViewBinderInterface& app)
- : app_(app) {}
- virtual ~VrWindowManagerBinder() {}
-
- // Must be called before clients can connect.
- // Returns 0 if initialization is successful.
- int Initialize();
- static char const* getServiceName() { return "vr_window_manager"; }
-
- protected:
- // Implements IVrWindowManagerBinder.
- ::android::binder::Status connectController(
- const ::android::base::unique_fd& fd) override;
- ::android::binder::Status disconnectController() override;
- ::android::binder::Status enterVrMode() override;
- ::android::binder::Status exitVrMode() override;
- ::android::binder::Status setDebugMode(int32_t mode) override;
- ::android::binder::Status set2DMode(int32_t mode) override;
- ::android::binder::Status setRotation(int32_t angle) override;
-
- // Implements BBinder::dump().
- status_t dump(int fd, const Vector<String16>& args) override;
-
- private:
- android::dvr::ShellViewBinderInterface& app_;
-
- VrWindowManagerBinder(const VrWindowManagerBinder&) = delete;
- void operator=(const VrWindowManagerBinder&) = delete;
-};
-
-} // namespace vr
-} // namespace service
-} // namespace android
-
-#endif // VR_WINDOW_MANAGER_VR_WINDOW_MANAGER_BINDER_H_
diff --git a/services/vr/vr_window_manager/vr_window_manager_binder_test.cpp b/services/vr/vr_window_manager/vr_window_manager_binder_test.cpp
deleted file mode 100644
index f43e803..0000000
--- a/services/vr/vr_window_manager/vr_window_manager_binder_test.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <cutils/log.h>
-
-#include "vr_window_manager_binder.h"
-
-int main() {
- ALOGI("Starting");
- android::service::vr::VrWindowManagerBinder service;
- const int status = service.Initialize();
- LOG_ALWAYS_FATAL_IF(status != 0, "initialization failed: %d", status);
-
- signal(SIGPIPE, SIG_IGN);
- android::sp<android::ProcessState> ps(android::ProcessState::self());
- ps->setThreadPoolMaxThreadCount(4);
- ps->startThreadPool();
- ps->giveThreadPoolName();
-
- android::sp<android::IServiceManager> sm(android::defaultServiceManager());
- const android::status_t service_status = sm->addService(
- android::service::vr::VrWindowManagerBinder::SERVICE_NAME(), &service,
- false /*allowIsolated*/);
- LOG_ALWAYS_FATAL_IF(service_status != android::OK, "service not added: %d",
- static_cast<int>(service_status));
-
- android::IPCThreadState::self()->joinThreadPool();
- return 0;
-}
diff --git a/services/vr/vr_window_manager/vr_wm.rc b/services/vr/vr_window_manager/vr_wm.rc
deleted file mode 100644
index e515bb7..0000000
--- a/services/vr/vr_window_manager/vr_wm.rc
+++ /dev/null
@@ -1,5 +0,0 @@
-service vr_wm /system/bin/vr_wm
- class core
- user system
- group system graphics input
- writepid /dev/cpuset/system/tasks
diff --git a/services/vr/vr_window_manager/vr_wm_ctl.cpp b/services/vr/vr_window_manager/vr_wm_ctl.cpp
deleted file mode 100644
index 758e02b..0000000
--- a/services/vr/vr_window_manager/vr_wm_ctl.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <android/service/vr/BpVrWindowManager.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <inttypes.h>
-
-void usage() { fprintf(stderr, "usage: vr_wm_ctl [enter|exit|debug N]\n"); }
-
-int report(const android::binder::Status& status) {
- if (status.isOk()) {
- fprintf(stderr, "ok\n");
- return 0;
- }
- fprintf(stderr, "failed (%" PRId32 ") %s\n", status.exceptionCode(),
- status.exceptionMessage().string());
- return (int)status.exceptionCode();
-}
-
-int main(int argc, char* argv[]) {
- android::sp<android::IServiceManager> sm(android::defaultServiceManager());
- if (sm == nullptr) {
- fprintf(stderr, "service manager not found\n");
- exit(1);
- }
-
- android::sp<android::service::vr::IVrWindowManager> vrwm =
- android::interface_cast<android::service::vr::IVrWindowManager>(
- sm->getService(
- android::service::vr::IVrWindowManager::SERVICE_NAME()));
- if (vrwm == nullptr) {
- fprintf(stderr, "service not found\n");
- exit(1);
- }
-
- android::binder::Status status;
- if ((argc == 2) && (strcmp(argv[1], "enter") == 0)) {
- exit(report(vrwm->enterVrMode()));
- } else if ((argc == 2) && (strcmp(argv[1], "exit") == 0)) {
- exit(report(vrwm->exitVrMode()));
- } else if ((argc == 3) && (strcmp(argv[1], "debug") == 0)) {
- exit(report(vrwm->setDebugMode(atoi(argv[2]))));
- } else if ((argc == 3) && (strcmp(argv[1], "2d") == 0)) {
- exit(report(vrwm->set2DMode(atoi(argv[2]))));
- } else if ((argc == 3) && (strcmp(argv[1], "rotate") == 0)) {
- exit(report(vrwm->setRotation(atoi(argv[2]))));
- } else {
- usage();
- exit(2);
- }
-
- return 0;
-}
diff --git a/vulkan/libvulkan/driver.cpp b/vulkan/libvulkan/driver.cpp
index f2cd8e6..0005a90 100644
--- a/vulkan/libvulkan/driver.cpp
+++ b/vulkan/libvulkan/driver.cpp
@@ -887,6 +887,19 @@
const VkAllocationCallbacks& data_allocator =
(pAllocator) ? *pAllocator : GetDefaultAllocator();
+ if (pCreateInfo->pApplicationInfo &&
+ pCreateInfo->pApplicationInfo->apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wold-style-cast"
+ ALOGI(
+ "Requested Vulkan instance version %d.%d is greater than max "
+ "supported version (1.0)",
+ VK_VERSION_MAJOR(pCreateInfo->pApplicationInfo->apiVersion),
+ VK_VERSION_MINOR(pCreateInfo->pApplicationInfo->apiVersion));
+#pragma clang diagnostic pop
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+ }
+
CreateInfoWrapper wrapper(*pCreateInfo, data_allocator);
VkResult result = wrapper.Validate();
if (result != VK_SUCCESS)