author     jiyu.yang <jiyu.yang@amlogic.com>  2014-09-29 03:23:48 (GMT)
committer  jiyu.yang <jiyu.yang@amlogic.com>  2014-09-29 03:23:48 (GMT)
commit     bbb143f96076fd29ad76942903454b9792912868 (patch)
tree       a5fe4ab1f4fe8dce5ecd5972d9e7038ef102ede3
parent     f9a383eca8fc213a83ce4f62ed2977cdd02e06d9 (diff)
download   camera-bbb143f96076fd29ad76942903454b9792912868.zip
           camera-bbb143f96076fd29ad76942903454b9792912868.tar.gz
           camera-bbb143f96076fd29ad76942903454b9792912868.tar.bz2
add new camerahal
Diffstat
-rwxr-xr-x  Android.mk  11
-rwxr-xr-x  v3/Android.mk  155
-rwxr-xr-x  v3/CallbackNotifier.cpp  302
-rwxr-xr-x  v3/CallbackNotifier.h  238
-rwxr-xr-x  v3/CameraProperties.cpp  203
-rwxr-xr-x  v3/Converters.cpp  173
-rwxr-xr-x  v3/Converters.h  314
-rw-r--r--  v3/EmulatedBaseCamera.cpp  89
-rw-r--r--  v3/EmulatedBaseCamera.h  118
-rwxr-xr-x  v3/EmulatedCamera.cpp  1041
-rwxr-xr-x  v3/EmulatedCamera.h  401
-rw-r--r--  v3/EmulatedCamera2.cpp  410
-rw-r--r--  v3/EmulatedCamera2.h  274
-rw-r--r--  v3/EmulatedCamera3.cpp  298
-rw-r--r--  v3/EmulatedCamera3.h  205
-rwxr-xr-x  v3/EmulatedCameraCommon.h  60
-rwxr-xr-x  v3/EmulatedCameraDevice.cpp  397
-rwxr-xr-x  v3/EmulatedCameraDevice.h  544
-rwxr-xr-x  v3/EmulatedCameraFactory.cpp  544
-rwxr-xr-x  v3/EmulatedCameraFactory.h  195
-rwxr-xr-x  v3/EmulatedCameraHal.cpp  46
-rw-r--r--  v3/EmulatedCameraHotplugThread.cpp  372
-rw-r--r--  v3/EmulatedCameraHotplugThread.h  77
-rwxr-xr-x  v3/EmulatedFakeCamera.cpp  90
-rwxr-xr-x  v3/EmulatedFakeCamera.h  74
-rw-r--r--  v3/EmulatedFakeCamera2.cpp  2727
-rw-r--r--  v3/EmulatedFakeCamera2.h  429
-rwxr-xr-x  v3/EmulatedFakeCamera3.cpp  2347
-rw-r--r--  v3/EmulatedFakeCamera3.h  298
-rwxr-xr-x  v3/EmulatedFakeCameraDevice.cpp  437
-rwxr-xr-x  v3/EmulatedFakeCameraDevice.h  197
-rwxr-xr-x  v3/EmulatedQemuCamera.cpp  119
-rwxr-xr-x  v3/EmulatedQemuCamera.h  73
-rw-r--r--  v3/EmulatedQemuCamera2.cpp  55
-rw-r--r--  v3/EmulatedQemuCamera2.h  66
-rwxr-xr-x  v3/EmulatedQemuCameraDevice.cpp  265
-rwxr-xr-x  v3/EmulatedQemuCameraDevice.h  121
-rw-r--r--  v3/JpegCompressor.cpp  95
-rw-r--r--  v3/JpegCompressor.h  95
-rw-r--r--  v3/JpegStub.cpp  69
-rw-r--r--  v3/JpegStub.h  35
-rw-r--r--  v3/MCameraParameters.cpp  88
-rwxr-xr-x  v3/PreviewWindow.cpp  216
-rwxr-xr-x  v3/PreviewWindow.h  165
-rwxr-xr-x  v3/QemuClient.cpp  559
-rwxr-xr-x  v3/QemuClient.h  437
-rw-r--r--  v3/fake-pipeline2/Android.mk  4
-rwxr-xr-x  v3/fake-pipeline2/Base.h  76
-rwxr-xr-x  v3/fake-pipeline2/JpegCompressor.cpp  693
-rwxr-xr-x  v3/fake-pipeline2/JpegCompressor.h  177
-rw-r--r--  v3/fake-pipeline2/Scene.cpp  478
-rw-r--r--  v3/fake-pipeline2/Scene.h  191
-rwxr-xr-x  v3/fake-pipeline2/Sensor.cpp  1207
-rwxr-xr-x  v3/fake-pipeline2/Sensor.h  332
-rwxr-xr-x  v3/fake-pipeline2/camera_hw.cpp  382
-rwxr-xr-x  v3/fake-pipeline2/camera_hw.h  67
-rwxr-xr-x  v3/fake-pipeline2/tests/Android.mk  39
-rwxr-xr-x  v3/fake-pipeline2/tests/test_camera.cpp  137
-rw-r--r--  v3/fake-pipeline2/v4l2-base.c  657
-rwxr-xr-x  v3/inc/CameraProperties.h  102
-rwxr-xr-x  v3/inc/DebugUtils.h  79
-rw-r--r--  v3/inc/MCameraParameters.h  41
-rw-r--r--  v3/media_codecs.xml  84
-rw-r--r--  v3/media_profiles.xml  414
64 files changed, 20684 insertions, 0 deletions
diff --git a/Android.mk b/Android.mk
index 928a00e..4defd9c 100755
--- a/Android.mk
+++ b/Android.mk
@@ -1,4 +1,7 @@
LOCAL_PATH:= $(call my-dir)
+#CAMHAL_V3:=true
+
+ifneq ($(CAMHAL_V3),true)
CAMHAL_GIT_VERSION="$(shell cd $(LOCAL_PATH);git log | grep commit -m 1 | cut -d' ' -f 2)"
CAMHAL_GIT_UNCOMMIT_FILE_NUM=$(shell cd $(LOCAL_PATH);git diff | grep +++ -c)
@@ -183,3 +186,11 @@ LOCAL_MODULE_TAGS:= optional
#include $(BUILD_HEAPTRACKED_SHARED_LIBRARY)
include $(BUILD_SHARED_LIBRARY)
+
+else
+
+include $(LOCAL_PATH)/v3/Android.mk
+
+endif
+
+include $(CLEAR_VARS)
diff --git a/v3/Android.mk b/v3/Android.mk
new file mode 100755
index 0000000..c11ef49
--- /dev/null
+++ b/v3/Android.mk
@@ -0,0 +1,155 @@
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_CFLAGS += -fno-short-enums -DQEMU_HARDWARE
+LOCAL_CFLAGS += -Wno-unused-parameter -Wno-missing-field-initializers
+
+########################################################################################################
+CAMHAL_GIT_VERSION="$(shell cd $(LOCAL_PATH);git log | grep commit -m 1 | cut -d' ' -f 2)"
+CAMHAL_GIT_UNCOMMIT_FILE_NUM=$(shell cd $(LOCAL_PATH);git diff | grep +++ -c)
+CAMHAL_LAST_CHANGED="$(shell cd $(LOCAL_PATH);git log | grep Date -m 1)"
+CAMHAL_BUILD_TIME=" $(shell touch Android.mk;date)"
+CAMHAL_BUILD_NAME=" $(shell echo ${LOGNAME})"
+CAMHAL_BRANCH_NAME="$(shell cd $(LOCAL_PATH);git branch -a | sed -n '/'*'/p')"
+CAMHAL_BUILD_MODE=$(shell echo ${TARGET_BUILD_VARIANT})
+CAMHAL_HOSTNAME="$(shell hostname)"
+CAMHAL_IP="$(shell ifconfig eth0|grep -oE '([0-9]{1,3}\.?){4}'|head -n 1)"
+CAMHAL_PATH="$(shell pwd)/$(LOCAL_PATH)"
+
+LOCAL_CFLAGS+=-DHAVE_VERSION_INFO
+LOCAL_CFLAGS+=-DCAMHAL_GIT_VERSION=\"${CAMHAL_GIT_VERSION}${CAMHAL_GIT_DIRTY}\"
+LOCAL_CFLAGS+=-DCAMHAL_BRANCH_NAME=\"${CAMHAL_BRANCH_NAME}\"
+LOCAL_CFLAGS+=-DCAMHAL_LAST_CHANGED=\"${CAMHAL_LAST_CHANGED}\"
+LOCAL_CFLAGS+=-DCAMHAL_BUILD_TIME=\"${CAMHAL_BUILD_TIME}\"
+LOCAL_CFLAGS+=-DCAMHAL_BUILD_NAME=\"${CAMHAL_BUILD_NAME}\"
+LOCAL_CFLAGS+=-DCAMHAL_GIT_UNCOMMIT_FILE_NUM=${CAMHAL_GIT_UNCOMMIT_FILE_NUM}
+LOCAL_CFLAGS+=-DCAMHAL_HOSTNAME=\"${CAMHAL_HOSTNAME}\"
+LOCAL_CFLAGS+=-DCAMHAL_IP=\"${CAMHAL_IP}\"
+LOCAL_CFLAGS+=-DCAMHAL_PATH=\"${CAMHAL_PATH}\"
+########################################################################################################
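+# The macros above can be surfaced from the HAL at runtime; a minimal sketch
+# (hypothetical logging line, not part of this patch):
+#
+#   ALOGI("CamHAL %s built %s", CAMHAL_GIT_VERSION, CAMHAL_BUILD_TIME);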
+
+LOCAL_SHARED_LIBRARIES:= \
+ libbinder \
+ liblog \
+ libutils \
+ libcutils \
+ libcamera_client \
+ libion \
+ libui \
+ libdl \
+ libjpeg \
+ libjhead
+
+# JPEG conversion libraries and includes.
+LOCAL_SHARED_LIBRARIES += \
+ libjpeg \
+ libcamera_metadata
+
+LOCAL_STATIC_LIBRARIES := \
+ libyuv_static \
+
+LOCAL_C_INCLUDES += external/jpeg \
+ external/jhead/ \
+ frameworks/native/include/media/hardware \
+ external/libyuv/files/include/ \
+ $(LOCAL_PATH)/../opengl/system/OpenglSystemCommon \
+ $(TOP)/system/core/libion/include \
+ $(TOP)/system/core/libion/kernel-headers \
+ $(LOCAL_PATH)/inc \
+ $(LOCAL_PATH)/hardware/amlogic/gralloc \
+ $(call include-path-for, camera)
+
+LOCAL_SRC_FILES := \
+ EmulatedCameraHal.cpp \
+ EmulatedCameraFactory.cpp \
+ EmulatedCameraHotplugThread.cpp \
+ EmulatedBaseCamera.cpp \
+ EmulatedCamera.cpp \
+ EmulatedCameraDevice.cpp \
+ EmulatedQemuCamera.cpp \
+ EmulatedQemuCameraDevice.cpp \
+ EmulatedFakeCamera.cpp \
+ EmulatedFakeCameraDevice.cpp \
+ Converters.cpp \
+ PreviewWindow.cpp \
+ CallbackNotifier.cpp \
+ QemuClient.cpp \
+ JpegCompressor.cpp \
+ EmulatedCamera2.cpp \
+ EmulatedFakeCamera2.cpp \
+ EmulatedQemuCamera2.cpp \
+ fake-pipeline2/Scene.cpp \
+ fake-pipeline2/Sensor.cpp \
+ fake-pipeline2/JpegCompressor.cpp \
+ EmulatedCamera3.cpp \
+ EmulatedFakeCamera3.cpp \
+ fake-pipeline2/camera_hw.cpp \
+
+ifeq ($(TARGET_PRODUCT),vbox_x86)
+LOCAL_MODULE := camera.vbox_x86
+else
+LOCAL_MODULE:= camera.amlogic
+endif
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+$(info ************************************************************)
+$(info target board: $(TARGET_BOARD_PLATFORM))
+$(info git version: $(CAMHAL_GIT_VERSION))
+$(info uncommitted: $(CAMHAL_GIT_UNCOMMIT_FILE_NUM) files)
+$(info last changed: $(CAMHAL_LAST_CHANGED))
+$(info build time: $(CAMHAL_BUILD_TIME))
+$(info builder name: $(CAMHAL_BUILD_NAME))
+$(info branch name: $(CAMHAL_BRANCH_NAME))
+$(info build mode: $(CAMHAL_BUILD_MODE))
+$(info host name: $(CAMHAL_HOSTNAME))
+$(info host IP: $(CAMHAL_IP))
+$(info path: $(CAMHAL_PATH))
+$(info ************************************************************)
+$(shell sleep 3)
+
+#################################################################
+ifneq (true,true)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_CFLAGS += -fno-short-enums -DQEMU_HARDWARE
+LOCAL_CFLAGS += -Wno-unused-parameter
+LOCAL_SHARED_LIBRARIES:= \
+ libcutils \
+ liblog \
+ libskia \
+ libandroid_runtime
+
+LOCAL_C_INCLUDES += external/jpeg \
+ external/skia/include/core/ \
+ frameworks/base/core/jni/android/graphics \
+ frameworks/native/include
+
+LOCAL_SRC_FILES := JpegStub.cpp
+
+LOCAL_MODULE := camera.goldfish.jpeg
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif # !PDK
diff --git a/v3/CallbackNotifier.cpp b/v3/CallbackNotifier.cpp
new file mode 100755
index 0000000..b288f52
--- /dev/null
+++ b/v3/CallbackNotifier.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains the implementation of the CallbackNotifier class, which manages
+ * callbacks set via the set_callbacks, enable_msg_type, and disable_msg_type
+ * camera HAL API calls.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_CallbackNotifier"
+#include <cutils/log.h>
+#include <MetadataBufferType.h>
+#include "EmulatedCameraDevice.h"
+#include "CallbackNotifier.h"
+#include "JpegCompressor.h"
+
+namespace android {
+
+/* String representation of camera messages. */
+static const char* lCameraMessages[] =
+{
+ "CAMERA_MSG_ERROR",
+ "CAMERA_MSG_SHUTTER",
+ "CAMERA_MSG_FOCUS",
+ "CAMERA_MSG_ZOOM",
+ "CAMERA_MSG_PREVIEW_FRAME",
+ "CAMERA_MSG_VIDEO_FRAME",
+ "CAMERA_MSG_POSTVIEW_FRAME",
+ "CAMERA_MSG_RAW_IMAGE",
+ "CAMERA_MSG_COMPRESSED_IMAGE",
+ "CAMERA_MSG_RAW_IMAGE_NOTIFY",
+ "CAMERA_MSG_PREVIEW_METADATA"
+};
+static const int lCameraMessagesNum = sizeof(lCameraMessages) / sizeof(char*);
+
+/* Builds an array of strings for the given set of messages.
+ * Param:
+ * msg - Messages to get strings for,
+ * strings - Array where to save strings
+ * max - Maximum number of entries in the array.
+ * Return:
+ * Number of strings saved into the 'strings' array.
+ */
+static int GetMessageStrings(uint32_t msg, const char** strings, int max)
+{
+ int index = 0;
+ int out = 0;
+ while (msg != 0 && out < max && index < lCameraMessagesNum) {
+ while ((msg & 0x1) == 0 && index < lCameraMessagesNum) {
+ msg >>= 1;
+ index++;
+ }
+ if ((msg & 0x1) != 0 && index < lCameraMessagesNum) {
+ strings[out] = lCameraMessages[index];
+ out++;
+ msg >>= 1;
+ index++;
+ }
+ }
+
+ return out;
+}
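+
+/* Worked example (bit values from the platform camera.h, where
+ * CAMERA_MSG_SHUTTER is 0x0002 and CAMERA_MSG_FOCUS is 0x0004): for
+ * msg = 0x0006 the loop skips bit 0, emits "CAMERA_MSG_SHUTTER" for bit 1 and
+ * "CAMERA_MSG_FOCUS" for bit 2, and returns 2:
+ *
+ *   const char* strs[lCameraMessagesNum];
+ *   int n = GetMessageStrings(CAMERA_MSG_SHUTTER | CAMERA_MSG_FOCUS,
+ *                             strs, lCameraMessagesNum);
+ */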
+
+/* Logs messages, enabled by the mask. */
+static void PrintMessages(uint32_t msg)
+{
+ const char* strs[lCameraMessagesNum];
+ const int translated = GetMessageStrings(msg, strs, lCameraMessagesNum);
+ for (int n = 0; n < translated; n++) {
+ ALOGV(" %s", strs[n]);
+ }
+}
+
+CallbackNotifier::CallbackNotifier()
+ : mNotifyCB(NULL),
+ mDataCB(NULL),
+ mDataCBTimestamp(NULL),
+ mGetMemoryCB(NULL),
+ mCBOpaque(NULL),
+ mLastFrameTimestamp(0),
+ mFrameRefreshFreq(0),
+ mMessageEnabler(0),
+ mJpegQuality(90),
+ mVideoRecEnabled(false),
+ mTakingPicture(false)
+{
+}
+
+CallbackNotifier::~CallbackNotifier()
+{
+}
+
+/****************************************************************************
+ * Camera API
+ ***************************************************************************/
+
+void CallbackNotifier::setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user)
+{
+ ALOGV("%s: %p, %p, %p, %p (%p)",
+ __FUNCTION__, notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mNotifyCB = notify_cb;
+ mDataCB = data_cb;
+ mDataCBTimestamp = data_cb_timestamp;
+ mGetMemoryCB = get_memory;
+ mCBOpaque = user;
+}
+
+void CallbackNotifier::enableMessage(uint msg_type)
+{
+ ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+ PrintMessages(msg_type);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mMessageEnabler |= msg_type;
+ ALOGV("**** Currently enabled messages:");
+ PrintMessages(mMessageEnabler);
+}
+
+void CallbackNotifier::disableMessage(uint msg_type)
+{
+ ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+ PrintMessages(msg_type);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mMessageEnabler &= ~msg_type;
+ ALOGV("**** Currently enabled messages:");
+ PrintMessages(mMessageEnabler);
+}
+
+status_t CallbackNotifier::enableVideoRecording(int fps)
+{
+ ALOGV("%s: FPS = %d", __FUNCTION__, fps);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mVideoRecEnabled = true;
+ mLastFrameTimestamp = 0;
+ mFrameRefreshFreq = 1000000000LL / fps;
+
+ return NO_ERROR;
+}
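+
+/* For example, fps = 30 gives mFrameRefreshFreq = 1000000000 / 30 = 33333333 ns,
+ * so isNewVideoFrameTime() below lets at most one video frame through every
+ * ~33.3 ms, regardless of how fast the device actually captures. */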
+
+void CallbackNotifier::disableVideoRecording()
+{
+ ALOGV("%s:", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mVideoRecEnabled = false;
+ mLastFrameTimestamp = 0;
+ mFrameRefreshFreq = 0;
+}
+
+void CallbackNotifier::releaseRecordingFrame(const void* opaque)
+{
+ List<camera_memory_t*>::iterator it = mCameraMemoryTs.begin();
+ for( ; it != mCameraMemoryTs.end(); ++it ) {
+ if ( (*it)->data == opaque ) {
+ (*it)->release( *it );
+ mCameraMemoryTs.erase(it);
+ break;
+ }
+ }
+}
+
+status_t CallbackNotifier::storeMetaDataInBuffers(bool enable)
+{
+    /* Returning INVALID_OPERATION tells the framework that this HAL does not
+     * support metadata in buffers, so the framework will expect actual frame
+     * data with CAMERA_MSG_VIDEO_FRAME. */
+ return INVALID_OPERATION;
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+void CallbackNotifier::cleanupCBNotifier()
+{
+ Mutex::Autolock locker(&mObjectLock);
+ mMessageEnabler = 0;
+ mNotifyCB = NULL;
+ mDataCB = NULL;
+ mDataCBTimestamp = NULL;
+ mGetMemoryCB = NULL;
+ mCBOpaque = NULL;
+ mLastFrameTimestamp = 0;
+ mFrameRefreshFreq = 0;
+ mJpegQuality = 90;
+ mVideoRecEnabled = false;
+ mTakingPicture = false;
+}
+
+void CallbackNotifier::onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev)
+{
+ if (isMessageEnabled(CAMERA_MSG_VIDEO_FRAME) && isVideoRecordingEnabled() &&
+ isNewVideoFrameTime(timestamp)) {
+ camera_memory_t* cam_buff =
+ mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
+ if (NULL != cam_buff && NULL != cam_buff->data) {
+ memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+ mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME,
+ cam_buff, 0, mCBOpaque);
+
+ mCameraMemoryTs.push_back( cam_buff );
+ } else {
+ ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
+ }
+ }
+
+ if (isMessageEnabled(CAMERA_MSG_PREVIEW_FRAME)) {
+ camera_memory_t* cam_buff =
+ mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
+ if (NULL != cam_buff && NULL != cam_buff->data) {
+ memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+ mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
+ cam_buff->release(cam_buff);
+ } else {
+ ALOGE("%s: Memory failure in CAMERA_MSG_PREVIEW_FRAME", __FUNCTION__);
+ }
+ }
+
+ if (mTakingPicture) {
+ /* This happens just once. */
+ mTakingPicture = false;
+ /* The sequence of callbacks during picture taking is:
+ * - CAMERA_MSG_SHUTTER
+ * - CAMERA_MSG_RAW_IMAGE_NOTIFY
+ * - CAMERA_MSG_COMPRESSED_IMAGE
+ */
+ if (isMessageEnabled(CAMERA_MSG_SHUTTER)) {
+ mNotifyCB(CAMERA_MSG_SHUTTER, 0, 0, mCBOpaque);
+ }
+ if (isMessageEnabled(CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
+ mNotifyCB(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCBOpaque);
+ }
+ if (isMessageEnabled(CAMERA_MSG_COMPRESSED_IMAGE)) {
+ /* Compress the frame to JPEG. Note that when taking pictures, we
+ * have requested camera device to provide us with NV21 frames. */
+ NV21JpegCompressor compressor;
+ status_t res =
+ compressor.compressRawImage(frame, camera_dev->getFrameWidth(),
+ camera_dev->getFrameHeight(),
+ mJpegQuality);
+ if (res == NO_ERROR) {
+ camera_memory_t* jpeg_buff =
+ mGetMemoryCB(-1, compressor.getCompressedSize(), 1, NULL);
+ if (NULL != jpeg_buff && NULL != jpeg_buff->data) {
+ compressor.getCompressedImage(jpeg_buff->data);
+ mDataCB(CAMERA_MSG_COMPRESSED_IMAGE, jpeg_buff, 0, NULL, mCBOpaque);
+ jpeg_buff->release(jpeg_buff);
+ } else {
+ ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
+ }
+ } else {
+ ALOGE("%s: Compression failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
+ }
+ }
+ }
+}
+
+void CallbackNotifier::onCameraDeviceError(int err)
+{
+ if (isMessageEnabled(CAMERA_MSG_ERROR) && mNotifyCB != NULL) {
+ mNotifyCB(CAMERA_MSG_ERROR, err, 0, mCBOpaque);
+ }
+}
+
+/****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+bool CallbackNotifier::isNewVideoFrameTime(nsecs_t timestamp)
+{
+ Mutex::Autolock locker(&mObjectLock);
+ if ((timestamp - mLastFrameTimestamp) >= mFrameRefreshFreq) {
+ mLastFrameTimestamp = timestamp;
+ return true;
+ }
+ return false;
+}
+
+}; /* namespace android */
diff --git a/v3/CallbackNotifier.h b/v3/CallbackNotifier.h
new file mode 100755
index 0000000..24784b5
--- /dev/null
+++ b/v3/CallbackNotifier.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H
+#define HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H
+
+/*
+ * Contains the declaration of the CallbackNotifier class, which manages
+ * callbacks set via the set_callbacks, enable_msg_type, and disable_msg_type
+ * camera HAL API calls.
+ */
+
+#include <utils/List.h>
+
+namespace android {
+
+class EmulatedCameraDevice;
+
+/* Manages callbacks set via the set_callbacks, enable_msg_type, and
+ * disable_msg_type camera HAL API calls.
+ *
+ * Objects of this class are contained in EmulatedCamera objects, and handle
+ * relevant camera API callbacks.
+ * Locking considerations: callbacks registered in this class must not be
+ * invoked while holding a lock, since a callback may re-enter the camera API
+ * and deadlock.
+ */
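+
+/* A minimal sketch of the lock discipline implied above (hypothetical code,
+ * not part of this class): snapshot the callback pointer while holding the
+ * lock, then invoke it with the lock released, so a re-entrant camera API call
+ * from inside the callback cannot deadlock:
+ *
+ *   camera_notify_callback cb;
+ *   void* opaque;
+ *   {
+ *       Mutex::Autolock locker(&mObjectLock);
+ *       cb = mNotifyCB;
+ *       opaque = mCBOpaque;
+ *   }
+ *   if (cb != NULL) cb(CAMERA_MSG_SHUTTER, 0, 0, opaque);
+ */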
+class CallbackNotifier {
+public:
+ /* Constructs CallbackNotifier instance. */
+ CallbackNotifier();
+
+ /* Destructs CallbackNotifier instance. */
+ ~CallbackNotifier();
+
+ /****************************************************************************
+ * Camera API
+ ***************************************************************************/
+
+public:
+ /* Actual handler for camera_device_ops_t::set_callbacks callback.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::set_callbacks callback.
+ */
+ void setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user);
+
+ /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::enable_msg_type callback.
+ */
+ void enableMessage(uint msg_type);
+
+ /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::disable_msg_type callback.
+ */
+ void disableMessage(uint msg_type);
+
+ /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers
+ * callback. This method is called by the containing emulated camera object
+ * when it is handling the camera_device_ops_t::store_meta_data_in_buffers
+ * callback.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ status_t storeMetaDataInBuffers(bool enable);
+
+ /* Enables video recording.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::start_recording callback.
+ * Param:
+ * fps - Video frame frequency. This parameter determines when a frame
+ * received via onNextFrameAvailable call will be pushed through the
+ * callback.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ status_t enableVideoRecording(int fps);
+
+ /* Disables video recording.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::stop_recording callback.
+ */
+ void disableVideoRecording();
+
+ /* Releases video frame, sent to the framework.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::release_recording_frame callback.
+ */
+ void releaseRecordingFrame(const void* opaque);
+
+ /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::msg_type_enabled callback.
+ * Note: this method doesn't grab a lock while checking message status, since
+ * upon exit the status would be undefined anyway. So, grab a lock before
+ * calling this method if you care about persisting a defined message status.
+ * Return:
+ * 0 if message is disabled, or non-zero value, if message is enabled.
+ */
+ inline int isMessageEnabled(uint msg_type)
+ {
+ return mMessageEnabler & msg_type;
+ }
+
+ /* Checks if video recording is enabled.
+ * This method is called by the containing emulated camera object when it is
+ * handling the camera_device_ops_t::recording_enabled callback.
+ * Note: this method doesn't grab a lock while checking video recording status,
+ * since upon exit the status would be undefined anyway. So, grab a lock
+ * before calling this method if you care about persisting a defined video
+ * recording status.
+ * Return:
+ * true if video recording is enabled, or false if it is disabled.
+ */
+ inline bool isVideoRecordingEnabled()
+ {
+ return mVideoRecEnabled;
+ }
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /* Resets the callback notifier. */
+ void cleanupCBNotifier();
+
+ /* Next frame is available in the camera device.
+ * This is a notification callback that is invoked by the camera device when
+ * a new frame is available.
+ * Note that most likely this method is called in context of a worker thread
+ * that camera device has created for frame capturing.
+ * Param:
+ * frame - Captured frame, or NULL if camera device didn't pull the frame
+ * yet. If NULL is passed in this parameter use GetCurrentFrame method
+ * of the camera device class to obtain the next frame. Also note that
+ * the size of the frame that is passed here (as well as the frame
+ * returned from the GetCurrentFrame method) is defined by the current
+ * frame settings (width + height + pixel format) for the camera device.
+ * timestamp - Frame's timestamp.
+ * camera_dev - Camera device instance that delivered the frame.
+ */
+ void onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev);
+
+ /* Entry point for notifications that occur in camera device.
+ * Param:
+ * err - CAMERA_ERROR_XXX error code.
+ */
+ void onCameraDeviceError(int err);
+
+ /* Sets or resets the taking-picture state.
+ * This state controls whether or not to notify the framework about compressed
+ * image, shutter, and other picture related events.
+ */
+ void setTakingPicture(bool taking)
+ {
+ mTakingPicture = taking;
+ }
+
+ /* Sets JPEG quality used to compress frame during picture taking. */
+ void setJpegQuality(int jpeg_quality)
+ {
+ mJpegQuality = jpeg_quality;
+ }
+
+ /****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+protected:
+ /* Checks if it's time to push new video frame.
+ * Note that this method must be called while object is locked.
+ * Param:
+ * timestamp - Timestamp for the new frame. */
+ bool isNewVideoFrameTime(nsecs_t timestamp);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Locks this instance for data change. */
+ Mutex mObjectLock;
+
+ /*
+ * Callbacks, registered in set_callbacks.
+ */
+
+ camera_notify_callback mNotifyCB;
+ camera_data_callback mDataCB;
+ camera_data_timestamp_callback mDataCBTimestamp;
+ camera_request_memory mGetMemoryCB;
+ void* mCBOpaque;
+
+ /* video frame queue for the CameraHeapMemory destruction */
+ List<camera_memory_t*> mCameraMemoryTs;
+
+ /* Timestamp when last frame has been delivered to the framework. */
+ nsecs_t mLastFrameTimestamp;
+
+ /* Minimum interval between video frames pushed to the framework, in ns. */
+ nsecs_t mFrameRefreshFreq;
+
+ /* Message enabler. */
+ uint32_t mMessageEnabler;
+
+ /* JPEG quality used to compress frame during picture taking. */
+ int mJpegQuality;
+
+ /* Video recording status. */
+ bool mVideoRecEnabled;
+
+ /* Picture taking status. */
+ bool mTakingPicture;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H */
diff --git a/v3/CameraProperties.cpp b/v3/CameraProperties.cpp
new file mode 100755
index 0000000..6dc5f51
--- /dev/null
+++ b/v3/CameraProperties.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CameraProperties "
+#include <utils/threads.h>
+
+#include "DebugUtils.h"
+#include "CameraProperties.h"
+
+#define CAMERA_ROOT "CameraRoot"
+#define CAMERA_INSTANCE "CameraInstance"
+
+namespace android {
+
+extern "C" void loadCaps(int camera_id, CameraProperties::Properties* params)
+{}
+
+/*********************************************************
+ CameraProperties - public function implementation
+**********************************************************/
+
+CameraProperties::CameraProperties() : mCamerasSupported(0)
+{
+ LOG_FUNCTION_NAME;
+
+ mCamerasSupported = 0;
+ mInitialized = 0;
+
+ LOG_FUNCTION_NAME_EXIT;
+}
+
+CameraProperties::~CameraProperties()
+{
+ LOG_FUNCTION_NAME;
+
+ LOG_FUNCTION_NAME_EXIT;
+}
+
+// Initializes the CameraProperties class
+status_t CameraProperties::initialize(int cameraid)
+{
+ LOG_FUNCTION_NAME;
+
+ status_t ret = NO_ERROR;
+
+ Mutex::Autolock lock(mLock);
+
+ CAMHAL_LOGDB("%s, mCamerasSupported=%d\n",
+ mInitialized?"initialized":"not initialized", mCamerasSupported);
+
+ if( !mInitialized ){
+
+ int temp = CameraAdapter_CameraNum();
+ for ( int i = 0; i < temp; i++) {
+ mInitialized |= (1 << i);
+ mCamerasSupported++;
+ mCameraProps[i].set(CameraProperties::CAMERA_SENSOR_INDEX, i);
+ loadCaps(i, &mCameraProps[i]);
+ mCameraProps[i].dump();
+ }
+
+ }else{
+
+ if(!strcmp( mCameraProps[cameraid].get(CameraProperties::RELOAD_WHEN_OPEN), "1")){
+ CAMHAL_LOGDB("cameraid %d reload\n", cameraid);
+ loadCaps(cameraid, &mCameraProps[cameraid]);
+ }else{
+ CAMHAL_LOGDA("device don't need reload\n");
+ }
+
+ }
+
+ LOG_FUNCTION_NAME_EXIT;
+ return ret;
+
+}
+
+///Loads all the Camera related properties
+status_t CameraProperties::loadProperties()
+{
+ LOG_FUNCTION_NAME;
+
+ status_t ret = NO_ERROR;
+ CAMHAL_LOGDA("this func delete!!!\n");
+ return ret;
+}
+
+// Returns the number of Cameras found
+int CameraProperties::camerasSupported()
+{
+ LOG_FUNCTION_NAME;
+ return mCamerasSupported;
+}
+
+// Returns the properties class for a specific Camera
+// Each value is indexed by the CameraProperties::CameraPropertyIndex enum
+int CameraProperties::getProperties(int cameraIndex, CameraProperties::Properties** properties)
+{
+ LOG_FUNCTION_NAME;
+
+ if((unsigned int)cameraIndex >= mCamerasSupported)
+ {
+ LOG_FUNCTION_NAME_EXIT;
+ return -EINVAL;
+ }
+
+ *properties = mCameraProps+cameraIndex;
+
+ LOG_FUNCTION_NAME_EXIT;
+ return 0;
+}
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+////////CameraProperties::Properties function/////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+ssize_t CameraProperties::Properties::set(const char *prop, const char *value)
+{
+ if(!prop)
+ return -EINVAL;
+ if(!value)
+ value = DEFAULT_VALUE;
+
+ return mProperties->replaceValueFor(String8(prop), String8(value));
+}
+
+ssize_t CameraProperties::Properties::set(const char *prop, int value)
+{
+ char s_val[30];
+
+ snprintf(s_val, sizeof(s_val), "%d", value);
+
+ return set(prop, s_val);
+}
+
+const char* CameraProperties::Properties::get(const char * prop)
+{
+ String8 value = mProperties->valueFor(String8(prop));
+ return value.string();
+}
+
+void CameraProperties::Properties::dump()
+{
+ for (size_t i = 0; i < mProperties->size(); i++)
+ {
+ CAMHAL_LOGVB("%s = %s\n",
+ mProperties->keyAt(i).string(),
+ mProperties->valueAt(i).string());
+ }
+}
+
+const char* CameraProperties::Properties::keyAt(unsigned int index)
+{
+ if(index < mProperties->size())
+ {
+ return mProperties->keyAt(index).string();
+ }
+ return NULL;
+}
+
+const char* CameraProperties::Properties::valueAt(unsigned int index)
+{
+ if(index < mProperties->size())
+ {
+ return mProperties->valueAt(index).string();
+ }
+ return NULL;
+}
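+
+/* Usage sketch (hypothetical values; "/dev/video0" is only an example):
+ * Properties is a string key/value store, so integer values round-trip as
+ * strings:
+ *
+ *   CameraProperties camProps;
+ *   camProps.initialize(0);
+ *   CameraProperties::Properties* props = NULL;
+ *   if (camProps.getProperties(0, &props) == 0) {
+ *       props->set(CameraProperties::DEVICE_NAME, "/dev/video0");
+ *       const char* name = props->get(CameraProperties::DEVICE_NAME);
+ *   }
+ */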
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+///////////CameraProperties::const char initialized///////////////////////
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+const char CameraProperties::PIXEL_FORMAT_RGB24[] = "rgb24";
+const char CameraProperties::RELOAD_WHEN_OPEN[]="prop-reload-key";
+const char CameraProperties::DEVICE_NAME[] = "device_name";
+
+const char CameraProperties::DEFAULT_VALUE[] = "";
+const char CameraProperties::PARAMS_DELIMITER []= ",";
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+///////////CameraProperties::const char initialize finished///////////////
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+}; /* namespace android */
diff --git a/v3/Converters.cpp b/v3/Converters.cpp
new file mode 100755
index 0000000..f63f67f
--- /dev/null
+++ b/v3/Converters.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of framebuffer conversion routines.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Converter"
+#include <cutils/log.h>
+#include "Converters.h"
+
+namespace android {
+
+static void _YUV420SToRGB565(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ int dUV,
+ uint16_t* rgb,
+ int width,
+ int height)
+{
+ const uint8_t* U_pos = U;
+ const uint8_t* V_pos = V;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+ const uint8_t nU = *U;
+ const uint8_t nV = *V;
+ *rgb = YUVToRGB565(*Y, nU, nV);
+ Y++; rgb++;
+ *rgb = YUVToRGB565(*Y, nU, nV);
+ Y++; rgb++;
+ }
+ if (y & 0x1) {
+ U_pos = U;
+ V_pos = V;
+ } else {
+ U = U_pos;
+ V = V_pos;
+ }
+ }
+}
+
+static void _YUV420SToRGB32(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ int dUV,
+ uint32_t* rgb,
+ int width,
+ int height)
+{
+ const uint8_t* U_pos = U;
+ const uint8_t* V_pos = V;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+ const uint8_t nU = *U;
+ const uint8_t nV = *V;
+ *rgb = YUVToRGB32(*Y, nU, nV);
+ Y++; rgb++;
+ *rgb = YUVToRGB32(*Y, nU, nV);
+ Y++; rgb++;
+ }
+ if (y & 0x1) {
+ U_pos = U;
+ V_pos = V;
+ } else {
+ U = U_pos;
+ V = V_pos;
+ }
+ }
+}
+
+void YV12ToRGB565(const void* yv12, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+ /* YV12 stores the V (Cr) plane first, then U (Cb). */
+ const uint8_t* V = Y + pix_total;
+ const uint8_t* U = V + pix_total / 4;
+ _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+void YV12ToRGB32(const void* yv12, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+ const uint8_t* V = Y + pix_total;
+ const uint8_t* U = V + pix_total / 4;
+ _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+void YU12ToRGB32(const void* yu12, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* Y = reinterpret_cast<const uint8_t*>(yu12);
+ const uint8_t* U = Y + pix_total;
+ const uint8_t* V = U + pix_total / 4;
+ _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+/* Common converter for YUV 4:2:0 interleaved to RGB565.
+ * y, u, and v point to Y, U, and V planes, where U and V values are interleaved.
+ */
+static void _NVXXToRGB565(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ uint16_t* rgb,
+ int width,
+ int height)
+{
+ _YUV420SToRGB565(Y, U, V, 2, rgb, width, height);
+}
+
+/* Common converter for YUV 4:2:0 interleaved to RGB32.
+ * y, u, and v point to Y, U, and V planes, where U and V values are interleaved.
+ */
+static void _NVXXToRGB32(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ uint32_t* rgb,
+ int width,
+ int height)
+{
+ _YUV420SToRGB32(Y, U, V, 2, rgb, width, height);
+}
+
+void NV12ToRGB565(const void* nv12, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+ _NVXXToRGB565(y, y + pix_total, y + pix_total + 1,
+ reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+void NV12ToRGB32(const void* nv12, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+ _NVXXToRGB32(y, y + pix_total, y + pix_total + 1,
+ reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+void NV21ToRGB565(const void* nv21, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+ _NVXXToRGB565(y, y + pix_total + 1, y + pix_total,
+ reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+void NV21ToRGB32(const void* nv21, void* rgb, int width, int height)
+{
+ const int pix_total = width * height;
+ const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+ _NVXXToRGB32(y, y + pix_total + 1, y + pix_total,
+ reinterpret_cast<uint32_t*>(rgb), width, height);
+}
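+
+/* Layout note summarizing the converters above: NV12 and NV21 both store a
+ * full Y plane followed by a single interleaved chroma plane; NV12 orders it
+ * UVUV... while NV21 orders it VUVU..., which is why the U and V pointers
+ * passed above differ only by a one-byte offset while dUV = 2 steps across
+ * the interleaved pairs. */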
+
+}; /* namespace android */
diff --git a/v3/Converters.h b/v3/Converters.h
new file mode 100755
index 0000000..13e2a85
--- /dev/null
+++ b/v3/Converters.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_CONVERTERS_H
+#define HW_EMULATOR_CAMERA_CONVERTERS_H
+
+#include <endian.h>
+
+#ifndef __BYTE_ORDER
+#error "could not determine byte order"
+#endif
+
+/*
+ * Contains declaration of framebuffer conversion routines.
+ *
+ * NOTE: RGB and big/little endian considerations. Wherever in this code RGB
+ * pixels are represented as WORD, or DWORD, the color order inside the
+ * WORD / DWORD matches the one that would occur if that WORD / DWORD would have
+ * been read from the typecasted framebuffer:
+ *
+ * const uint32_t rgb = *reinterpret_cast<const uint32_t*>(framebuffer);
+ *
+ * So, if this code runs on the little endian CPU, red color in 'rgb' would be
+ * masked as 0x000000ff, and blue color would be masked as 0x00ff0000, while if
+ * the code runs on a big endian CPU, the red color in 'rgb' would be masked as
+ * 0xff000000, and blue color would be masked as 0x0000ff00.
+ */
+
+namespace android {
+
+/*
+ * RGB565 color masks
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static const uint16_t kRed5 = 0x001f;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0xf800;
+#else // __BYTE_ORDER
+static const uint16_t kRed5 = 0xf800;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0x001f;
+#endif // __BYTE_ORDER
+static const uint32_t kBlack16 = 0x0000;
+static const uint32_t kWhite16 = kRed5 | kGreen6 | kBlue5;
+
+/*
+ * RGB32 color masks
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static const uint32_t kRed8 = 0x000000ff;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x00ff0000;
+#else // __BYTE_ORDER
+static const uint32_t kRed8 = 0x00ff0000;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x000000ff;
+#endif // __BYTE_ORDER
+static const uint32_t kBlack32 = 0x00000000;
+static const uint32_t kWhite32 = kRed8 | kGreen8 | kBlue8;
+
+/*
+ * Extracting, and saving color bytes from / to WORD / DWORD RGB.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) static_cast<uint8_t>(rgb & kRed5)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>((rgb & kBlue5) >> 11)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. */
+#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) << 3) | ((rgb & kRed5) >> 2))
+#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) >> 8) | ((rgb & kBlue5) >> 14))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) static_cast<uint8_t>(rgb & kRed8)
+#define G32(rgb) static_cast<uint8_t>(((rgb & kGreen8) >> 8) & 0xff)
+#define B32(rgb) static_cast<uint8_t>(((rgb & kBlue8) >> 16) & 0xff)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(b) << 6) | g) << 5) | r)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(b) << 8) | g) << 8) | r)
+#else // __BYTE_ORDER
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) static_cast<uint8_t>((rgb & kRed5) >> 11)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>(rgb & kBlue5)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. */
+#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) >> 8) | ((rgb & kRed5) >> 14))
+#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) << 3) | ((rgb & kBlue5) >> 2))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) static_cast<uint8_t>((rgb & kRed8) >> 16)
+#define G32(rgb) static_cast<uint8_t>((rgb & kGreen8) >> 8)
+#define B32(rgb) static_cast<uint8_t>(rgb & kBlue8)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(r) << 6) | g) << 5) | b)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(r) << 8) | g) << 8) | b)
+#endif // __BYTE_ORDER
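+
+/* Example, little-endian case: a pure-red RGB565 word has its five red bits in
+ * the low bits (rgb = kRed5 = 0x001f), so R16(rgb) = 0x1f and
+ * R16_32(rgb) = (0x1f << 3) | (0x1f >> 2) = 0xf8 | 0x07 = 0xff, i.e. the 5-bit
+ * channel is widened to 8 bits by replicating its top bits. */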
+
+/* A union that simplifies breaking 32-bit RGB into separate R, G, and B colors.
+ */
+typedef union RGB32_t {
+ uint32_t color;
+ struct {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ uint8_t r; uint8_t g; uint8_t b; uint8_t a;
+#else // __BYTE_ORDER
+ uint8_t a; uint8_t b; uint8_t g; uint8_t r;
+#endif // __BYTE_ORDER
+ };
+} RGB32_t;
+
+
+/* Clips a value to the unsigned 0-255 range, treating negative values as zero.
+ */
+static __inline__ int
+clamp(int x)
+{
+ if (x > 255) return 255;
+ if (x < 0) return 0;
+ return x;
+}
+
+/********************************************************************************
+ * Basics of RGB -> YUV conversion
+ *******************************************************************************/
+
+/*
+ * RGB -> YUV conversion macros
+ */
+#define RGB2Y(r, g, b) (uint8_t)(((66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
+#define RGB2U(r, g, b) (uint8_t)(((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
+#define RGB2V(r, g, b) (uint8_t)(((112 * (r) - 94 * (g) - 18 * (b) + 128) >> 8) + 128)
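+
+/* Sanity check for the fixed-point math: pure white (r = g = b = 255) gives
+ * RGB2Y = (((66 + 129 + 25) * 255 + 128) >> 8) + 16 = (56228 >> 8) + 16 = 235,
+ * the nominal top of the video-range luma scale, with U = V = 128 (neutral). */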
+
+/* Converts R8 G8 B8 color to YUV. */
+static __inline__ void
+R8G8B8ToYUV(uint8_t r, uint8_t g, uint8_t b, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ *y = RGB2Y((int)r, (int)g, (int)b);
+ *u = RGB2U((int)r, (int)g, (int)b);
+ *v = RGB2V((int)r, (int)g, (int)b);
+}
+
+/* Converts RGB565 color to YUV. */
+static __inline__ void
+RGB565ToYUV(uint16_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ R8G8B8ToYUV(R16_32(rgb), G16_32(rgb), B16_32(rgb), y, u, v);
+}
+
+/* Converts RGB32 color to YUV. */
+static __inline__ void
+RGB32ToYUV(uint32_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ RGB32_t rgb_c;
+ rgb_c.color = rgb;
+ R8G8B8ToYUV(rgb_c.r, rgb_c.g, rgb_c.b, y, u, v);
+}
+
+/********************************************************************************
+ * Basics of YUV -> RGB conversion.
+ * Note that because the guest uses RGB only for the preview window, and the
+ * RGB format used there is RGB565, YUV -> RGB conversions can mostly be
+ * limited to RGB565.
+ *******************************************************************************/
+
+/*
+ * YUV -> RGB conversion macros
+ */
+
+/* "Optimized" macros that take specialy prepared Y, U, and V values:
+ * C = Y - 16
+ * D = U - 128
+ * E = V - 128
+ */
+#define YUV2RO(C, D, E) clamp((298 * (C) + 409 * (E) + 128) >> 8)
+#define YUV2GO(C, D, E) clamp((298 * (C) - 100 * (D) - 208 * (E) + 128) >> 8)
+#define YUV2BO(C, D, E) clamp((298 * (C) + 516 * (D) + 128) >> 8)
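+
+/* Sanity check: video-range white, Y = 235 (so C = 219) with neutral chroma
+ * (D = E = 0), gives YUV2RO = clamp((298 * 219 + 128) >> 8) = clamp(255) = 255. */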
+
+/*
+ * Main macros that take the original Y, U, and V values
+ */
+#define YUV2R(y, u, v) clamp((298 * ((y)-16) + 409 * ((v)-128) + 128) >> 8)
+#define YUV2G(y, u, v) clamp((298 * ((y)-16) - 100 * ((u)-128) - 208 * ((v)-128) + 128) >> 8)
+#define YUV2B(y, u, v) clamp((298 * ((y)-16) + 516 * ((u)-128) + 128) >> 8)
+
+
+/* Converts YUV color to RGB565. */
+static __inline__ uint16_t
+YUVToRGB565(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+ const uint16_t r = (YUV2RO(y,u,v) >> 3) & 0x1f;
+ const uint16_t g = (YUV2GO(y,u,v) >> 2) & 0x3f;
+ const uint16_t b = (YUV2BO(y,u,v) >> 3) & 0x1f;
+ return RGB565(r, g, b);
+}
+
+/* Converts YUV color to RGB32. */
+static __inline__ uint32_t
+YUVToRGB32(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+ RGB32_t rgb;
+ rgb.r = YUV2RO(y,u,v) & 0xff;
+ rgb.g = YUV2GO(y,u,v) & 0xff;
+ rgb.b = YUV2BO(y,u,v) & 0xff;
+ return rgb.color;
+}
+
+/* YUV pixel descriptor. */
+struct YUVPixel {
+ uint8_t Y;
+ uint8_t U;
+ uint8_t V;
+
+ inline YUVPixel()
+ : Y(0), U(0), V(0)
+ {
+ }
+
+ inline explicit YUVPixel(uint16_t rgb565)
+ {
+ RGB565ToYUV(rgb565, &Y, &U, &V);
+ }
+
+ inline explicit YUVPixel(uint32_t rgb32)
+ {
+ RGB32ToYUV(rgb32, &Y, &U, &V);
+ }
+
+ inline void get(uint8_t* pY, uint8_t* pU, uint8_t* pV) const
+ {
+ *pY = Y; *pU = U; *pV = V;
+ }
+};
+
+/* Converts a YV12 framebuffer to an RGB565 framebuffer.
+ * Param:
+ * yv12 - YV12 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YV12ToRGB565(const void* yv12, void* rgb, int width, int height);
+
+/* Converts a YV12 framebuffer to an RGB32 framebuffer.
+ * Param:
+ * yv12 - YV12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YV12ToRGB32(const void* yv12, void* rgb, int width, int height);
+
+/* Converts a YU12 framebuffer to an RGB32 framebuffer.
+ * Param:
+ * yu12 - YU12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YU12ToRGB32(const void* yu12, void* rgb, int width, int height);
+
+/* Converts an NV12 framebuffer to RGB565 framebuffer.
+ * Param:
+ * nv12 - NV12 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV12ToRGB565(const void* nv12, void* rgb, int width, int height);
+
+/* Converts an NV12 framebuffer to RGB32 framebuffer.
+ * Param:
+ * nv12 - NV12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV12ToRGB32(const void* nv12, void* rgb, int width, int height);
+
+/* Converts an NV21 framebuffer to RGB565 framebuffer.
+ * Param:
+ * nv21 - NV21 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV21ToRGB565(const void* nv21, void* rgb, int width, int height);
+
+/* Converts an NV21 framebuffer to RGB32 framebuffer.
+ * Param:
+ * nv21 - NV21 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV21ToRGB32(const void* nv21, void* rgb, int width, int height);
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_CONVERTERS_H */
diff --git a/v3/EmulatedBaseCamera.cpp b/v3/EmulatedBaseCamera.cpp
new file mode 100644
index 0000000..5fe7d73
--- /dev/null
+++ b/v3/EmulatedBaseCamera.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedBaseCamera that encapsulates
+ * functionality common to all emulated camera device versions ("fake",
+ * "webcam", "video file", "cam2.0" etc.). Instances of this class (for each
+ * emulated camera) are created during the construction of the
+ * EmulatedCameraFactory instance. This class serves as an entry point for all
+ * camera API calls that are common across all versions of the
+ * camera_device_t/camera_module_t structures.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_BaseCamera"
+#include <cutils/log.h>
+
+#include "EmulatedBaseCamera.h"
+
+namespace android {
+
+EmulatedBaseCamera::EmulatedBaseCamera(int cameraId,
+ uint32_t cameraVersion,
+ struct hw_device_t* device,
+ struct hw_module_t* module)
+ : mCameraInfo(NULL),
+ mCameraID(cameraId),
+ mCameraDeviceVersion(cameraVersion)
+{
+ /*
+ * Initialize camera_device descriptor for this object.
+ */
+
+ /* Common header */
+ device->tag = HARDWARE_DEVICE_TAG;
+ device->version = cameraVersion;
+ device->module = module;
+ device->close = NULL; // Must be filled in by child implementation
+}
+
+EmulatedBaseCamera::~EmulatedBaseCamera()
+{
+}
+
+status_t EmulatedBaseCamera::getCameraInfo(struct camera_info* info)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ info->device_version = mCameraDeviceVersion;
+ if (mCameraDeviceVersion >= HARDWARE_DEVICE_API_VERSION(2, 0)) {
+ info->static_camera_characteristics = mCameraInfo;
+ } else {
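+ /* HAL1 devices carry no static metadata; this appears to be a deliberate
+ * poison value so that an erroneous dereference fails loudly (assumption:
+ * inherited from the upstream goldfish emulated camera). */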
+ info->static_camera_characteristics = (camera_metadata_t*)0xcafef00d;
+ }
+
+ return NO_ERROR;
+}
+
+status_t EmulatedBaseCamera::plugCamera() {
+ ALOGE("%s: not supported", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+status_t EmulatedBaseCamera::unplugCamera() {
+ ALOGE("%s: not supported", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+camera_device_status_t EmulatedBaseCamera::getHotplugStatus() {
+ return CAMERA_DEVICE_STATUS_PRESENT;
+}
+
+} /* namespace android */
diff --git a/v3/EmulatedBaseCamera.h b/v3/EmulatedBaseCamera.h
new file mode 100644
index 0000000..539b215
--- /dev/null
+++ b/v3/EmulatedBaseCamera.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H
+
+#include <hardware/camera_common.h>
+#include <utils/Errors.h>
+#include "DebugUtils.h"
+
+namespace android {
+
+/*
+ * Contains declaration of a class EmulatedBaseCamera that encapsulates
+ * functionality common to all emulated camera device versions ("fake",
+ * "webcam", "video file", etc.). Instances of this class (for each emulated
+ * camera) are created during the construction of the EmulatedCameraFactory
+ * instance. This class serves as an entry point for all camera API calls that
+ * are common across all versions of the camera_device_t/camera_module_t
+ * structures.
+ */
+
+class EmulatedBaseCamera {
+ public:
+ EmulatedBaseCamera(int cameraId,
+ uint32_t cameraVersion,
+ struct hw_device_t* device,
+ struct hw_module_t* module);
+
+ virtual ~EmulatedBaseCamera();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+ public:
+ /* Initializes EmulatedCamera instance.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t Initialize() = 0;
+
+ /****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+ public:
+ /* Creates connection to the emulated camera device.
+ * This method is called in response to hw_module_methods_t::open callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t connectCamera(hw_device_t** device) = 0;
+
+
+ /* Plug the connection for the emulated camera. Until it's plugged in
+ * calls to connectCamera should fail with -ENODEV.
+ */
+ virtual status_t plugCamera();
+
+ /* Unplug the connection from underneath the emulated camera.
+ * This is similar to closing the camera, except that
+ * all function calls into the camera device will return
+ * -EPIPE errors until the camera is reopened.
+ */
+ virtual status_t unplugCamera();
+
+ virtual camera_device_status_t getHotplugStatus();
+
+ /* Closes connection to the emulated camera.
+ * This method is called in response to camera_device::close callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t closeCamera() = 0;
+
+ /* Gets camera information.
+ * This method is called in response to camera_module_t::get_camera_info
+ * callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t getCameraInfo(struct camera_info* info) = 0;
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+ protected:
+ /* Fixed camera information for camera2 devices. Must be valid to access if
+ * mCameraDeviceVersion is >= HARDWARE_DEVICE_API_VERSION(2,0) */
+ camera_metadata_t *mCameraInfo;
+
+ /* Zero-based ID assigned to this camera. */
+ int mCameraID;
+
+ private:
+
+ /* Version of the camera device HAL implemented by this camera */
+ int mCameraDeviceVersion;
+};
+
+} /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H */
diff --git a/v3/EmulatedCamera.cpp b/v3/EmulatedCamera.cpp
new file mode 100755
index 0000000..096c5b2
--- /dev/null
+++ b/v3/EmulatedCamera.cpp
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedCamera that encapsulates
+ * functionality common to all emulated cameras ("fake", "webcam", "video file",
+ * etc.). Instances of this class (for each emulated camera) are created during
+ * the construction of the EmulatedCameraFactory instance. This class serves as
+ * an entry point for all camera API calls that are defined by the
+ * camera_device_ops_t API.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Camera"
+#include <cutils/log.h>
+#include <ui/Rect.h>
+#include "EmulatedCamera.h"
+//#include "EmulatedFakeCameraDevice.h"
+#include "Converters.h"
+
+/* Defines whether we should trace parameter changes. */
+#define DEBUG_PARAM 1
+
+namespace android {
+
+#if DEBUG_PARAM
+/* Calculates and logs parameter changes.
+ * Param:
+ * current - Current set of camera parameters.
+ * new_par - String representation of new parameters.
+ */
+static void PrintParamDiff(const CameraParameters& current, const char* new_par);
+#else
+#define PrintParamDiff(current, new_par) (void(0))
+#endif /* DEBUG_PARAM */
+
+/* A helper routine that adds a value to the camera parameter.
+ * Param:
+ * param - Camera parameter to add a value to.
+ * val - Value to add.
+ * Return:
+ * A new string containing parameter with the added value on success, or NULL on
+ * a failure. If non-NULL string is returned, the caller is responsible for
+ * freeing it with 'free'.
+ */
+static char* AddValue(const char* param, const char* val);
+
+EmulatedCamera::EmulatedCamera(int cameraId,
+ struct hw_module_t* module)
+ : EmulatedBaseCamera(cameraId,
+ HARDWARE_DEVICE_API_VERSION(1, 0),
+ &common,
+ module),
+ mPreviewWindow(),
+ mCallbackNotifier()
+{
+ /* camera_device v1 fields. */
+ common.close = EmulatedCamera::close;
+ ops = &mDeviceOps;
+ priv = this;
+}
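+
+/* Illustrative sketch (not part of the original patch): the three assignments
+ * above are what let the static camera_device_ops_t callbacks recover the C++
+ * instance later. A hypothetical caller round trip:
+ *
+ *   camera_device_t* dev;                              // filled in by connectCamera()
+ *   EmulatedCamera* ec =
+ *       reinterpret_cast<EmulatedCamera*>(dev->priv);  // the object that set priv = this
+ */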
+
+EmulatedCamera::~EmulatedCamera()
+{
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedCamera::Initialize()
+{
+ /* Preview formats supported by this HAL. */
+ char preview_formats[1024];
+ snprintf(preview_formats, sizeof(preview_formats), "%s,%s,%s",
+ CameraParameters::PIXEL_FORMAT_YUV420SP,
+ CameraParameters::PIXEL_FORMAT_YUV420P,
+ CameraParameters::PIXEL_FORMAT_RGBA8888);
+
+ /*
+ * Fake required parameters.
+ */
+
+ mParameters.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, "320x240,0x0");
+
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "512");
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "384");
+ mParameters.set(CameraParameters::KEY_JPEG_QUALITY, "90");
+ mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.31");
+ mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "54.8");
+ mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "42.5");
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, "90");
+
+ /* Preview format settings used here are related to panoramic view only. They
+ * are not related to the preview window, which works only with RGB frames, as
+ * is explicitly stated when set_buffers_geometry is called on the preview
+ * window object. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS,
+ preview_formats);
+ mParameters.setPreviewFormat(CameraParameters::PIXEL_FORMAT_YUV420SP);
+
+ /* We don't rely on the actual frame rates supported by the camera device,
+ * since we will emulate them through timeouts in the emulated camera device
+ * worker thread. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
+ "30,24,20,15,10,5");
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, "(5,30)");
+ mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, "5,30");
+ mParameters.setPreviewFrameRate(24);
+
+ /* Only PIXEL_FORMAT_YUV420P is accepted by video framework in emulator! */
+ mParameters.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT,
+ CameraParameters::PIXEL_FORMAT_YUV420P);
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS,
+ CameraParameters::PIXEL_FORMAT_JPEG);
+ mParameters.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG);
+
+ /* Set exposure compensation. */
+ mParameters.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION, "6");
+ mParameters.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION, "-6");
+ mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP, "0.5");
+ mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION, "0");
+
+ /* Sets the white balance modes and the device-dependent scale factors. */
+ char supported_white_balance[1024];
+ snprintf(supported_white_balance, sizeof(supported_white_balance),
+ "%s,%s,%s,%s",
+ CameraParameters::WHITE_BALANCE_AUTO,
+ CameraParameters::WHITE_BALANCE_INCANDESCENT,
+ CameraParameters::WHITE_BALANCE_DAYLIGHT,
+ CameraParameters::WHITE_BALANCE_TWILIGHT);
+ mParameters.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+ supported_white_balance);
+ mParameters.set(CameraParameters::KEY_WHITE_BALANCE,
+ CameraParameters::WHITE_BALANCE_AUTO);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_AUTO, 1.0f, 1.0f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_INCANDESCENT, 1.38f, 0.60f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_DAYLIGHT, 1.09f, 0.92f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_TWILIGHT, 0.92f, 1.22f);
+ getCameraDevice()->setWhiteBalanceMode(CameraParameters::WHITE_BALANCE_AUTO);
+
+ /* Not supported features
+ */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
+ CameraParameters::FOCUS_MODE_FIXED);
+ mParameters.set(CameraParameters::KEY_FOCUS_MODE,
+ CameraParameters::FOCUS_MODE_FIXED);
+
+ return NO_ERROR;
+}
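+
+/* Illustrative only: the defaults set in Initialize() can be read back through
+ * the same CameraParameters object, e.g. (hypothetical snippet):
+ *
+ *   int q = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);  // 90
+ *   const char* fmt = mParameters.getPreviewFormat();  // PIXEL_FORMAT_YUV420SP
+ */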
+
+void EmulatedCamera::onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev)
+{
+ /* Notify the preview window first. */
+ mPreviewWindow.onNextFrameAvailable(frame, timestamp, camera_dev);
+
+ /* Notify callback notifier next. */
+ mCallbackNotifier.onNextFrameAvailable(frame, timestamp, camera_dev);
+}
+
+void EmulatedCamera::onCameraDeviceError(int err)
+{
+ /* Errors are reported through the callback notifier */
+ mCallbackNotifier.onCameraDeviceError(err);
+}
+
+/****************************************************************************
+ * Camera API implementation.
+ ***************************************************************************/
+
+status_t EmulatedCamera::connectCamera(hw_device_t** device)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = EINVAL;
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ ALOGE_IF(camera_dev == NULL, "%s: No camera device instance.", __FUNCTION__);
+
+ if (camera_dev != NULL) {
+ /* Connect to the camera device. */
+ res = getCameraDevice()->connectDevice();
+ if (res == NO_ERROR) {
+ *device = &common;
+ }
+ }
+
+ return -res;
+}
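+
+/* Note on the sign convention above (illustrative): internal methods return
+ * status_t values where NO_ERROR is 0 and failures are positive EXXX codes,
+ * so 'return -res' yields the negative errno the framework expects, while
+ * -NO_ERROR is still 0 on success. */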
+
+status_t EmulatedCamera::closeCamera()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ return cleanupCamera();
+}
+
+status_t EmulatedCamera::getCameraInfo(struct camera_info* info)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ const char* valstr = NULL;
+
+ valstr = mParameters.get(EmulatedCamera::FACING_KEY);
+ if (valstr != NULL) {
+ if (strcmp(valstr, EmulatedCamera::FACING_FRONT) == 0) {
+ info->facing = CAMERA_FACING_FRONT;
+ }
+ else if (strcmp(valstr, EmulatedCamera::FACING_BACK) == 0) {
+ info->facing = CAMERA_FACING_BACK;
+ }
+ } else {
+ info->facing = CAMERA_FACING_BACK;
+ }
+
+ valstr = mParameters.get(EmulatedCamera::ORIENTATION_KEY);
+ if (valstr != NULL) {
+ info->orientation = atoi(valstr);
+ } else {
+ info->orientation = 0;
+ }
+
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+status_t EmulatedCamera::setPreviewWindow(struct preview_stream_ops* window)
+{
+ /* Callback should return a negative errno. */
+ return -mPreviewWindow.setPreviewWindow(window,
+ mParameters.getPreviewFrameRate());
+}
+
+void EmulatedCamera::setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user)
+{
+ mCallbackNotifier.setCallbacks(notify_cb, data_cb, data_cb_timestamp,
+ get_memory, user);
+}
+
+void EmulatedCamera::enableMsgType(int32_t msg_type)
+{
+ mCallbackNotifier.enableMessage(msg_type);
+}
+
+void EmulatedCamera::disableMsgType(int32_t msg_type)
+{
+ mCallbackNotifier.disableMessage(msg_type);
+}
+
+int EmulatedCamera::isMsgTypeEnabled(int32_t msg_type)
+{
+ return mCallbackNotifier.isMessageEnabled(msg_type);
+}
+
+status_t EmulatedCamera::startPreview()
+{
+ /* Callback should return a negative errno. */
+ return -doStartPreview();
+}
+
+void EmulatedCamera::stopPreview()
+{
+ doStopPreview();
+}
+
+int EmulatedCamera::isPreviewEnabled()
+{
+ return mPreviewWindow.isPreviewEnabled();
+}
+
+status_t EmulatedCamera::storeMetaDataInBuffers(int enable)
+{
+ /* Callback should return a negative errno. */
+ return -mCallbackNotifier.storeMetaDataInBuffers(enable);
+}
+
+status_t EmulatedCamera::startRecording()
+{
+ /* Callback should return a negative errno. */
+ return -mCallbackNotifier.enableVideoRecording(mParameters.getPreviewFrameRate());
+}
+
+void EmulatedCamera::stopRecording()
+{
+ mCallbackNotifier.disableVideoRecording();
+}
+
+int EmulatedCamera::isRecordingEnabled()
+{
+ return mCallbackNotifier.isVideoRecordingEnabled();
+}
+
+void EmulatedCamera::releaseRecordingFrame(const void* opaque)
+{
+ mCallbackNotifier.releaseRecordingFrame(opaque);
+}
+
+status_t EmulatedCamera::setAutoFocus()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera::cancelAutoFocus()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera::takePicture()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res;
+ int width, height;
+ uint32_t org_fmt;
+
+ /* Collect frame info for the picture. */
+ mParameters.getPictureSize(&width, &height);
+ const char* pix_fmt = mParameters.getPictureFormat();
+ /* Guard against an unset picture format before the strcmp chain below. */
+ if (pix_fmt == NULL) {
+ ALOGE("%s: Picture format is not set", __FUNCTION__);
+ return EINVAL;
+ }
+ if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+ org_fmt = V4L2_PIX_FMT_YUV420;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+ org_fmt = V4L2_PIX_FMT_RGB32;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+ org_fmt = V4L2_PIX_FMT_NV21;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
+ /* We only have JPEG converted for NV21 format. */
+ org_fmt = V4L2_PIX_FMT_NV21;
+ } else {
+ ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+ return EINVAL;
+ }
+ /* Get JPEG quality. */
+ int jpeg_quality = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
+ if (jpeg_quality <= 0) {
+ jpeg_quality = 90; /* Fall back to default. */
+ }
+
+ /*
+ * Make sure preview is not running and the device is stopped before taking
+ * a picture.
+ */
+
+ const bool preview_on = mPreviewWindow.isPreviewEnabled();
+ if (preview_on) {
+ doStopPreview();
+ }
+
+ /* Camera device should have been stopped when the shutter message has been
+ * enabled. */
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ if (camera_dev->isStarted()) {
+ ALOGW("%s: Camera device is started", __FUNCTION__);
+ camera_dev->stopDeliveringFrames();
+ camera_dev->stopDevice();
+ }
+
+ /*
+ * Take the picture now.
+ */
+
+ /* Start camera device for the picture frame. */
+ ALOGD("Starting camera for picture: %.4s(%s)[%dx%d]",
+ reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
+ res = camera_dev->startDevice(width, height, org_fmt);
+ if (res != NO_ERROR) {
+ if (preview_on) {
+ doStartPreview();
+ }
+ return res;
+ }
+
+ /* Deliver one frame only. */
+ mCallbackNotifier.setJpegQuality(jpeg_quality);
+ mCallbackNotifier.setTakingPicture(true);
+ res = camera_dev->startDeliveringFrames(true);
+ if (res != NO_ERROR) {
+ mCallbackNotifier.setTakingPicture(false);
+ if (preview_on) {
+ doStartPreview();
+ }
+ }
+ return res;
+}
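+
+/* A condensed sketch of the sequence implemented above (illustrative; error
+ * handling and the preview-restart-on-failure path omitted):
+ *
+ *   doStopPreview();                          // if preview was running
+ *   camera_dev->stopDeliveringFrames();       // if the device was still started
+ *   camera_dev->stopDevice();
+ *   camera_dev->startDevice(width, height, org_fmt);
+ *   mCallbackNotifier.setJpegQuality(jpeg_quality);
+ *   mCallbackNotifier.setTakingPicture(true);
+ *   camera_dev->startDeliveringFrames(true);  // 'true' => deliver one frame only
+ */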
+
+status_t EmulatedCamera::cancelPicture()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera::setParameters(const char* parms)
+{
+ ALOGV("%s", __FUNCTION__);
+ PrintParamDiff(mParameters, parms);
+
+ CameraParameters new_param;
+ String8 str8_param(parms);
+ new_param.unflatten(str8_param);
+
+ /*
+ * Check for new exposure compensation parameter.
+ */
+ int new_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ const int min_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION);
+ const int max_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION);
+
+ // Checks if the exposure compensation change is supported.
+ if ((min_exposure_compensation != 0) || (max_exposure_compensation != 0)) {
+ if (new_exposure_compensation > max_exposure_compensation) {
+ new_exposure_compensation = max_exposure_compensation;
+ }
+ if (new_exposure_compensation < min_exposure_compensation) {
+ new_exposure_compensation = min_exposure_compensation;
+ }
+
+ const int current_exposure_compensation = mParameters.getInt(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ if (current_exposure_compensation != new_exposure_compensation) {
+ const float exposure_value = new_exposure_compensation *
+ new_param.getFloat(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP);
+
+ getCameraDevice()->setExposureCompensation(
+ exposure_value);
+ }
+ }
+
+ const char* new_white_balance = new_param.get(
+ CameraParameters::KEY_WHITE_BALANCE);
+ const char* supported_white_balance = new_param.get(
+ CameraParameters::KEY_SUPPORTED_WHITE_BALANCE);
+
+ if ((supported_white_balance != NULL) && (new_white_balance != NULL) &&
+ (strstr(supported_white_balance, new_white_balance) != NULL)) {
+
+ const char* current_white_balance = mParameters.get(
+ CameraParameters::KEY_WHITE_BALANCE);
+ if ((current_white_balance == NULL) ||
+ (strcmp(current_white_balance, new_white_balance) != 0)) {
+ ALOGV("Setting white balance to %s", new_white_balance);
+ getCameraDevice()->setWhiteBalanceMode(new_white_balance);
+ }
+ }
+
+ mParameters = new_param;
+
+ return NO_ERROR;
+}
+
+/* A dummy variable indicating "no params" / an error on exit from
+ * EmulatedCamera::getParameters(). */
+static char lNoParam = '\0';
+char* EmulatedCamera::getParameters()
+{
+ String8 params(mParameters.flatten());
+ char* ret_str =
+ reinterpret_cast<char*>(malloc(sizeof(char) * (params.length()+1)));
+ if (ret_str != NULL) {
+ /* Only touch the buffer after the allocation has been checked. */
+ memset(ret_str, 0, params.length()+1);
+ strncpy(ret_str, params.string(), params.length()+1);
+ return ret_str;
+ } else {
+ ALOGE("%s: Unable to allocate string for %s", __FUNCTION__, params.string());
+ /* Apparently, we can't return NULL from this routine. */
+ return &lNoParam;
+ }
+}
+
+void EmulatedCamera::putParameters(char* params)
+{
+ /* This method simply frees parameters allocated in getParameters(). */
+ if (params != NULL && params != &lNoParam) {
+ free(params);
+ }
+}
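+
+/* Sketch of the intended pairing (hypothetical framework-side code): the
+ * string malloc()'ed in getParameters() must come back through
+ * putParameters() so it is freed by the same allocator that created it:
+ *
+ *   char* p = dev->ops->get_parameters(dev);
+ *   // ... parse the flattened "key=value;key=value" string ...
+ *   dev->ops->put_parameters(dev, p);
+ */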
+
+status_t EmulatedCamera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
+{
+ ALOGV("%s: cmd = %d, arg1 = %d, arg2 = %d", __FUNCTION__, cmd, arg1, arg2);
+
+ /* TODO: Future enhancements. */
+ return 0;
+}
+
+void EmulatedCamera::releaseCamera()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ cleanupCamera();
+}
+
+status_t EmulatedCamera::dumpCamera(int fd)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return -EINVAL;
+}
+
+/****************************************************************************
+ * Preview management.
+ ***************************************************************************/
+
+status_t EmulatedCamera::doStartPreview()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ EmulatedCameraDevice* camera_dev = getCameraDevice();
+ if (camera_dev->isStarted()) {
+ camera_dev->stopDeliveringFrames();
+ camera_dev->stopDevice();
+ }
+
+ status_t res = mPreviewWindow.startPreview();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /* Make sure camera device is connected. */
+ if (!camera_dev->isConnected()) {
+ res = camera_dev->connectDevice();
+ if (res != NO_ERROR) {
+ mPreviewWindow.stopPreview();
+ return res;
+ }
+ }
+
+ int width, height;
+ /* Let's see what we should use for frame width and height. */
+ if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != NULL) {
+ mParameters.getVideoSize(&width, &height);
+ } else {
+ mParameters.getPreviewSize(&width, &height);
+ }
+ /* Let's see what we should use for the frame pixel format. Note that there
+ * are two parameters that define pixel formats for frames sent to the
+ * application via notification callbacks:
+ * - KEY_VIDEO_FRAME_FORMAT, which is used when recording video, and
+ * - KEY_PREVIEW_FORMAT, which is used for preview frame notification.
+ * We choose one or the other depending on the "recording-hint" property set
+ * by the framework, which indicates the intention: video, or preview. */
+ const char* pix_fmt = NULL;
+ const char* is_video = mParameters.get(EmulatedCamera::RECORDING_HINT_KEY);
+ if (is_video == NULL) {
+ is_video = CameraParameters::FALSE;
+ }
+ if (strcmp(is_video, CameraParameters::TRUE) == 0) {
+ /* Video recording is requested. Let's see if the video frame format is set. */
+ pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
+ }
+ /* If this was not video recording, or the video frame format is not set,
+ * let's use the preview pixel format for the main framebuffer. */
+ if (pix_fmt == NULL) {
+ pix_fmt = mParameters.getPreviewFormat();
+ }
+ if (pix_fmt == NULL) {
+ ALOGE("%s: Unable to obtain video format", __FUNCTION__);
+ mPreviewWindow.stopPreview();
+ return EINVAL;
+ }
+
+ /* Convert framework's pixel format to the FOURCC one. */
+ uint32_t org_fmt;
+ if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+ org_fmt = V4L2_PIX_FMT_YUV420;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+ org_fmt = V4L2_PIX_FMT_RGB32;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+ org_fmt = V4L2_PIX_FMT_NV21;
+ } else {
+ ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+ mPreviewWindow.stopPreview();
+ return EINVAL;
+ }
+ ALOGD("Starting camera: %dx%d -> %.4s(%s)",
+ width, height, reinterpret_cast<const char*>(&org_fmt), pix_fmt);
+ res = camera_dev->startDevice(width, height, org_fmt);
+ if (res != NO_ERROR) {
+ mPreviewWindow.stopPreview();
+ return res;
+ }
+
+ res = camera_dev->startDeliveringFrames(false);
+ if (res != NO_ERROR) {
+ camera_dev->stopDevice();
+ mPreviewWindow.stopPreview();
+ }
+
+ return res;
+}
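+
+/* Condensed view of the frame-format selection above (illustrative):
+ *
+ *   recording-hint == "true" and KEY_VIDEO_FRAME_FORMAT set  -> video format
+ *   otherwise                                                -> preview format
+ *
+ *   "yuv420p"  -> V4L2_PIX_FMT_YUV420
+ *   "rgba8888" -> V4L2_PIX_FMT_RGB32
+ *   "yuv420sp" -> V4L2_PIX_FMT_NV21
+ */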
+
+status_t EmulatedCamera::doStopPreview()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = NO_ERROR;
+ if (mPreviewWindow.isPreviewEnabled()) {
+ /* Stop the camera. */
+ if (getCameraDevice()->isStarted()) {
+ getCameraDevice()->stopDeliveringFrames();
+ res = getCameraDevice()->stopDevice();
+ }
+
+ if (res == NO_ERROR) {
+ /* Disable preview as well. */
+ mPreviewWindow.stopPreview();
+ }
+ }
+
+ return res;
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+status_t EmulatedCamera::cleanupCamera()
+{
+ status_t res = NO_ERROR;
+
+ /* If preview is running - stop it. */
+ res = doStopPreview();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+
+ /* Stop and disconnect the camera device. */
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ if (camera_dev != NULL) {
+ if (camera_dev->isStarted()) {
+ camera_dev->stopDeliveringFrames();
+ res = camera_dev->stopDevice();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+ }
+ if (camera_dev->isConnected()) {
+ res = camera_dev->disconnectDevice();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+ }
+ }
+
+ mCallbackNotifier.cleanupCBNotifier();
+
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera_device_ops structure.
+ *
+ * Callbacks here simply dispatch the calls to an appropriate method inside
+ * EmulatedCamera instance, defined by the 'dev' parameter.
+ ***************************************************************************/
+
+int EmulatedCamera::set_preview_window(struct camera_device* dev,
+ struct preview_stream_ops* window)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->setPreviewWindow(window);
+}
+
+void EmulatedCamera::set_callbacks(
+ struct camera_device* dev,
+ camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->setCallbacks(notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+}
+
+void EmulatedCamera::enable_msg_type(struct camera_device* dev, int32_t msg_type)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->enableMsgType(msg_type);
+}
+
+void EmulatedCamera::disable_msg_type(struct camera_device* dev, int32_t msg_type)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->disableMsgType(msg_type);
+}
+
+int EmulatedCamera::msg_type_enabled(struct camera_device* dev, int32_t msg_type)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->isMsgTypeEnabled(msg_type);
+}
+
+int EmulatedCamera::start_preview(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->startPreview();
+}
+
+void EmulatedCamera::stop_preview(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->stopPreview();
+}
+
+int EmulatedCamera::preview_enabled(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->isPreviewEnabled();
+}
+
+int EmulatedCamera::store_meta_data_in_buffers(struct camera_device* dev,
+ int enable)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->storeMetaDataInBuffers(enable);
+}
+
+int EmulatedCamera::start_recording(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->startRecording();
+}
+
+void EmulatedCamera::stop_recording(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->stopRecording();
+}
+
+int EmulatedCamera::recording_enabled(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->isRecordingEnabled();
+}
+
+void EmulatedCamera::release_recording_frame(struct camera_device* dev,
+ const void* opaque)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->releaseRecordingFrame(opaque);
+}
+
+int EmulatedCamera::auto_focus(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->setAutoFocus();
+}
+
+int EmulatedCamera::cancel_auto_focus(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->cancelAutoFocus();
+}
+
+int EmulatedCamera::take_picture(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->takePicture();
+}
+
+int EmulatedCamera::cancel_picture(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->cancelPicture();
+}
+
+int EmulatedCamera::set_parameters(struct camera_device* dev, const char* parms)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->setParameters(parms);
+}
+
+char* EmulatedCamera::get_parameters(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return NULL;
+ }
+ return ec->getParameters();
+}
+
+void EmulatedCamera::put_parameters(struct camera_device* dev, char* params)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->putParameters(params);
+}
+
+int EmulatedCamera::send_command(struct camera_device* dev,
+ int32_t cmd,
+ int32_t arg1,
+ int32_t arg2)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->sendCommand(cmd, arg1, arg2);
+}
+
+void EmulatedCamera::release(struct camera_device* dev)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return;
+ }
+ ec->releaseCamera();
+}
+
+int EmulatedCamera::dump(struct camera_device* dev, int fd)
+{
+ EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->dumpCamera(fd);
+}
+
+int EmulatedCamera::close(struct hw_device_t* device)
+{
+ EmulatedCamera* ec =
+ reinterpret_cast<EmulatedCamera*>(reinterpret_cast<struct camera_device*>(device)->priv);
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->closeCamera();
+}
+
+/****************************************************************************
+ * Static initializer for the camera callback API
+ ****************************************************************************/
+
+camera_device_ops_t EmulatedCamera::mDeviceOps = {
+ EmulatedCamera::set_preview_window,
+ EmulatedCamera::set_callbacks,
+ EmulatedCamera::enable_msg_type,
+ EmulatedCamera::disable_msg_type,
+ EmulatedCamera::msg_type_enabled,
+ EmulatedCamera::start_preview,
+ EmulatedCamera::stop_preview,
+ EmulatedCamera::preview_enabled,
+ EmulatedCamera::store_meta_data_in_buffers,
+ EmulatedCamera::start_recording,
+ EmulatedCamera::stop_recording,
+ EmulatedCamera::recording_enabled,
+ EmulatedCamera::release_recording_frame,
+ EmulatedCamera::auto_focus,
+ EmulatedCamera::cancel_auto_focus,
+ EmulatedCamera::take_picture,
+ EmulatedCamera::cancel_picture,
+ EmulatedCamera::set_parameters,
+ EmulatedCamera::get_parameters,
+ EmulatedCamera::put_parameters,
+ EmulatedCamera::send_command,
+ EmulatedCamera::release,
+ EmulatedCamera::dump
+};
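+
+/* Note (illustrative): the aggregate initializer above relies on positional
+ * order matching the field order of camera_device_ops_t in
+ * hardware/libhardware/include/hardware/camera.h. A designated-initializer
+ * form would make the pairing explicit, if the toolchain allows it:
+ *
+ *   camera_device_ops_t ops = {
+ *       .set_preview_window = EmulatedCamera::set_preview_window,
+ *       .set_callbacks      = EmulatedCamera::set_callbacks,
+ *       // ...
+ *   };
+ */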
+
+/****************************************************************************
+ * Common keys
+ ***************************************************************************/
+
+const char EmulatedCamera::FACING_KEY[] = "prop-facing";
+const char EmulatedCamera::ORIENTATION_KEY[] = "prop-orientation";
+const char EmulatedCamera::RECORDING_HINT_KEY[] = "recording-hint";
+
+/****************************************************************************
+ * Common string values
+ ***************************************************************************/
+
+const char EmulatedCamera::FACING_BACK[] = "back";
+const char EmulatedCamera::FACING_FRONT[] = "front";
+
+/****************************************************************************
+ * Helper routines
+ ***************************************************************************/
+
+static char* AddValue(const char* param, const char* val)
+{
+ const size_t len1 = strlen(param);
+ const size_t len2 = strlen(val);
+ char* ret = reinterpret_cast<char*>(malloc(len1 + len2 + 2));
+ ALOGE_IF(ret == NULL, "%s: Memory failure", __FUNCTION__);
+ if (ret != NULL) {
+ memcpy(ret, param, len1);
+ ret[len1] = ',';
+ memcpy(ret + len1 + 1, val, len2);
+ ret[len1 + len2 + 1] = '\0';
+ }
+ return ret;
+}
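+
+/* Hypothetical usage: AddValue("640x480,320x240", "1280x720") returns a
+ * freshly malloc()'ed "640x480,320x240,1280x720"; the caller must free() it. */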
+
+/****************************************************************************
+ * Parameter debugging helpers
+ ***************************************************************************/
+
+#if DEBUG_PARAM
+static void PrintParamDiff(const CameraParameters& current,
+ const char* new_par)
+{
+ char tmp[2048];
+ const char* wrk = new_par;
+
+ /* Divided with ';' */
+ const char* next = strchr(wrk, ';');
+ while (next != NULL) {
+ snprintf(tmp, sizeof(tmp), "%.*s", (int)(intptr_t)(next-wrk), wrk);
+ /* in the form key=value */
+ char* val = strchr(tmp, '=');
+ if (val != NULL) {
+ *val = '\0'; val++;
+ const char* in_current = current.get(tmp);
+ if (in_current != NULL) {
+ if (strcmp(in_current, val)) {
+ ALOGD("=== Value changed: %s: %s -> %s", tmp, in_current, val);
+ }
+ } else {
+ ALOGD("+++ New parameter: %s=%s", tmp, val);
+ }
+ } else {
+ ALOGW("No value separator in %s", tmp);
+ }
+ wrk = next + 1;
+ next = strchr(wrk, ';');
+ }
+}
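+
+/* Example (illustrative): with a current parameter set containing
+ * "preview-size=640x480" and new_par = "preview-size=320x240;foo=bar;",
+ * the loop above logs:
+ *
+ *   === Value changed: preview-size: 640x480 -> 320x240
+ *   +++ New parameter: foo=bar
+ *
+ * Note that a final pair not terminated by ';' is never inspected, since the
+ * loop only consumes segments that end in a separator.
+ */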
+#endif /* DEBUG_PARAM */
+
+}; /* namespace android */
diff --git a/v3/EmulatedCamera.h b/v3/EmulatedCamera.h
new file mode 100755
index 0000000..9825d5d
--- a/dev/null
+++ b/v3/EmulatedCamera.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_H
+
+/*
+ * Contains declaration of a class EmulatedCamera that encapsulates
+ * functionality common to all version 1.0 emulated camera devices ("fake",
+ * "webcam", "video file", etc.). Instances of this class (for each emulated
+ * camera) are created during the construction of the EmulatedCameraFactory
+ * instance. This class serves as an entry point for all camera API calls that
+ * are defined by the camera_device_ops_t API.
+ */
+
+#include <camera/CameraParameters.h>
+#include "EmulatedBaseCamera.h"
+#include "EmulatedCameraDevice.h"
+#include "PreviewWindow.h"
+#include "CallbackNotifier.h"
+
+namespace android {
+
+/* Encapsulates functionality common to all version 1.0 emulated camera devices
+ * ("fake", "webcam", "file stream", etc.).
+ *
+ * Note that EmulatedCameraFactory instantiates object of this class just once,
+ * when EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to
+ * connectDevice(), and closeCamera() methods of this class that are invoked in
+ * response to hw_module_methods_t::open, and camera_device::close callbacks.
+ */
+class EmulatedCamera : public camera_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera(int cameraId,
+ struct hw_module_t* module);
+
+ /* Destructs EmulatedCamera instance. */
+ virtual ~EmulatedCamera();
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+ /* Gets emulated camera device used by this instance of the emulated camera.
+ */
+ virtual EmulatedCameraDevice* getCameraDevice() = 0;
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /** Override of base class method */
+ virtual status_t Initialize();
+
+ /* Next frame is available in the camera device.
+ * This is a notification callback that is invoked by the camera device when
+ * a new frame is available.
+ * Note that most likely this method is called in context of a worker thread
+ * that camera device has created for frame capturing.
+ * Param:
+ * frame - Captured frame, or NULL if the camera device didn't pull the frame
+ * yet. If NULL is passed in this parameter, use the GetCurrentFrame method
+ * of the camera device class to obtain the next frame. Also note that
+ * the size of the frame that is passed here (as well as the frame
+ * returned from the GetCurrentFrame method) is defined by the current
+ * frame settings (width + height + pixel format) for the camera device.
+ * timestamp - Frame's timestamp.
+ * camera_dev - Camera device instance that delivered the frame.
+ */
+ virtual void onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev);
+
+ /* Entry point for notifications that occur in camera device.
+ * Param:
+ * err - CAMERA_ERROR_XXX error code.
+ */
+ virtual void onCameraDeviceError(int err);
+
+ /****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+public:
+ /** Override of base class method */
+ virtual status_t connectCamera(hw_device_t** device);
+
+ /** Override of base class method */
+ virtual status_t closeCamera();
+
+ /** Override of base class method */
+ virtual status_t getCameraInfo(struct camera_info* info);
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+ /* Actual handler for camera_device_ops_t::set_preview_window callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setPreviewWindow(struct preview_stream_ops *window);
+
+ /* Actual handler for camera_device_ops_t::set_callbacks callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user);
+
+ /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void enableMsgType(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void disableMsgType(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if message(s) is (are) disabled, != 0 if enabled.
+ */
+ virtual int isMsgTypeEnabled(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::start_preview callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t startPreview();
+
+ /* Actual handler for camera_device_ops_t::stop_preview callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void stopPreview();
+
+ /* Actual handler for camera_device_ops_t::preview_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if preview is disabled, != 0 if enabled.
+ */
+ virtual int isPreviewEnabled();
+
+ /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t storeMetaDataInBuffers(int enable);
+
+ /* Actual handler for camera_device_ops_t::start_recording callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t startRecording();
+
+ /* Actual handler for camera_device_ops_t::stop_recording callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void stopRecording();
+
+ /* Actual handler for camera_device_ops_t::recording_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if recording is disabled, != 0 if enabled.
+ */
+ virtual int isRecordingEnabled();
+
+ /* Actual handler for camera_device_ops_t::release_recording_frame callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void releaseRecordingFrame(const void* opaque);
+
+ /* Actual handler for camera_device_ops_t::auto_focus callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setAutoFocus();
+
+ /* Actual handler for camera_device_ops_t::cancel_auto_focus callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t cancelAutoFocus();
+
+ /* Actual handler for camera_device_ops_t::take_picture callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t takePicture();
+
+ /* Actual handler for camera_device_ops_t::cancel_picture callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t cancelPicture();
+
+ /* Actual handler for camera_device_ops_t::set_parameters callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setParameters(const char* parms);
+
+ /* Actual handler for camera_device_ops_t::get_parameters callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * Flattened parameters string. The caller will free the buffer allocated
+ * for the string by calling camera_device_ops_t::put_parameters callback.
+ */
+ virtual char* getParameters();
+
+ /* Actual handler for camera_device_ops_t::put_parameters callback.
+ * Called to free the string returned from camera_device_ops_t::get_parameters
+ * callback. There is nothing more to it: the name of the callback is just
+ * misleading.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void putParameters(char* params);
+
+ /* Actual handler for camera_device_ops_t::send_command callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+
+ /* Actual handler for camera_device_ops_t::release callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void releaseCamera();
+
+ /* Actual handler for camera_device_ops_t::dump callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t dumpCamera(int fd);
+
+ /****************************************************************************
+ * Preview management.
+ ***************************************************************************/
+
+protected:
+ /* Starts preview.
+ * Note that when this method is called mPreviewWindow may be NULL,
+ * indicating that the framework intends to start displaying video
+ * frames, but hasn't created the preview window yet.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t doStartPreview();
+
+ /* Stops preview.
+ * This method reverts doStartPreview().
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t doStopPreview();
+
+ /****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+protected:
+ /* Cleans up camera when released. */
+ virtual status_t cleanupCamera();
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera_device_ops structure.
+ * See hardware/libhardware/include/hardware/camera.h for information on
+ * each of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera class defined by the
+ * 'camera_device' parameter.
+ ***************************************************************************/
+
+private:
+ static int set_preview_window(struct camera_device* dev,
+ struct preview_stream_ops* window);
+
+ static void set_callbacks(struct camera_device* dev,
+ camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user);
+
+ static void enable_msg_type(struct camera_device* dev, int32_t msg_type);
+
+ static void disable_msg_type(struct camera_device* dev, int32_t msg_type);
+
+ static int msg_type_enabled(struct camera_device* dev, int32_t msg_type);
+
+ static int start_preview(struct camera_device* dev);
+
+ static void stop_preview(struct camera_device* dev);
+
+ static int preview_enabled(struct camera_device* dev);
+
+ static int store_meta_data_in_buffers(struct camera_device* dev, int enable);
+
+ static int start_recording(struct camera_device* dev);
+
+ static void stop_recording(struct camera_device* dev);
+
+ static int recording_enabled(struct camera_device* dev);
+
+ static void release_recording_frame(struct camera_device* dev,
+ const void* opaque);
+
+ static int auto_focus(struct camera_device* dev);
+
+ static int cancel_auto_focus(struct camera_device* dev);
+
+ static int take_picture(struct camera_device* dev);
+
+ static int cancel_picture(struct camera_device* dev);
+
+ static int set_parameters(struct camera_device* dev, const char* parms);
+
+ static char* get_parameters(struct camera_device* dev);
+
+ static void put_parameters(struct camera_device* dev, char* params);
+
+ static int send_command(struct camera_device* dev,
+ int32_t cmd,
+ int32_t arg1,
+ int32_t arg2);
+
+ static void release(struct camera_device* dev);
+
+ static int dump(struct camera_device* dev, int fd);
+
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Locks this instance for changes to parameters, state, etc. */
+ Mutex mObjectLock;
+
+ /* Camera parameters. */
+ CameraParameters mParameters;
+
+ /* Preview window. */
+ PreviewWindow mPreviewWindow;
+
+ /* Callback notifier. */
+ CallbackNotifier mCallbackNotifier;
+
+private:
+ /* Registered callbacks implementing camera API. */
+ static camera_device_ops_t mDeviceOps;
+
+ /****************************************************************************
+ * Common keys
+ ***************************************************************************/
+
+public:
+ static const char FACING_KEY[];
+ static const char ORIENTATION_KEY[];
+ static const char RECORDING_HINT_KEY[];
+
+ /****************************************************************************
+ * Common string values
+ ***************************************************************************/
+
+ /* Possible values for FACING_KEY */
+ static const char FACING_BACK[];
+ static const char FACING_FRONT[];
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_H */
diff --git a/v3/EmulatedCamera2.cpp b/v3/EmulatedCamera2.cpp
new file mode 100644
index 0000000..ea7424b
--- a/dev/null
+++ b/v3/EmulatedCamera2.cpp
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains the implementation of a class EmulatedCamera2 that encapsulates
+ * functionality common to all version 2.0 emulated camera devices. Instances
+ * of this class (one for each emulated camera) are created during the
+ * construction of the EmulatedCameraFactory instance. This class serves as an
+ * entry point for all camera API calls that are defined by the
+ * camera2_device_ops_t API.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera2_Camera"
+#include <cutils/log.h>
+
+#include "EmulatedCamera2.h"
+#include "system/camera_metadata.h"
+
+namespace android {
+
+/* Constructs EmulatedCamera2 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+EmulatedCamera2::EmulatedCamera2(int cameraId,
+ struct hw_module_t* module):
+ EmulatedBaseCamera(cameraId,
+ CAMERA_DEVICE_API_VERSION_2_0,
+ &common,
+ module)
+{
+ common.close = EmulatedCamera2::close;
+ ops = &sDeviceOps;
+ priv = this;
+
+ mNotifyCb = NULL;
+
+ mRequestQueueSrc = NULL;
+ mFrameQueueDst = NULL;
+
+ mVendorTagOps.get_camera_vendor_section_name =
+ EmulatedCamera2::get_camera_vendor_section_name;
+ mVendorTagOps.get_camera_vendor_tag_name =
+ EmulatedCamera2::get_camera_vendor_tag_name;
+ mVendorTagOps.get_camera_vendor_tag_type =
+ EmulatedCamera2::get_camera_vendor_tag_type;
+ mVendorTagOps.parent = this;
+
+ mStatusPresent = true;
+}
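+
+/* Sketch of the 'parent' back-pointer wired up above (illustrative):
+ * mVendorTagOps is a TagOps, i.e. a vendor_tag_query_ops_t extended with a
+ * pointer back to this object, which is how the static vendor-tag callbacks
+ * find their instance:
+ *
+ *   const vendor_tag_query_ops_t* v = &mVendorTagOps;
+ *   EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;  // == this
+ */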
+
+/* Destructs EmulatedCamera2 instance. */
+EmulatedCamera2::~EmulatedCamera2() {
+}
+
+/****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedCamera2::Initialize() {
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+status_t EmulatedCamera2::connectCamera(hw_device_t** device) {
+ *device = &common;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera2::closeCamera() {
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera2::getCameraInfo(struct camera_info* info) {
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera Device API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+/** Request input queue */
+
+int EmulatedCamera2::requestQueueNotify() {
+ return INVALID_OPERATION;
+}
+
+/** Count of requests in flight */
+int EmulatedCamera2::getInProgressCount() {
+ return INVALID_OPERATION;
+}
+
+/** Cancel all captures in flight */
+int EmulatedCamera2::flushCapturesInProgress() {
+ return INVALID_OPERATION;
+}
+
+/** Construct a default request for a given use case */
+int EmulatedCamera2::constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request) {
+ return INVALID_OPERATION;
+}
+
+/** Output stream creation and management */
+
+int EmulatedCamera2::allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers) {
+ return INVALID_OPERATION;
+}
+
+
+int EmulatedCamera2::releaseStream(uint32_t stream_id) {
+ return INVALID_OPERATION;
+}
+
+/** Reprocessing input stream management */
+
+int EmulatedCamera2::allocateReprocessStream(
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::releaseReprocessStream(uint32_t stream_id) {
+ return INVALID_OPERATION;
+}
+
+/** 3A triggering */
+
+int EmulatedCamera2::triggerAction(uint32_t trigger_id,
+ int ext1, int ext2) {
+ return INVALID_OPERATION;
+}
+
+/** Custom tag query methods */
+
+const char* EmulatedCamera2::getVendorSectionName(uint32_t tag) {
+ return NULL;
+}
+
+const char* EmulatedCamera2::getVendorTagName(uint32_t tag) {
+ return NULL;
+}
+
+int EmulatedCamera2::getVendorTagType(uint32_t tag) {
+ return -1;
+}
+
+/** Debug methods */
+
+int EmulatedCamera2::dump(int fd) {
+ return INVALID_OPERATION;
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera2_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera2.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera2 class defined by the
+ * 'camera_device2' parameter, or set a member value in the same.
+ ***************************************************************************/
+
+EmulatedCamera2* getInstance(const camera2_device_t *d) {
+ const EmulatedCamera2* cec = static_cast<const EmulatedCamera2*>(d);
+ return const_cast<EmulatedCamera2*>(cec);
+}
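+
+/* This cast is valid only because EmulatedCamera2 derives from
+ * camera2_device, so a camera2_device_t* handed to the framework points at
+ * the base subobject of the full instance. Illustrative round trip:
+ *
+ *   EmulatedCamera2* cam = ...;
+ *   const camera2_device_t* d = cam;   // implicit upcast
+ *   assert(getInstance(d) == cam);     // recovers the derived object
+ */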
+
+int EmulatedCamera2::set_request_queue_src_ops(const camera2_device_t *d,
+ const camera2_request_queue_src_ops *queue_src_ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ ec->mRequestQueueSrc = queue_src_ops;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::notify_request_queue_not_empty(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->requestQueueNotify();
+}
+
+int EmulatedCamera2::set_frame_queue_dst_ops(const camera2_device_t *d,
+ const camera2_frame_queue_dst_ops *queue_dst_ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ ec->mFrameQueueDst = queue_dst_ops;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::get_in_progress_count(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->getInProgressCount();
+}
+
+int EmulatedCamera2::flush_captures_in_progress(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->flushCapturesInProgress();
+}
+
+int EmulatedCamera2::construct_default_request(const camera2_device_t *d,
+ int request_template,
+ camera_metadata_t **request) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->constructDefaultRequest(request_template, request);
+}
+
+int EmulatedCamera2::allocate_stream(const camera2_device_t *d,
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateStream(width, height, format, stream_ops,
+ stream_id, format_actual, usage, max_buffers);
+}
+
+int EmulatedCamera2::register_stream_buffers(const camera2_device_t *d,
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->registerStreamBuffers(stream_id,
+ num_buffers,
+ buffers);
+}
+int EmulatedCamera2::release_stream(const camera2_device_t *d,
+ uint32_t stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->releaseStream(stream_id);
+}
+
+int EmulatedCamera2::allocate_reprocess_stream(const camera2_device_t *d,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateReprocessStream(width, height, format,
+ reprocess_stream_ops, stream_id, consumer_usage, max_buffers);
+}
+
+int EmulatedCamera2::allocate_reprocess_stream_from_stream(
+ const camera2_device_t *d,
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateReprocessStreamFromStream(output_stream_id,
+ reprocess_stream_ops, stream_id);
+}
+
+
+int EmulatedCamera2::release_reprocess_stream(const camera2_device_t *d,
+ uint32_t stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->releaseReprocessStream(stream_id);
+}
+
+int EmulatedCamera2::trigger_action(const camera2_device_t *d,
+ uint32_t trigger_id,
+ int ext1,
+ int ext2) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->triggerAction(trigger_id, ext1, ext2);
+}
+
+int EmulatedCamera2::set_notify_callback(const camera2_device_t *d,
+ camera2_notify_callback notify_cb, void* user) {
+ EmulatedCamera2* ec = getInstance(d);
+ Mutex::Autolock l(ec->mMutex);
+ ec->mNotifyCb = notify_cb;
+ ec->mNotifyUserPtr = user;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::get_metadata_vendor_tag_ops(const camera2_device_t *d,
+ vendor_tag_query_ops_t **ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ *ops = static_cast<vendor_tag_query_ops_t*>(
+ &ec->mVendorTagOps);
+ return NO_ERROR;
+}
+
+const char* EmulatedCamera2::get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorSectionName(tag);
+}
+
+const char* EmulatedCamera2::get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagName(tag);
+}
+
+int EmulatedCamera2::get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagType(tag);
+}
+
+int EmulatedCamera2::dump(const camera2_device_t *d, int fd) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->dump(fd);
+}
+
+int EmulatedCamera2::close(struct hw_device_t* device) {
+ EmulatedCamera2* ec =
+ static_cast<EmulatedCamera2*>(
+ reinterpret_cast<camera2_device_t*>(device) );
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera2 device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->closeCamera();
+}
+
+void EmulatedCamera2::sendNotification(int32_t msgType,
+ int32_t ext1, int32_t ext2, int32_t ext3) {
+ camera2_notify_callback notifyCb;
+ {
+ Mutex::Autolock l(mMutex);
+ notifyCb = mNotifyCb;
+ }
+ if (notifyCb != NULL) {
+ notifyCb(msgType, ext1, ext2, ext3, mNotifyUserPtr);
+ }
+}
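+
+/* The snapshot-under-lock above keeps mMutex from being held across the user
+ * callback, which could deadlock if the callback re-enters the HAL. Minimal
+ * sketch of the pattern (illustrative):
+ *
+ *   camera2_notify_callback cb;
+ *   { Mutex::Autolock l(mMutex); cb = mNotifyCb; }  // copy under the lock
+ *   if (cb != NULL) cb(...);                        // invoke unlocked
+ */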
+
+camera2_device_ops_t EmulatedCamera2::sDeviceOps = {
+ EmulatedCamera2::set_request_queue_src_ops,
+ EmulatedCamera2::notify_request_queue_not_empty,
+ EmulatedCamera2::set_frame_queue_dst_ops,
+ EmulatedCamera2::get_in_progress_count,
+ EmulatedCamera2::flush_captures_in_progress,
+ EmulatedCamera2::construct_default_request,
+ EmulatedCamera2::allocate_stream,
+ EmulatedCamera2::register_stream_buffers,
+ EmulatedCamera2::release_stream,
+ EmulatedCamera2::allocate_reprocess_stream,
+ EmulatedCamera2::allocate_reprocess_stream_from_stream,
+ EmulatedCamera2::release_reprocess_stream,
+ EmulatedCamera2::trigger_action,
+ EmulatedCamera2::set_notify_callback,
+ EmulatedCamera2::get_metadata_vendor_tag_ops,
+ EmulatedCamera2::dump
+};
+
+}; /* namespace android */
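The sDeviceOps table above is the standard HAL trampoline pattern: the
framework only sees C function pointers, and each static callback recovers
the C++ object from the camera2_device_t pointer (getInstance) before
dispatching to the corresponding virtual method. A minimal self-contained
sketch of the pattern, under hypothetical names (my_device_t / MyDevice),
not code from this patch:

    struct my_device_t { int version; };

    class MyDevice : public my_device_t {
    public:
        int doThing(int arg) { return arg + 1; }   // instance-side implementation

        // Static trampoline with a C-compatible signature: recover the C++
        // object from the C struct pointer, then dispatch to the method.
        static int do_thing(const my_device_t* d, int arg) {
            MyDevice* self =
                const_cast<MyDevice*>(static_cast<const MyDevice*>(d));
            return self->doThing(arg);
        }
    };

The static_cast performs the correct pointer adjustment because the
derivation is visible at compile time; the reinterpret_cast used in close()
above is only equivalent while the C device struct remains the first base.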
diff --git a/v3/EmulatedCamera2.h b/v3/EmulatedCamera2.h
new file mode 100644
index 0000000..9f5f67b
--- /dev/null
+++ b/v3/EmulatedCamera2.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H
+
+/*
+ * Contains declaration of a class EmulatedCamera2 that encapsulates
+ * functionality common to all version 2.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera2_device_ops_t API.
+ */
+
+#include "hardware/camera2.h"
+#include "system/camera_metadata.h"
+#include "EmulatedBaseCamera.h"
+#include <utils/Thread.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+/* Encapsulates functionality common to all version 2.0 emulated camera devices.
+ *
+ * Note that EmulatedCameraFactory instantiates an object of this class just
+ * once, when the EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to the
+ * connectCamera() and closeCamera() methods of this class, which are invoked in
+ * response to the hw_module_methods_t::open and camera_device::close callbacks.
+ */
+class EmulatedCamera2 : public camera2_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera2 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera2(int cameraId,
+ struct hw_module_t* module);
+
+ /* Destructs EmulatedCamera2 instance. */
+ virtual ~EmulatedCamera2();
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ virtual status_t Initialize();
+
+ /****************************************************************************
+ * Camera module API and generic hardware device API implementation
+ ***************************************************************************/
+
+public:
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info* info) = 0;
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+ /** Request input queue notification */
+ virtual int requestQueueNotify();
+
+ /** Count of requests in flight */
+ virtual int getInProgressCount();
+
+ /** Cancel all captures in flight */
+ virtual int flushCapturesInProgress();
+
+ virtual int constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request);
+
+ /** Output stream creation and management */
+ virtual int allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers);
+
+ virtual int registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers);
+
+ virtual int releaseStream(uint32_t stream_id);
+
+ /** Input stream creation and management */
+ virtual int allocateReprocessStream(
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers);
+
+ virtual int allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id);
+
+ virtual int releaseReprocessStream(uint32_t stream_id);
+
+ /** 3A action triggering */
+ virtual int triggerAction(uint32_t trigger_id,
+ int32_t ext1, int32_t ext2);
+
+ /** Custom tag definitions */
+ virtual const char* getVendorSectionName(uint32_t tag);
+ virtual const char* getVendorTagName(uint32_t tag);
+ virtual int getVendorTagType(uint32_t tag);
+
+ /** Debug methods */
+
+ virtual int dump(int fd);
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera2_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera2.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into the EmulatedCamera2 instance addressed by
+ * the 'camera2_device_t' parameter.
+ ***************************************************************************/
+
+private:
+ /** Input request queue */
+ static int set_request_queue_src_ops(const camera2_device_t *,
+ const camera2_request_queue_src_ops *queue_src_ops);
+ static int notify_request_queue_not_empty(const camera2_device_t *);
+
+ /** Output frame queue */
+ static int set_frame_queue_dst_ops(const camera2_device_t *,
+ const camera2_frame_queue_dst_ops *queue_dst_ops);
+
+ /** In-progress request management */
+ static int get_in_progress_count(const camera2_device_t *);
+
+ static int flush_captures_in_progress(const camera2_device_t *);
+
+ /** Request template creation */
+ static int construct_default_request(const camera2_device_t *,
+ int request_template,
+ camera_metadata_t **request);
+
+ /** Stream management */
+ static int allocate_stream(const camera2_device_t *,
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers);
+
+ static int register_stream_buffers(const camera2_device_t *,
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers);
+
+ static int release_stream(const camera2_device_t *,
+ uint32_t stream_id);
+
+ static int allocate_reprocess_stream(const camera2_device_t *,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers);
+
+ static int allocate_reprocess_stream_from_stream(const camera2_device_t *,
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id);
+
+ static int release_reprocess_stream(const camera2_device_t *,
+ uint32_t stream_id);
+
+ /** 3A triggers*/
+ static int trigger_action(const camera2_device_t *,
+ uint32_t trigger_id,
+ int ext1,
+ int ext2);
+
+ /** Notifications to application */
+ static int set_notify_callback(const camera2_device_t *,
+ camera2_notify_callback notify_cb,
+ void *user);
+
+ /** Vendor metadata registration */
+ static int get_metadata_vendor_tag_ops(const camera2_device_t *,
+ vendor_tag_query_ops_t **ops);
+ // for get_metadata_vendor_tag_ops
+ static const char* get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static const char* get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static int get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+
+ static int dump(const camera2_device_t *, int fd);
+
+ /** For hw_device_t ops */
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members shared with implementations
+ ***************************************************************************/
+ protected:
+ /** Mutex for calls through camera2 device interface */
+ Mutex mMutex;
+
+ bool mStatusPresent;
+
+ const camera2_request_queue_src_ops *mRequestQueueSrc;
+ const camera2_frame_queue_dst_ops *mFrameQueueDst;
+
+ struct TagOps : public vendor_tag_query_ops {
+ EmulatedCamera2 *parent;
+ };
+ TagOps mVendorTagOps;
+
+ void sendNotification(int32_t msgType,
+ int32_t ext1, int32_t ext2, int32_t ext3);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+ private:
+ static camera2_device_ops_t sDeviceOps;
+ camera2_notify_callback mNotifyCb;
+ void* mNotifyUserPtr;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H */
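The TagOps struct above addresses a narrower version of the same recovery
problem: the vendor tag callbacks receive only the vendor_tag_query_ops
pointer, not the device pointer, so the ops table itself is extended with a
back-pointer to its owner. A self-contained sketch of the idiom under
hypothetical names (widget_ops / Widget):

    #include <cstdint>

    struct widget_ops {
        const char* (*get_name)(const widget_ops* ops, uint32_t id);
    };

    class Widget {
    public:
        Widget() {
            mOps.get_name = &Widget::get_name;
            mOps.parent = this;
        }
        const widget_ops* ops() const { return &mOps; }

    private:
        const char* getName(uint32_t /*id*/) { return "widget"; }

        // Extend the C ops struct with a back-pointer, as TagOps does above.
        struct Ops : public widget_ops { Widget* parent; };
        Ops mOps;

        static const char* get_name(const widget_ops* ops, uint32_t id) {
            // Only valid because the pointer handed out by ops() always
            // addresses an Ops instance, never a bare widget_ops.
            Widget* self = static_cast<const Ops*>(ops)->parent;
            return self->getName(id);
        }
    };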
diff --git a/v3/EmulatedCamera3.cpp b/v3/EmulatedCamera3.cpp
new file mode 100644
index 0000000..47de44f
--- /dev/null
+++ b/v3/EmulatedCamera3.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Contains implementation of a class EmulatedCamera3 that encapsulates
+ * functionality common to all version 3.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera3_device_ops_t API.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera3_Camera"
+#include <cutils/log.h>
+
+#include "EmulatedCamera3.h"
+#include "system/camera_metadata.h"
+
+namespace android {
+
+/**
+ * Constructs EmulatedCamera3 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+EmulatedCamera3::EmulatedCamera3(int cameraId,
+ struct hw_module_t* module):
+ EmulatedBaseCamera(cameraId,
+ CAMERA_DEVICE_API_VERSION_3_0,
+ &common,
+ module),
+ mStatus(STATUS_ERROR)
+{
+ common.close = EmulatedCamera3::close;
+ ops = &sDeviceOps;
+
+ mCallbackOps = NULL;
+
+ mVendorTagOps.get_camera_vendor_section_name =
+ EmulatedCamera3::get_camera_vendor_section_name;
+ mVendorTagOps.get_camera_vendor_tag_name =
+ EmulatedCamera3::get_camera_vendor_tag_name;
+ mVendorTagOps.get_camera_vendor_tag_type =
+ EmulatedCamera3::get_camera_vendor_tag_type;
+ mVendorTagOps.parent = this;
+}
+
+/* Destructs EmulatedCamera3 instance. */
+EmulatedCamera3::~EmulatedCamera3() {
+}
+
+/****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedCamera3::Initialize() {
+ ALOGV("%s", __FUNCTION__);
+
+ mStatus = STATUS_CLOSED;
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+status_t EmulatedCamera3::connectCamera(hw_device_t** device) {
+ ALOGV("%s", __FUNCTION__);
+ if (device == NULL) return BAD_VALUE;
+
+ if (mStatus != STATUS_CLOSED) {
+ ALOGE("%s: Trying to open a camera in state %d!",
+ __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ *device = &common;
+ mStatus = STATUS_OPEN;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::closeCamera() {
+ mStatus = STATUS_CLOSED;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::getCameraInfo(struct camera_info* info) {
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera Device API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+status_t EmulatedCamera3::initializeDevice(
+ const camera3_callback_ops *callbackOps) {
+ if (callbackOps == NULL) {
+ ALOGE("%s: NULL callback ops provided to HAL!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (mStatus != STATUS_OPEN) {
+ ALOGE("%s: Trying to initialize a camera in state %d!",
+ __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ mCallbackOps = callbackOps;
+ mStatus = STATUS_READY;
+
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::configureStreams(
+ camera3_stream_configuration *streamList) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+status_t EmulatedCamera3::registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+const camera_metadata_t* EmulatedCamera3::constructDefaultRequestSettings(
+ int type) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return NULL;
+}
+
+status_t EmulatedCamera3::processCaptureRequest(
+ camera3_capture_request *request) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+/** Custom tag query methods */
+
+const char* EmulatedCamera3::getVendorSectionName(uint32_t tag) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return NULL;
+}
+
+const char* EmulatedCamera3::getVendorTagName(uint32_t tag) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return NULL;
+}
+
+int EmulatedCamera3::getVendorTagType(uint32_t tag) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return -1;
+}
+
+/** Debug methods */
+
+void EmulatedCamera3::dump(int fd) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return;
+}
+
+/****************************************************************************
+ * Protected API. Callbacks to the framework.
+ ***************************************************************************/
+
+void EmulatedCamera3::sendCaptureResult(camera3_capture_result_t *result) {
+ mCallbackOps->process_capture_result(mCallbackOps, result);
+}
+
+void EmulatedCamera3::sendNotify(camera3_notify_msg_t *msg) {
+ mCallbackOps->notify(mCallbackOps, msg);
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera3_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera3.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into the EmulatedCamera3 instance addressed by the
+ * 'camera3_device_t' parameter, or set a member value in the same.
+ ***************************************************************************/
+
+EmulatedCamera3* getInstance(const camera3_device_t *d) {
+ const EmulatedCamera3* cec = static_cast<const EmulatedCamera3*>(d);
+ return const_cast<EmulatedCamera3*>(cec);
+}
+
+int EmulatedCamera3::initialize(const struct camera3_device *d,
+ const camera3_callback_ops_t *callback_ops) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->initializeDevice(callback_ops);
+}
+
+int EmulatedCamera3::configure_streams(const struct camera3_device *d,
+ camera3_stream_configuration_t *stream_list) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->configureStreams(stream_list);
+}
+
+int EmulatedCamera3::register_stream_buffers(
+ const struct camera3_device *d,
+ const camera3_stream_buffer_set_t *buffer_set) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->registerStreamBuffers(buffer_set);
+}
+
+int EmulatedCamera3::process_capture_request(
+ const struct camera3_device *d,
+ camera3_capture_request_t *request) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->processCaptureRequest(request);
+}
+
+const camera_metadata_t* EmulatedCamera3::construct_default_request_settings(
+ const camera3_device_t *d, int type) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->constructDefaultRequestSettings(type);
+}
+
+void EmulatedCamera3::get_metadata_vendor_tag_ops(const camera3_device_t *d,
+ vendor_tag_query_ops_t *ops) {
+ ops->get_camera_vendor_section_name = get_camera_vendor_section_name;
+ ops->get_camera_vendor_tag_name = get_camera_vendor_tag_name;
+ ops->get_camera_vendor_tag_type = get_camera_vendor_tag_type;
+}
+
+const char* EmulatedCamera3::get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera3* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorSectionName(tag);
+}
+
+const char* EmulatedCamera3::get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera3* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagName(tag);
+}
+
+int EmulatedCamera3::get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera3* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagType(tag);
+}
+
+void EmulatedCamera3::dump(const camera3_device_t *d, int fd) {
+ EmulatedCamera3* ec = getInstance(d);
+ ec->dump(fd);
+}
+
+int EmulatedCamera3::close(struct hw_device_t* device) {
+ EmulatedCamera3* ec =
+ static_cast<EmulatedCamera3*>(
+ reinterpret_cast<camera3_device_t*>(device) );
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera3 device", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ return ec->closeCamera();
+}
+
+camera3_device_ops_t EmulatedCamera3::sDeviceOps = {
+ EmulatedCamera3::initialize,
+ EmulatedCamera3::configure_streams,
+ EmulatedCamera3::register_stream_buffers,
+ EmulatedCamera3::construct_default_request_settings,
+ EmulatedCamera3::process_capture_request,
+ EmulatedCamera3::get_metadata_vendor_tag_ops,
+ EmulatedCamera3::dump
+};
+
+}; /* namespace android */
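For orientation, this is the order in which a camera3 client is expected to
drive the sDeviceOps table above. A hedged sketch, not part of the patch:
'dev', 'cbs', 'streams' and 'req' are assumed to come from the HAL module
open path, and all error handling is elided.

    #include <hardware/camera3.h>

    static void run_one_request(camera3_device_t* dev,
                                const camera3_callback_ops_t* cbs,
                                camera3_stream_configuration_t* streams,
                                camera3_capture_request_t* req) {
        dev->ops->initialize(dev, cbs);             // STATUS_OPEN -> STATUS_READY
        dev->ops->configure_streams(dev, streams);  // declare the output streams
        req->settings = dev->ops->construct_default_request_settings(
                dev, CAMERA3_TEMPLATE_PREVIEW);
        // Results arrive asynchronously through cbs->process_capture_result
        // and cbs->notify, i.e. via sendCaptureResult() / sendNotify() above.
        dev->ops->process_capture_request(dev, req);
    }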
diff --git a/v3/EmulatedCamera3.h b/v3/EmulatedCamera3.h
new file mode 100644
index 0000000..c154f5a
--- /dev/null
+++ b/v3/EmulatedCamera3.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
+
+/**
+ * Contains declaration of a class EmulatedCamera3 that encapsulates
+ * functionality common to all version 3.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera3_device_ops_t API.
+ */
+
+#include "hardware/camera3.h"
+#include "system/camera_metadata.h"
+#include "EmulatedBaseCamera.h"
+#include "DebugUtils.h"
+
+namespace android {
+
+/**
+ * Encapsulates functionality common to all version 3.0 emulated camera devices
+ *
+ * Note that EmulatedCameraFactory instantiates an object of this class just
+ * once, when the EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to the
+ * connectCamera() and closeCamera() methods of this class, which are invoked in
+ * response to the hw_module_methods_t::open and camera_device::close callbacks.
+ */
+class EmulatedCamera3 : public camera3_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera3 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera3(int cameraId,
+ struct hw_module_t* module);
+
+    /* Destructs EmulatedCamera3 instance. */
+ virtual ~EmulatedCamera3();
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ virtual status_t Initialize();
+
+ /****************************************************************************
+ * Camera module API and generic hardware device API implementation
+ ***************************************************************************/
+
+public:
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info* info);
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+
+ virtual status_t initializeDevice(
+ const camera3_callback_ops *callbackOps);
+
+ virtual status_t configureStreams(
+ camera3_stream_configuration *streamList);
+
+ virtual status_t registerStreamBuffers(
+            const camera3_stream_buffer_set *bufferSet);
+
+ virtual const camera_metadata_t* constructDefaultRequestSettings(
+ int type);
+
+ virtual status_t processCaptureRequest(camera3_capture_request *request);
+
+ /** Debug methods */
+
+ virtual void dump(int fd);
+
+ /** Tag query methods */
+ virtual const char *getVendorSectionName(uint32_t tag);
+
+ virtual const char *getVendorTagName(uint32_t tag);
+
+ virtual int getVendorTagType(uint32_t tag);
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera3_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera3.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into the EmulatedCamera3 instance addressed by
+ * the 'camera3_device_t' parameter.
+ ***************************************************************************/
+
+private:
+
+ /** Startup */
+ static int initialize(const struct camera3_device *,
+ const camera3_callback_ops_t *callback_ops);
+
+ /** Stream configuration and buffer registration */
+
+ static int configure_streams(const struct camera3_device *,
+ camera3_stream_configuration_t *stream_list);
+
+ static int register_stream_buffers(const struct camera3_device *,
+ const camera3_stream_buffer_set_t *buffer_set);
+
+ /** Template request settings provision */
+
+ static const camera_metadata_t* construct_default_request_settings(
+ const struct camera3_device *, int type);
+
+ /** Submission of capture requests to HAL */
+
+ static int process_capture_request(const struct camera3_device *,
+ camera3_capture_request_t *request);
+
+ /** Vendor metadata registration */
+ static void get_metadata_vendor_tag_ops(const camera3_device_t *,
+ vendor_tag_query_ops_t *ops);
+ // for get_metadata_vendor_tag_ops
+ static const char* get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static const char* get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static int get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+
+ static void dump(const camera3_device_t *, int fd);
+
+ /** For hw_device_t ops */
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members shared with implementations
+ ***************************************************************************/
+ protected:
+
+ struct TagOps : public vendor_tag_query_ops {
+ EmulatedCamera3 *parent;
+ };
+ TagOps mVendorTagOps;
+
+ enum {
+ // State at construction time, and after a device operation error
+ STATUS_ERROR = 0,
+ // State after startup-time init and after device instance close
+ STATUS_CLOSED,
+ // State after being opened, before device instance init
+ STATUS_OPEN,
+ // State after device instance initialization
+ STATUS_READY,
+ // State while actively capturing data
+ STATUS_ACTIVE
+ } mStatus;
+
+ /**
+ * Callbacks back to the framework
+ */
+
+ void sendCaptureResult(camera3_capture_result_t *result);
+ void sendNotify(camera3_notify_msg_t *msg);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+ private:
+ static camera3_device_ops_t sDeviceOps;
+ const camera3_callback_ops_t *mCallbackOps;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H */
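The mStatus enum encodes a small lifecycle: STATUS_ERROR at construction,
STATUS_CLOSED after Initialize(), STATUS_OPEN after connectCamera(),
STATUS_READY after initializeDevice(), STATUS_ACTIVE while capturing, and
back to STATUS_CLOSED on closeCamera(). A self-contained toy model of those
transitions (illustrative names only, not HAL code):

    #include <cassert>

    enum Status { ST_ERROR, ST_CLOSED, ST_OPEN, ST_READY, ST_ACTIVE };

    struct Lifecycle {
        Status st = ST_ERROR;
        void initHal() { st = ST_CLOSED; }                 // Initialize()
        bool open()    { if (st != ST_CLOSED) return false; st = ST_OPEN;   return true; }
        bool init()    { if (st != ST_OPEN)   return false; st = ST_READY;  return true; }
        bool request() { if (st < ST_READY)   return false; st = ST_ACTIVE; return true; }
        void close()   { st = ST_CLOSED; }                 // closeCamera()
    };

    int main() {
        Lifecycle cam;
        cam.initHal();
        assert(cam.open() && cam.init() && cam.request());
        cam.close();
        assert(!cam.init());   // initializeDevice() is only legal from STATUS_OPEN
        return 0;
    }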
diff --git a/v3/EmulatedCameraCommon.h b/v3/EmulatedCameraCommon.h
new file mode 100755
index 0000000..c1d575c
--- /dev/null
+++ b/v3/EmulatedCameraCommon.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H
+
+/*
+ * Contains common declarations that are used across the camera emulation.
+ */
+
+#include <linux/videodev2.h>
+#include <hardware/camera.h>
+
+/* A helper class that tracks a routine execution.
+ * Basically, it dumps an entry message in its constructor, and an exit message
+ * in its destructor. Use the LOGRE() macro (declared below) to create instances
+ * of this class at the beginning of the tracked routines / methods.
+ */
+class HWERoutineTracker {
+public:
+ /* Constructor that prints an "entry" trace message. */
+ explicit HWERoutineTracker(const char* name)
+ : mName(name) {
+ ALOGV("Entering %s", mName);
+ }
+
+ /* Destructor that prints a "leave" trace message. */
+ ~HWERoutineTracker() {
+ ALOGV("Leaving %s", mName);
+ }
+
+private:
+ /* Stores the routine name. */
+ const char* mName;
+};
+
+/* Logs an execution of a routine / method.
+ * Two-level concatenation is needed so that __LINE__ expands before token
+ * pasting; pasting __LINE__ directly yields the literal identifier
+ * 'hwertracker___LINE__', which collides when used twice in one scope. */
+#define HWE_CONCAT_(a, b) a##b
+#define HWE_CONCAT(a, b) HWE_CONCAT_(a, b)
+#define LOGRE() HWERoutineTracker HWE_CONCAT(hwertracker_, __LINE__)(__FUNCTION__)
+
+/*
+ * min / max macros
+ */
+
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H */
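HWERoutineTracker is a plain RAII tracer: constructing it logs entry, and
the destructor logs exit on every return path, which is what makes the
one-line LOGRE() macro useful. A usage sketch, assuming this header plus the
usual <cutils/log.h> / LOG_TAG setup that provides ALOGV:

    static int tracked_routine(int fd) {
        LOGRE();           // logs "Entering tracked_routine" here
        if (fd < 0) {
            return -1;     // destructor still logs "Leaving tracked_routine"
        }
        return 0;          // ...and on this path as well
    }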
diff --git a/v3/EmulatedCameraDevice.cpp b/v3/EmulatedCameraDevice.cpp
new file mode 100755
index 0000000..b76353d
--- /dev/null
+++ b/v3/EmulatedCameraDevice.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Device"
+#include <cutils/log.h>
+#include <sys/select.h>
+#include <cmath>
+#include "EmulatedCameraDevice.h"
+
+namespace android {
+
+const float GAMMA_CORRECTION = 2.2f;
+EmulatedCameraDevice::EmulatedCameraDevice(EmulatedCamera* camera_hal)
+ : mObjectLock(),
+ mCurFrameTimestamp(0),
+ mCameraHAL(camera_hal),
+ mCurrentFrame(NULL),
+ mExposureCompensation(1.0f),
+ mWhiteBalanceScale(NULL),
+ mSupportedWhiteBalanceScale(),
+ mState(ECDS_CONSTRUCTED)
+{
+}
+
+EmulatedCameraDevice::~EmulatedCameraDevice()
+{
+ ALOGV("EmulatedCameraDevice destructor");
+ if (mCurrentFrame != NULL) {
+ delete[] mCurrentFrame;
+ }
+ for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
+ if (mSupportedWhiteBalanceScale.valueAt(i) != NULL) {
+ delete[] mSupportedWhiteBalanceScale.valueAt(i);
+ }
+ }
+}
+
+/****************************************************************************
+ * Emulated camera device public API
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::Initialize()
+{
+ if (isInitialized()) {
+ ALOGW("%s: Emulated camera device is already initialized: mState = %d",
+ __FUNCTION__, mState);
+ return NO_ERROR;
+ }
+
+ /* Instantiate worker thread object. */
+ mWorkerThread = new WorkerThread(this);
+ if (getWorkerThread() == NULL) {
+ ALOGE("%s: Unable to instantiate worker thread object", __FUNCTION__);
+ return ENOMEM;
+ }
+
+ mState = ECDS_INITIALIZED;
+
+ return NO_ERROR;
+}
+
+status_t EmulatedCameraDevice::startDeliveringFrames(bool one_burst)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (!isStarted()) {
+ ALOGE("%s: Device is not started", __FUNCTION__);
+ return EINVAL;
+ }
+
+ /* Frames will be delivered from the thread routine. */
+ const status_t res = startWorkerThread(one_burst);
+ ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
+ return res;
+}
+
+status_t EmulatedCameraDevice::stopDeliveringFrames()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (!isStarted()) {
+ ALOGW("%s: Device is not started", __FUNCTION__);
+ return NO_ERROR;
+ }
+
+ const status_t res = stopWorkerThread();
+    ALOGE_IF(res != NO_ERROR, "%s: stopWorkerThread failed", __FUNCTION__);
+ return res;
+}
+
+void EmulatedCameraDevice::setExposureCompensation(const float ev) {
+ ALOGV("%s", __FUNCTION__);
+
+ if (!isStarted()) {
+ ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
+ }
+
+ mExposureCompensation = std::pow(2.0f, ev / GAMMA_CORRECTION);
+ ALOGV("New exposure compensation is %f", mExposureCompensation);
+}
+
+void EmulatedCameraDevice::initializeWhiteBalanceModes(const char* mode,
+ const float r_scale,
+ const float b_scale) {
+ ALOGV("%s with %s, %f, %f", __FUNCTION__, mode, r_scale, b_scale);
+ float* value = new float[3];
+ value[0] = r_scale; value[1] = 1.0f; value[2] = b_scale;
+ mSupportedWhiteBalanceScale.add(String8(mode), value);
+}
+
+void EmulatedCameraDevice::setWhiteBalanceMode(const char* mode) {
+ ALOGV("%s with white balance %s", __FUNCTION__, mode);
+ mWhiteBalanceScale =
+ mSupportedWhiteBalanceScale.valueFor(String8(mode));
+}
+
+/* Computes the pixel value after applying the current white balance mode.
+ * The inputs are the y, u and v channels of the pixel; the adjusted values
+ * are stored in place. The adjustment is done in RGB space.
+ */
+void EmulatedCameraDevice::changeWhiteBalance(uint8_t& y,
+ uint8_t& u,
+ uint8_t& v) const {
+ float r_scale = mWhiteBalanceScale[0];
+ float b_scale = mWhiteBalanceScale[2];
+ int r = static_cast<float>(YUV2R(y, u, v)) / r_scale;
+ int g = YUV2G(y, u, v);
+ int b = static_cast<float>(YUV2B(y, u, v)) / b_scale;
+
+ y = RGB2Y(r, g, b);
+ u = RGB2U(r, g, b);
+ v = RGB2V(r, g, b);
+}
+
+status_t EmulatedCameraDevice::getCurrentPreviewFrame(void* buffer)
+{
+ if (!isStarted()) {
+ ALOGE("%s: Device is not started", __FUNCTION__);
+ return EINVAL;
+ }
+ if (mCurrentFrame == NULL || buffer == NULL) {
+ ALOGE("%s: No framebuffer", __FUNCTION__);
+ return EINVAL;
+ }
+
+ /* In emulation the framebuffer is never RGB. */
+ switch (mPixelFormat) {
+ case V4L2_PIX_FMT_YVU420:
+ YV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+ return NO_ERROR;
+ case V4L2_PIX_FMT_YUV420:
+ YU12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+ return NO_ERROR;
+ case V4L2_PIX_FMT_NV21:
+ NV21ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+ return NO_ERROR;
+ case V4L2_PIX_FMT_NV12:
+ NV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+ return NO_ERROR;
+
+ default:
+ ALOGE("%s: Unknown pixel format %.4s",
+ __FUNCTION__, reinterpret_cast<const char*>(&mPixelFormat));
+ return EINVAL;
+ }
+}
+
+/****************************************************************************
+ * Emulated camera device private API
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::commonStartDevice(int width,
+ int height,
+ uint32_t pix_fmt)
+{
+ /* Validate pixel format, and calculate framebuffer size at the same time. */
+ switch (pix_fmt) {
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV12:
+ mFrameBufferSize = (width * height * 12) / 8;
+ break;
+
+ default:
+ ALOGE("%s: Unknown pixel format %.4s",
+ __FUNCTION__, reinterpret_cast<const char*>(&pix_fmt));
+ return EINVAL;
+ }
+
+ /* Cache framebuffer info. */
+ mFrameWidth = width;
+ mFrameHeight = height;
+ mPixelFormat = pix_fmt;
+ mTotalPixels = width * height;
+
+ /* Allocate framebuffer. */
+ mCurrentFrame = new uint8_t[mFrameBufferSize];
+ if (mCurrentFrame == NULL) {
+ ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
+ return ENOMEM;
+ }
+ ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
+ __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
+ reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth, mFrameHeight);
+ return NO_ERROR;
+}
+
+void EmulatedCameraDevice::commonStopDevice()
+{
+ mFrameWidth = mFrameHeight = mTotalPixels = 0;
+ mPixelFormat = 0;
+
+ if (mCurrentFrame != NULL) {
+ delete[] mCurrentFrame;
+ mCurrentFrame = NULL;
+ }
+}
+
+/****************************************************************************
+ * Worker thread management.
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::startWorkerThread(bool one_burst)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (!isInitialized()) {
+ ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+ return EINVAL;
+ }
+
+ const status_t res = getWorkerThread()->startThread(one_burst);
+ ALOGE_IF(res != NO_ERROR, "%s: Unable to start worker thread", __FUNCTION__);
+ return res;
+}
+
+status_t EmulatedCameraDevice::stopWorkerThread()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (!isInitialized()) {
+ ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+ return EINVAL;
+ }
+
+ const status_t res = getWorkerThread()->stopThread();
+ ALOGE_IF(res != NO_ERROR, "%s: Unable to stop worker thread", __FUNCTION__);
+ return res;
+}
+
+bool EmulatedCameraDevice::inWorkerThread()
+{
+ /* This will end the thread loop, and will terminate the thread. Derived
+ * classes must override this method. */
+ return false;
+}
+
+/****************************************************************************
+ * Worker thread implementation.
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::WorkerThread::readyToRun()
+{
+ ALOGV("Starting emulated camera device worker thread...");
+
+ ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+ "%s: Thread control FDs are opened", __FUNCTION__);
+ /* Create a pair of FDs that would be used to control the thread. */
+ int thread_fds[2];
+ if (pipe(thread_fds) == 0) {
+ mThreadControl = thread_fds[1];
+ mControlFD = thread_fds[0];
+ ALOGV("Emulated device's worker thread has been started.");
+ return NO_ERROR;
+ } else {
+ ALOGE("%s: Unable to create thread control FDs: %d -> %s",
+ __FUNCTION__, errno, strerror(errno));
+ return errno;
+ }
+}
+
+status_t EmulatedCameraDevice::WorkerThread::stopThread()
+{
+ ALOGV("Stopping emulated camera device's worker thread...");
+
+ status_t res = EINVAL;
+ if (mThreadControl >= 0) {
+ /* Send "stop" message to the thread loop. */
+ const ControlMessage msg = THREAD_STOP;
+ const int wres =
+ TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
+ if (wres == sizeof(msg)) {
+ /* Stop the thread, and wait till it's terminated. */
+ res = requestExitAndWait();
+ if (res == NO_ERROR) {
+ /* Close control FDs. */
+ if (mThreadControl >= 0) {
+ close(mThreadControl);
+ mThreadControl = -1;
+ }
+ if (mControlFD >= 0) {
+ close(mControlFD);
+ mControlFD = -1;
+ }
+ ALOGV("Emulated camera device's worker thread has been stopped.");
+ } else {
+ ALOGE("%s: requestExitAndWait failed: %d -> %s",
+ __FUNCTION__, res, strerror(-res));
+ }
+ } else {
+ ALOGE("%s: Unable to send THREAD_STOP message: %d -> %s",
+ __FUNCTION__, errno, strerror(errno));
+ res = errno ? errno : EINVAL;
+ }
+ } else {
+ ALOGE("%s: Thread control FDs are not opened", __FUNCTION__);
+ }
+
+ return res;
+}
+
+EmulatedCameraDevice::WorkerThread::SelectRes
+EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout)
+{
+ fd_set fds[1];
+ struct timeval tv, *tvp = NULL;
+
+ const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 :
+ mControlFD + 1;
+ FD_ZERO(fds);
+ FD_SET(mControlFD, fds);
+ if (fd >= 0) {
+ FD_SET(fd, fds);
+ }
+ if (timeout) {
+ tv.tv_sec = timeout / 1000000;
+ tv.tv_usec = timeout % 1000000;
+ tvp = &tv;
+ }
+ int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
+ if (res < 0) {
+ ALOGE("%s: select returned %d and failed: %d -> %s",
+ __FUNCTION__, res, errno, strerror(errno));
+ return ERROR;
+ } else if (res == 0) {
+ /* Timeout. */
+ return TIMEOUT;
+ } else if (FD_ISSET(mControlFD, fds)) {
+        /* A control event. Let's read the message. */
+ ControlMessage msg;
+ res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
+ if (res != sizeof(msg)) {
+ ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
+ __FUNCTION__, res, errno, strerror(errno));
+ return ERROR;
+ }
+ /* THREAD_STOP is the only message expected here. */
+ if (msg == THREAD_STOP) {
+ ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
+ return EXIT_THREAD;
+ } else {
+ ALOGE("Unknown worker thread message %d", msg);
+ return ERROR;
+ }
+ } else {
+ /* Must be an FD. */
+ ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
+ __FUNCTION__);
+ return READY;
+ }
+}
+
+}; /* namespace android */
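stopThread() and Select() above implement the classic self-pipe idiom: the
worker blocks in select() on both the data FD and the read end of a pipe,
and the controller wakes it by writing a message to the write end. Stripped
to its essentials (a generic sketch, not this class's code; error handling
elided):

    #include <sys/select.h>
    #include <unistd.h>

    static bool wait_readable_or_stop(int data_fd, int stop_fd) {
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(data_fd, &fds);
        FD_SET(stop_fd, &fds);
        const int nfds = ((data_fd > stop_fd) ? data_fd : stop_fd) + 1;
        if (select(nfds, &fds, NULL, NULL, NULL) <= 0) return false;  // error
        return !FD_ISSET(stop_fd, &fds);  // false => stop was requested
    }

    // Controller side, mirroring stopThread(): one write wakes the loop.
    //     write(stop_write_end, "x", 1);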
diff --git a/v3/EmulatedCameraDevice.h b/v3/EmulatedCameraDevice.h
new file mode 100755
index 0000000..b7cdcb7
--- /dev/null
+++ b/v3/EmulatedCameraDevice.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H
+
+/*
+ * Contains declaration of an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera device parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+
+#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include "EmulatedCameraCommon.h"
+#include "Converters.h"
+
+namespace android {
+
+class EmulatedCamera;
+
+/* Encapsulates an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera device parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+class EmulatedCameraDevice {
+public:
+ /* Constructs EmulatedCameraDevice instance.
+ * Param:
+ * camera_hal - Emulated camera that implements the camera HAL API, and
+ * manages (contains) this object.
+ */
+ explicit EmulatedCameraDevice(EmulatedCamera* camera_hal);
+
+ /* Destructs EmulatedCameraDevice instance. */
+ virtual ~EmulatedCameraDevice();
+
+ /***************************************************************************
+ * Emulated camera device abstract interface
+ **************************************************************************/
+
+public:
+ /* Connects to the camera device.
+ * This method must be called on an initialized instance of this class.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t connectDevice() = 0;
+
+ /* Disconnects from the camera device.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status. If this method is
+     * called for an already disconnected or uninitialized instance of this class,
+ * a successful status must be returned from this method. If this method is
+ * called for an instance that is in the "started" state, this method must
+ * return a failure.
+ */
+ virtual status_t disconnectDevice() = 0;
+
+ /* Starts the camera device.
+ * This method tells the camera device to start capturing frames of the given
+ * dimensions for the given pixel format. Note that this method doesn't start
+ * the delivery of the captured frames to the emulated camera. Call
+ * startDeliveringFrames method to start delivering frames. This method must
+ * be called on a connected instance of this class. If it is called on a
+ * disconnected instance, this method must return a failure.
+ * Param:
+ * width, height - Frame dimensions to use when capturing video frames.
+ * pix_fmt - Pixel format to use when capturing video frames.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startDevice(int width, int height, uint32_t pix_fmt) = 0;
+
+ /* Stops the camera device.
+ * This method tells the camera device to stop capturing frames. Note that
+ * this method doesn't stop delivering frames to the emulated camera. Always
+ * call stopDeliveringFrames prior to calling this method.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status. If this method is
+ * called for an object that is not capturing frames, or is disconnected,
+ * or is uninitialized, a successful status must be returned from this
+ * method.
+ */
+ virtual status_t stopDevice() = 0;
+
+ /***************************************************************************
+ * Emulated camera device public API
+ **************************************************************************/
+
+public:
+ /* Initializes EmulatedCameraDevice instance.
+ * Derived classes should override this method in order to cache static
+ * properties of the physical device (list of supported pixel formats, frame
+ * sizes, etc.) If this method is called on an already initialized instance,
+ * it must return a successful status.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t Initialize();
+
+    /* Initializes the white balance mode parameters.
+     * The parameters are passed in by each individual derived camera class,
+     * since different camera manufacturers may have different preferences for
+     * the white balance parameters. The green channel in the RGB color space
+     * is fixed to keep the luminance reasonably constant.
+ *
+ * Param:
+ * mode the text describing the current white balance mode
+ * r_scale the scale factor for the R channel in RGB space
+ * b_scale the scale factor for the B channel in RGB space.
+ */
+ void initializeWhiteBalanceModes(const char* mode,
+ const float r_scale,
+ const float b_scale);
+
+ /* Starts delivering frames captured from the camera device.
+ * This method will start the worker thread that would be pulling frames from
+ * the camera device, and will deliver the pulled frames back to the emulated
+ * camera via onNextFrameAvailable callback. This method must be called on a
+ * connected instance of this class with a started camera device. If it is
+ * called on a disconnected instance, or camera device has not been started,
+ * this method must return a failure.
+ * Param:
+ * one_burst - Controls how many frames should be delivered. If this
+ * parameter is 'true', only one captured frame will be delivered to the
+ * emulated camera. If this parameter is 'false', frames will keep
+ * coming until stopDeliveringFrames method is called. Typically, this
+ * parameter is set to 'true' only in order to obtain a single frame
+ * that will be used as a "picture" in takePicture method of the
+ * emulated camera.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startDeliveringFrames(bool one_burst);
+
+ /* Stops delivering frames captured from the camera device.
+ * This method will stop the worker thread started by startDeliveringFrames.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t stopDeliveringFrames();
+
+ /* Sets the exposure compensation for the camera device.
+ */
+ void setExposureCompensation(const float ev);
+
+ /* Sets the white balance mode for the device.
+ */
+ void setWhiteBalanceMode(const char* mode);
+
+ /* Gets current framebuffer, converted into preview frame format.
+ * This method must be called on a connected instance of this class with a
+ * started camera device. If it is called on a disconnected instance, or
+ * camera device has not been started, this method must return a failure.
+ * Note that this method should be called only after at least one frame has
+ * been captured and delivered. Otherwise it will return garbage in the
+     * preview frame buffer. Typically, this method should be called from the
+     * onNextFrameAvailable callback.
+ * Param:
+ * buffer - Buffer, large enough to contain the entire preview frame.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t getCurrentPreviewFrame(void* buffer);
+
+ /* Gets width of the frame obtained from the physical device.
+ * Return:
+     *  Width of the frame obtained from the physical device. Note that the
+     *  value returned from this method is valid only if the camera device has
+     *  been started.
+ */
+ inline int getFrameWidth() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameWidth;
+ }
+
+ /* Gets height of the frame obtained from the physical device.
+ * Return:
+     *  Height of the frame obtained from the physical device. Note that the
+     *  value returned from this method is valid only if the camera device has
+     *  been started.
+ */
+ inline int getFrameHeight() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameHeight;
+ }
+
+ /* Gets byte size of the current frame buffer.
+ * Return:
+     *  Byte size of the frame buffer. Note that the value returned from this
+     *  method is valid only if the camera device has been started.
+ */
+ inline size_t getFrameBufferSize() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameBufferSize;
+ }
+
+ /* Gets number of pixels in the current frame buffer.
+ * Return:
+     *  Number of pixels in the frame buffer. Note that the value returned from
+     *  this method is valid only if the camera device has been started.
+ */
+ inline int getPixelNum() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mTotalPixels;
+ }
+
+ /* Gets pixel format of the frame that camera device streams to this class.
+ * Throughout camera framework, there are three different forms of pixel
+ * format representation:
+ * - Original format, as reported by the actual camera device. Values for
+ * this format are declared in bionic/libc/kernel/common/linux/videodev2.h
+ * - String representation as defined in CameraParameters::PIXEL_FORMAT_XXX
+ * strings in frameworks/base/include/camera/CameraParameters.h
+ * - HAL_PIXEL_FORMAT_XXX format, as defined in system/core/include/system/graphics.h
+     * Since the emulated camera device gets its data from the actual device,
+     * it receives the pixel format in the original form, and that is the
+     * representation returned from this method. HAL components will need to
+     * translate the value returned from this method to the appropriate form.
+     * This method must be called only on a started instance of this class,
+     * since it is applicable only when the camera device is ready to stream
+     * frames.
+ * Return:
+     *  Current framebuffer's pixel format. Note that the value returned from
+     *  this method is valid only if the camera device has been started.
+ */
+ inline uint32_t getOriginalPixelFormat() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mPixelFormat;
+ }
+
+ /*
+ * State checkers.
+ */
+
+ inline bool isInitialized() const {
+        /* Instance is initialized when the worker thread has been successfully
+ * created (but not necessarily started). */
+ return mWorkerThread.get() != NULL && mState != ECDS_CONSTRUCTED;
+ }
+ inline bool isConnected() const {
+        /* Instance is connected when its status is either "connected", or
+ * "started". */
+ return mState == ECDS_CONNECTED || mState == ECDS_STARTED;
+ }
+ inline bool isStarted() const {
+ return mState == ECDS_STARTED;
+ }
+
+ /****************************************************************************
+ * Emulated camera device private API
+ ***************************************************************************/
+protected:
+ /* Performs common validation and calculation of startDevice parameters.
+ * Param:
+ * width, height, pix_fmt - Parameters passed to the startDevice method.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t commonStartDevice(int width, int height, uint32_t pix_fmt);
+
+ /* Performs common cleanup on stopDevice.
+ * This method will undo what commonStartDevice had done.
+ */
+ virtual void commonStopDevice();
+
+    /** Computes a luminance value after taking the exposure compensation
+     * value into account.
+ *
+ * Param:
+ * inputY - The input luminance value.
+ * Return:
+ * The luminance value after adjusting the exposure compensation.
+ */
+ inline uint8_t changeExposure(const uint8_t& inputY) const {
+ return static_cast<uint8_t>(clamp(static_cast<float>(inputY) *
+ mExposureCompensation));
+ }
+
+ /** Computes the pixel value in YUV space after adjusting to the current
+ * white balance mode.
+ */
+ void changeWhiteBalance(uint8_t& y, uint8_t& u, uint8_t& v) const;
+
+ /****************************************************************************
+ * Worker thread management.
+     * Typically, when an emulated camera device starts capturing frames from
+     * the actual device, it does that in a worker thread that is created in
+     * startDeliveringFrames, and terminated in stopDeliveringFrames. Since
+     * this is such a typical scenario, it makes sense to encapsulate worker
+     * thread management in the base class for all emulated camera devices.
+ ***************************************************************************/
+
+protected:
+ /* Starts the worker thread.
+ * Typically, worker thread is started from startDeliveringFrames method of
+ * this class.
+ * Param:
+ * one_burst - Controls how many times thread loop should run. If this
+     *      parameter is 'true', thread routine will run only once. If this
+ * parameter is 'false', thread routine will run until stopWorkerThread
+ * method is called. See startDeliveringFrames for more info.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startWorkerThread(bool one_burst);
+
+ /* Stops the worker thread.
+ * Note that this method will always wait for the worker thread to terminate.
+     * Typically, the worker thread is stopped from the stopDeliveringFrames
+     * method of this class.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t stopWorkerThread();
+
+ /* Implementation of the worker thread routine.
+ * In the default implementation of the worker thread routine we simply
+     * return 'false', forcing the thread loop to exit and the thread to
+     * terminate. Derived classes should override this method to provide the
+     * actual frame delivery.
+ * Return:
+ * true To continue thread loop (this method will be called again), or false
+ * to exit the thread loop and to terminate the thread.
+ */
+ virtual bool inWorkerThread();
+
+ /* Encapsulates a worker thread used by the emulated camera device.
+ */
+ friend class WorkerThread;
+ class WorkerThread : public Thread {
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+ public:
+ inline explicit WorkerThread(EmulatedCameraDevice* camera_dev)
+ : Thread(true), // Callbacks may involve Java calls.
+ mCameraDevice(camera_dev),
+ mThreadControl(-1),
+ mControlFD(-1)
+ {
+ }
+
+ inline ~WorkerThread()
+ {
+ ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+ "%s: Control FDs are opened in the destructor",
+ __FUNCTION__);
+ if (mThreadControl >= 0) {
+ close(mThreadControl);
+ }
+ if (mControlFD >= 0) {
+ close(mControlFD);
+ }
+ }
+
+ /* Starts the thread
+ * Param:
+ * one_burst - Controls how many times thread loop should run. If
+         *      this parameter is 'true', thread routine will run only once.
+ * If this parameter is 'false', thread routine will run until
+ * stopThread method is called. See startWorkerThread for more
+ * info.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ inline status_t startThread(bool one_burst)
+ {
+ mOneBurst = one_burst;
+ return run(NULL, ANDROID_PRIORITY_URGENT_DISPLAY, 0);
+ }
+
+        /* Overridden base class method.
+         * It is overridden in order to provide one-time initialization just
+ * prior to starting the thread routine.
+ */
+ status_t readyToRun();
+
+ /* Stops the thread. */
+ status_t stopThread();
+
+ /* Values returned from the Select method of this class. */
+ enum SelectRes {
+ /* A timeout has occurred. */
+ TIMEOUT,
+ /* Data are available for read on the provided FD. */
+ READY,
+ /* Thread exit request has been received. */
+ EXIT_THREAD,
+ /* An error has occurred. */
+ ERROR
+ };
+
+ /* Select on an FD event, keeping in mind thread exit message.
+ * Param:
+ * fd - File descriptor on which to wait for an event. This
+ * parameter may be negative. If it is negative this method will
+ * only wait on a control message to the thread.
+ * timeout - Timeout in microseconds. 0 indicates no timeout (wait
+ * forever).
+ * Return:
+ * See SelectRes enum comments.
+ */
+ SelectRes Select(int fd, int timeout);
+
+ /****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+ private:
+ /* Implements abstract method of the base Thread class. */
+ bool threadLoop()
+ {
+ /* Simply dispatch the call to the containing camera device. */
+ if (mCameraDevice->inWorkerThread()) {
+ /* Respect "one burst" parameter (see startThread). */
+ return !mOneBurst;
+ } else {
+ return false;
+ }
+ }
+
+ /* Containing camera device object. */
+ EmulatedCameraDevice* mCameraDevice;
+
+ /* FD that is used to send control messages into the thread. */
+ int mThreadControl;
+
+ /* FD that thread uses to receive control messages. */
+ int mControlFD;
+
+ /* Controls number of times the thread loop runs.
+ * See startThread for more information. */
+ bool mOneBurst;
+
+ /* Enumerates control messages that can be sent into the thread. */
+ enum ControlMessage {
+ /* Stop the thread. */
+ THREAD_STOP
+ };
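+
+ /* Illustrative stop sequence (a sketch; the actual readyToRun() and
+ * stopThread() bodies live in EmulatedCameraDevice.cpp): readyToRun() is
+ * expected to create the mThreadControl / mControlFD pair, and stopThread()
+ * to push THREAD_STOP through it, which Select() then reports as
+ * EXIT_THREAD:
+ *
+ *   const ControlMessage msg = THREAD_STOP;
+ *   TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
+ */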
+ };
+
+ /* Worker thread accessor. */
+ inline WorkerThread* getWorkerThread() const
+ {
+ return mWorkerThread.get();
+ }
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Locks this instance for parameters, state, etc. change. */
+ Mutex mObjectLock;
+
+ /* Worker thread that is used in frame capturing. */
+ sp<WorkerThread> mWorkerThread;
+
+ /* Timestamp of the current frame. */
+ nsecs_t mCurFrameTimestamp;
+
+ /* Emulated camera object containing this instance. */
+ EmulatedCamera* mCameraHAL;
+
+ /* Framebuffer containing the current frame. */
+ uint8_t* mCurrentFrame;
+
+ /*
+ * Framebuffer properties.
+ */
+
+ /* Byte size of the framebuffer. */
+ size_t mFrameBufferSize;
+
+ /* Original pixel format (one of the V4L2_PIX_FMT_XXX values, as defined
+ * in bionic/libc/kernel/common/linux/videodev2.h). */
+ uint32_t mPixelFormat;
+
+ /* Frame width */
+ int mFrameWidth;
+
+ /* Frame height */
+ int mFrameHeight;
+
+ /* Total number of pixels */
+ int mTotalPixels;
+
+ /* Exposure compensation value */
+ float mExposureCompensation;
+
+ float* mWhiteBalanceScale;
+
+ DefaultKeyedVector<String8, float*> mSupportedWhiteBalanceScale;
+
+ /* Defines possible states of the emulated camera device object.
+ */
+ enum EmulatedCameraDeviceState {
+ /* Object has been constructed. */
+ ECDS_CONSTRUCTED,
+ /* Object has been initialized. */
+ ECDS_INITIALIZED,
+ /* Object has been connected to the physical device. */
+ ECDS_CONNECTED,
+ /* Camera device has been started. */
+ ECDS_STARTED,
+ };
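+
+ /* Expected lifecycle (a sketch inferred from the state names, not a formal
+ * contract): ECDS_CONSTRUCTED -> ECDS_INITIALIZED -> ECDS_CONNECTED ->
+ * ECDS_STARTED, with stop/disconnect walking back down the same ladder. */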
+
+ /* Object state. */
+ EmulatedCameraDeviceState mState;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H */
diff --git a/v3/EmulatedCameraFactory.cpp b/v3/EmulatedCameraFactory.cpp
new file mode 100755
index 0000000..c3bb8a6
--- a/dev/null
+++ b/v3/EmulatedCameraFactory.cpp
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedCameraFactory that manages cameras
+ * available for emulation.
+ */
+
+//#define LOG_NDEBUG 0
+//#define LOG_NDDEBUG 0
+//#define LOG_NIDEBUG 0
+#define LOG_TAG "EmulatedCamera_Factory"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include "EmulatedQemuCamera.h"
+#include "EmulatedFakeCamera.h"
+#include "EmulatedFakeCamera2.h"
+#include "EmulatedFakeCamera3.h"
+#include "EmulatedCameraHotplugThread.h"
+#include "EmulatedCameraFactory.h"
+
+extern camera_module_t HAL_MODULE_INFO_SYM;
+volatile int32_t gCamHal_LogLevel = 6;
+
+/* A global instance of EmulatedCameraFactory is statically instantiated and
+ * initialized when camera emulation HAL is loaded.
+ */
+android::EmulatedCameraFactory gEmulatedCameraFactory;
+
+namespace android {
+
+EmulatedCameraFactory::EmulatedCameraFactory()
+ : mQemuClient(),
+ mEmulatedCameras(NULL),
+ mEmulatedCameraNum(0),
+ mFakeCameraNum(0),
+ mConstructedOK(false),
+ mCallbacks(NULL)
+{
+ status_t res;
+ /* Connect to the factory service in the emulator, and create Qemu cameras. */
+ if (mQemuClient.connectClient(NULL) == NO_ERROR) {
+ /* Connection has succeeded. Create emulated cameras for each camera
+ * device, reported by the service. */
+ createQemuCameras();
+ }
+
+ if (isBackFakeCameraEmulationOn()) {
+ /* Camera ID. */
+ const int camera_id = mEmulatedCameraNum;
+ /* Use fake camera to emulate back-facing camera. */
+ mEmulatedCameraNum++;
+
+ /* Make sure that the array is allocated (in case there were no 'qemu'
+ * cameras created). Note that we preallocate the array so it may contain
+ * two fake cameras: one facing back, and another facing front. */
+ if (mEmulatedCameras == NULL) {
+ mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum + 1];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, mEmulatedCameraNum);
+ return;
+ }
+ memset(mEmulatedCameras, 0,
+ (mEmulatedCameraNum + 1) * sizeof(EmulatedBaseCamera*));
+ }
+
+ /* Create, and initialize the fake camera */
+ switch (getBackCameraHalVersion()) {
+ case 1:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 2:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera2(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 3:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera3(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ default:
+ ALOGE("%s: Unknown back camera hal version requested: %d", __FUNCTION__,
+ getBackCameraHalVersion());
+ }
+ if (mEmulatedCameras[camera_id] != NULL) {
+ ALOGV("%s: Back camera device version is %d", __FUNCTION__,
+ getBackCameraHalVersion());
+ res = mEmulatedCameras[camera_id]->Initialize();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to intialize back camera %d: %s (%d)",
+ __FUNCTION__, camera_id, strerror(-res), res);
+ delete mEmulatedCameras[camera_id];
+ mEmulatedCameraNum--;
+ }
+ } else {
+ mEmulatedCameraNum--;
+ ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
+ }
+ }
+
+ if (isFrontFakeCameraEmulationOn()) {
+ /* Camera ID. */
+ const int camera_id = mEmulatedCameraNum;
+ /* Use fake camera to emulate front-facing camera. */
+ mEmulatedCameraNum++;
+
+ /* Make sure that the array is allocated (in case there were no 'qemu'
+ * cameras created). */
+ if (mEmulatedCameras == NULL) {
+ mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, mEmulatedCameraNum);
+ return;
+ }
+ memset(mEmulatedCameras, 0,
+ mEmulatedCameraNum * sizeof(EmulatedBaseCamera*));
+ }
+
+ /* Create, and initialize the fake camera */
+ switch (getFrontCameraHalVersion()) {
+ case 1:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 2:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera2(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 3:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera3(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ default:
+ ALOGE("%s: Unknown front camera hal version requested: %d",
+ __FUNCTION__,
+ getFrontCameraHalVersion());
+ }
+ if (mEmulatedCameras[camera_id] != NULL) {
+ ALOGV("%s: Front camera device version is %d", __FUNCTION__,
+ getFrontCameraHalVersion());
+ res = mEmulatedCameras[camera_id]->Initialize();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to intialize front camera %d: %s (%d)",
+ __FUNCTION__, camera_id, strerror(-res), res);
+ delete mEmulatedCameras[camera_id];
+ mEmulatedCameraNum--;
+ }
+ } else {
+ mEmulatedCameraNum--;
+ ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
+ }
+ }
+
+ ALOGV("%d cameras are being emulated. %d of them are fake cameras.",
+ mEmulatedCameraNum, mFakeCameraNum);
+
+ /* Create hotplug thread */
+ {
+ Vector<int> cameraIdVector;
+ for (int i = 0; i < mEmulatedCameraNum; ++i) {
+ cameraIdVector.push_back(i);
+ }
+ mHotplugThread = new EmulatedCameraHotplugThread(&cameraIdVector[0],
+ mEmulatedCameraNum);
+ mHotplugThread->run();
+ }
+
+ mConstructedOK = true;
+}
+
+EmulatedCameraFactory::~EmulatedCameraFactory()
+{
+ if (mEmulatedCameras != NULL) {
+ for (int n = 0; n < mEmulatedCameraNum; n++) {
+ if (mEmulatedCameras[n] != NULL) {
+ delete mEmulatedCameras[n];
+ }
+ }
+ delete[] mEmulatedCameras;
+ }
+
+ if (mHotplugThread != NULL) {
+ mHotplugThread->requestExit();
+ mHotplugThread->join();
+ }
+}
+
+/****************************************************************************
+ * Camera HAL API handlers.
+ *
+ * Each handler simply verifies existence of an appropriate EmulatedBaseCamera
+ * instance, and dispatches the call to that instance.
+ *
+ ***************************************************************************/
+
+int EmulatedCameraFactory::cameraDeviceOpen(int camera_id, hw_device_t** device)
+{
+ ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+
+ *device = NULL;
+
+ if (!isConstructedOK()) {
+ ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+ ALOGE("%s: Camera id %d is out of bounds (%d)",
+ __FUNCTION__, camera_id, getEmulatedCameraNum());
+ return -ENODEV;
+ }
+
+ return mEmulatedCameras[camera_id]->connectCamera(device);
+}
+
+int EmulatedCameraFactory::getCameraInfo(int camera_id, struct camera_info* info)
+{
+ ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+
+ if (!isConstructedOK()) {
+ ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+ ALOGE("%s: Camera id %d is out of bounds (%d)",
+ __FUNCTION__, camera_id, getEmulatedCameraNum());
+ return -ENODEV;
+ }
+
+ return mEmulatedCameras[camera_id]->getCameraInfo(info);
+}
+
+int EmulatedCameraFactory::setCallbacks(
+ const camera_module_callbacks_t *callbacks)
+{
+ ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
+
+ mCallbacks = callbacks;
+
+ return OK;
+}
+
+/****************************************************************************
+ * Camera HAL API callbacks.
+ ***************************************************************************/
+
+int EmulatedCameraFactory::device_open(const hw_module_t* module,
+ const char* name,
+ hw_device_t** device)
+{
+ /*
+ * Simply verify the parameters, and dispatch the call inside the
+ * EmulatedCameraFactory instance.
+ */
+
+ if (module != &HAL_MODULE_INFO_SYM.common) {
+ ALOGE("%s: Invalid module %p expected %p",
+ __FUNCTION__, module, &HAL_MODULE_INFO_SYM.common);
+ return -EINVAL;
+ }
+ if (name == NULL) {
+ ALOGE("%s: NULL name is not expected here", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ return gEmulatedCameraFactory.cameraDeviceOpen(atoi(name), device);
+}
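+
+/* Illustrative call path (a sketch; per the HAL contract the framework passes
+ * the camera ID, rendered as a decimal string, as the device name):
+ *
+ *   hw_device_t* dev = NULL;
+ *   HAL_MODULE_INFO_SYM.common.methods->open(
+ *       &HAL_MODULE_INFO_SYM.common, "0", &dev);  // Opens camera ID 0.
+ */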
+
+int EmulatedCameraFactory::get_number_of_cameras(void)
+{
+ return gEmulatedCameraFactory.getEmulatedCameraNum();
+}
+
+int EmulatedCameraFactory::get_camera_info(int camera_id,
+ struct camera_info* info)
+{
+ return gEmulatedCameraFactory.getCameraInfo(camera_id, info);
+}
+
+int EmulatedCameraFactory::set_callbacks(
+ const camera_module_callbacks_t *callbacks)
+{
+ return gEmulatedCameraFactory.setCallbacks(callbacks);
+}
+
+/********************************************************************************
+ * Internal API
+ *******************************************************************************/
+
+/*
+ * Camera information tokens passed in response to the "list" factory query.
+ */
+
+/* Device name token. */
+static const char lListNameToken[] = "name=";
+/* Frame dimensions token. */
+static const char lListDimsToken[] = "framedims=";
+/* Facing direction token. */
+static const char lListDirToken[] = "dir=";
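+
+/* An illustrative (hypothetical) list entry, one camera per line:
+ *
+ *   name=/dev/video0 framedims=640x480,320x240 dir=back
+ *
+ * createQemuCameras() below splits each line on the tokens above. */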
+
+void EmulatedCameraFactory::createQemuCameras()
+{
+ /* Obtain camera list. */
+ char* camera_list = NULL;
+ status_t res = mQemuClient.listCameras(&camera_list);
+ /* An empty list, or a list containing just an EOL, means that no connected
+ * cameras were found. */
+ if (res != NO_ERROR || camera_list == NULL || *camera_list == '\0' ||
+ *camera_list == '\n') {
+ if (camera_list != NULL) {
+ free(camera_list);
+ }
+ return;
+ }
+
+ /*
+ * Calculate the number of connected cameras. The number of EOLs in the
+ * camera list equals the number of connected cameras.
+ */
+
+ int num = 0;
+ const char* eol = strchr(camera_list, '\n');
+ while (eol != NULL) {
+ num++;
+ eol = strchr(eol + 1, '\n');
+ }
+
+ /* Allocate the array for emulated camera instances. Note that we allocate
+ * two more entries for back and front fake camera emulation. */
+ mEmulatedCameras = new EmulatedBaseCamera*[num + 2];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, num + 1);
+ free(camera_list);
+ return;
+ }
+ memset(mEmulatedCameras, 0, sizeof(EmulatedBaseCamera*) * (num + 1));
+
+ /*
+ * Iterate the list, creating and initializing an emulated qemu camera for
+ * each entry (line) in the list.
+ */
+
+ int index = 0;
+ char* cur_entry = camera_list;
+ while (cur_entry != NULL && *cur_entry != '\0' && index < num) {
+ /* Find the end of the current camera entry, and terminate it with zero
+ * for simpler string manipulation. */
+ char* next_entry = strchr(cur_entry, '\n');
+ if (next_entry != NULL) {
+ *next_entry = '\0';
+ next_entry++; // Start of the next entry.
+ }
+
+ /* Find 'name', 'framedims', and 'dir' tokens that are required here. */
+ char* name_start = strstr(cur_entry, lListNameToken);
+ char* dim_start = strstr(cur_entry, lListDimsToken);
+ char* dir_start = strstr(cur_entry, lListDirToken);
+ if (name_start != NULL && dim_start != NULL && dir_start != NULL) {
+ /* Advance to the token values. */
+ name_start += strlen(lListNameToken);
+ dim_start += strlen(lListDimsToken);
+ dir_start += strlen(lListDirToken);
+
+ /* Terminate token values with zero. */
+ char* s = strchr(name_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+ s = strchr(dim_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+ s = strchr(dir_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+
+ /* Create and initialize qemu camera. */
+ EmulatedQemuCamera* qemu_cam =
+ new EmulatedQemuCamera(index, &HAL_MODULE_INFO_SYM.common);
+ if (NULL != qemu_cam) {
+ res = qemu_cam->Initialize(name_start, dim_start, dir_start);
+ if (res == NO_ERROR) {
+ mEmulatedCameras[index] = qemu_cam;
+ index++;
+ } else {
+ delete qemu_cam;
+ }
+ } else {
+ ALOGE("%s: Unable to instantiate EmulatedQemuCamera",
+ __FUNCTION__);
+ }
+ } else {
+ ALOGW("%s: Bad camera information: %s", __FUNCTION__, cur_entry);
+ }
+
+ cur_entry = next_entry;
+ }
+
+ mEmulatedCameraNum = index;
+}
+
+bool EmulatedCameraFactory::isBackFakeCameraEmulationOn()
+{
+ /* Defined by the 'qemu.sf.fake_camera' boot property: if the property
+ * exists and is set to 'both' or 'back', then a fake camera is used to
+ * emulate the back camera. */
+#if 0
+ char prop[PROPERTY_VALUE_MAX];
+ if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
+ (!strcmp(prop, "both") || !strcmp(prop, "back"))) {
+ return true;
+ } else {
+ return false;
+ }
+#else
+ return true;
+#endif
+}
+
+int EmulatedCameraFactory::getBackCameraHalVersion()
+{
+ /* Defined by the 'qemu.sf.back_camera_hal' boot property: if the
+ * property doesn't exist, it is assumed to be 1. */
+#if 0
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("qemu.sf.back_camera_hal", prop, NULL) > 0) {
+ char *prop_end = prop;
+ int val = strtol(prop, &prop_end, 10);
+ if (*prop_end == '\0') {
+ return val;
+ }
+ // Badly formatted property, should just be a number
+ ALOGE("qemu.sf.back_camera_hal is not a number: %s", prop);
+ }
+ return 1;
+#else
+ return 3;
+#endif
+}
+
+bool EmulatedCameraFactory::isFrontFakeCameraEmulationOn()
+{
+ /* Defined by the 'qemu.sf.fake_camera' boot property: if the property
+ * exists and is set to 'both' or 'front', then a fake camera is used to
+ * emulate the front camera. */
+#if 0
+ char prop[PROPERTY_VALUE_MAX];
+ if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
+ (!strcmp(prop, "both") || !strcmp(prop, "front"))) {
+ return true;
+ } else {
+ return false;
+ }
+#else
+ return true;
+#endif
+}
+
+int EmulatedCameraFactory::getFrontCameraHalVersion()
+{
+ /* Defined by the 'qemu.sf.front_camera_hal' boot property: if the
+ * property doesn't exist, it is assumed to be 1. */
+#if 0
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("qemu.sf.front_camera_hal", prop, NULL) > 0) {
+ char *prop_end = prop;
+ int val = strtol(prop, &prop_end, 10);
+ if (*prop_end == '\0') {
+ return val;
+ }
+ // Badly formatted property, should just be a number
+ ALOGE("qemu.sf.front_camera_hal is not a number: %s", prop);
+ }
+ return 1;
+#else
+ return 3;
+#endif
+}
+
+void EmulatedCameraFactory::onStatusChanged(int cameraId, int newStatus) {
+
+ if (cameraId < 0 || cameraId >= mEmulatedCameraNum) {
+ ALOGE("%s: Camera ID %d is out of bounds", __FUNCTION__, cameraId);
+ return;
+ }
+
+ EmulatedBaseCamera *cam = mEmulatedCameras[cameraId];
+ if (!cam) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return;
+ }
+
+ /**
+ * (Order is important)
+ * Send the callback first to framework, THEN close the camera.
+ */
+
+ if (newStatus == cam->getHotplugStatus()) {
+ ALOGW("%s: Ignoring transition to the same status", __FUNCTION__);
+ return;
+ }
+
+ const camera_module_callbacks_t* cb = mCallbacks;
+ if (cb != NULL && cb->camera_device_status_change != NULL) {
+ cb->camera_device_status_change(cb, cameraId, newStatus);
+ }
+
+ if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+ cam->unplugCamera();
+ } else if (newStatus == CAMERA_DEVICE_STATUS_PRESENT) {
+ cam->plugCamera();
+ }
+
+}
+
+/********************************************************************************
+ * Initializer for the static member structure.
+ *******************************************************************************/
+
+/* Entry point for camera HAL API. */
+struct hw_module_methods_t EmulatedCameraFactory::mCameraModuleMethods = {
+ open: EmulatedCameraFactory::device_open
+};
+
+}; /* namespace android */
diff --git a/v3/EmulatedCameraFactory.h b/v3/EmulatedCameraFactory.h
new file mode 100755
index 0000000..470f5ea
--- a/dev/null
+++ b/v3/EmulatedCameraFactory.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
+
+#include <utils/RefBase.h>
+#include "EmulatedBaseCamera.h"
+#include "QemuClient.h"
+
+namespace android {
+
+struct EmulatedCameraHotplugThread;
+
+/*
+ * Contains declaration of a class EmulatedCameraFactory that manages cameras
+ * available for the emulation. A global instance of this class is statically
+ * instantiated and initialized when camera emulation HAL is loaded.
+ */
+
+/* Class EmulatedCameraFactory manages cameras available for emulation.
+ *
+ * When the global static instance of this class is created on module load,
+ * it enumerates the cameras available for emulation by connecting to the
+ * emulator's 'camera' service. For every camera found there it creates an
+ * instance of an appropriate class, and stores it in an array of emulated
+ * cameras. In addition to the cameras reported by the emulator, a fake camera
+ * is always created, so there is always at least one camera available.
+ *
+ * An instance of this class is also used as the entry point for the camera
+ * HAL API,
+ * including:
+ * - hw_module_methods_t::open entry point
+ * - camera_module_t::get_number_of_cameras entry point
+ * - camera_module_t::get_camera_info entry point
+ *
+ */
+class EmulatedCameraFactory {
+public:
+ /* Constructs EmulatedCameraFactory instance.
+ * In this constructor the factory will create and initialize a list of
+ * emulated cameras. Any error that occurs during construction is reported
+ * via the mConstructedOK data member of this class.
+ */
+ EmulatedCameraFactory();
+
+ /* Destructs EmulatedCameraFactory instance. */
+ ~EmulatedCameraFactory();
+
+ /****************************************************************************
+ * Camera HAL API handlers.
+ ***************************************************************************/
+
+public:
+ /* Opens (connects to) a camera device.
+ * This method is called in response to hw_module_methods_t::open callback.
+ */
+ int cameraDeviceOpen(int camera_id, hw_device_t** device);
+
+ /* Gets emulated camera information.
+ * This method is called in response to camera_module_t::get_camera_info callback.
+ */
+ int getCameraInfo(int camera_id, struct camera_info *info);
+
+ /* Sets emulated camera callbacks.
+ * This method is called in response to camera_module_t::set_callbacks callback.
+ */
+ int setCallbacks(const camera_module_callbacks_t *callbacks);
+
+ /****************************************************************************
+ * Camera HAL API callbacks.
+ ***************************************************************************/
+
+public:
+ /* camera_module_t::get_number_of_cameras callback entry point. */
+ static int get_number_of_cameras(void);
+
+ /* camera_module_t::get_camera_info callback entry point. */
+ static int get_camera_info(int camera_id, struct camera_info *info);
+
+ /* camera_module_t::set_callbacks callback entry point. */
+ static int set_callbacks(const camera_module_callbacks_t *callbacks);
+
+private:
+ /* hw_module_methods_t::open callback entry point. */
+ static int device_open(const hw_module_t* module,
+ const char* name,
+ hw_device_t** device);
+
+ /****************************************************************************
+ * Public API.
+ ***************************************************************************/
+
+public:
+
+ /* Gets fake camera orientation. */
+ int getFakeCameraOrientation() {
+ /* TODO: Have a boot property that controls that. */
+ return 90;
+ }
+
+ /* Gets qemu camera orientation. */
+ int getQemuCameraOrientation() {
+ /* TODO: Have a boot property that controls that. */
+ return 270;
+ }
+
+ /* Gets number of emulated cameras.
+ */
+ int getEmulatedCameraNum() const {
+ return mEmulatedCameraNum;
+ }
+
+ /* Checks whether or not the constructor has succeeded.
+ */
+ bool isConstructedOK() const {
+ return mConstructedOK;
+ }
+
+ void onStatusChanged(int cameraId, int newStatus);
+
+ /****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+private:
+ /* Populates the emulated cameras array with cameras that are available via
+ * the 'camera' service in the emulator. For each such camera an instance of
+ * EmulatedQemuCamera will be created and added to the mEmulatedCameras
+ * array.
+ */
+ void createQemuCameras();
+
+ /* Checks if fake camera emulation is on for the camera facing back. */
+ bool isBackFakeCameraEmulationOn();
+
+ /* Gets camera device version number to use for back camera emulation */
+ int getBackCameraHalVersion();
+
+ /* Checks if fake camera emulation is on for the camera facing front. */
+ bool isFrontFakeCameraEmulationOn();
+
+ /* Gets camera device version number to use for front camera emulation */
+ int getFrontCameraHalVersion();
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+private:
+ /* Connection to the camera service in the emulator. */
+ FactoryQemuClient mQemuClient;
+
+ /* Array of cameras available for the emulation. */
+ EmulatedBaseCamera** mEmulatedCameras;
+
+ /* Number of emulated cameras (including the fake ones). */
+ int mEmulatedCameraNum;
+
+ /* Number of emulated fake cameras. */
+ int mFakeCameraNum;
+
+ /* Flags whether or not constructor has succeeded. */
+ bool mConstructedOK;
+
+ /* Camera callbacks (for status changing) */
+ const camera_module_callbacks_t* mCallbacks;
+
+ /* Hotplug thread (to call onStatusChanged) */
+ sp<EmulatedCameraHotplugThread> mHotplugThread;
+
+public:
+ /* Contains device open entry point, as required by HAL API. */
+ static struct hw_module_methods_t mCameraModuleMethods;
+};
+
+}; /* namespace android */
+
+/* References the global EmulatedCameraFactory instance. */
+extern android::EmulatedCameraFactory gEmulatedCameraFactory;
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H */
diff --git a/v3/EmulatedCameraHal.cpp b/v3/EmulatedCameraHal.cpp
new file mode 100755
index 0000000..5a99a16
--- a/dev/null
+++ b/v3/EmulatedCameraHal.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of the camera HAL layer in the system running
+ * under the emulator.
+ *
+ * This file contains only the required HAL header, which directs all the API
+ * calls to the EmulatedCameraFactory class implementation, which is
+ * responsible for managing emulated cameras.
+ */
+
+#include "EmulatedCameraFactory.h"
+
+/*
+ * Required HAL header.
+ */
+camera_module_t HAL_MODULE_INFO_SYM = {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ module_api_version: CAMERA_MODULE_API_VERSION_2_1,
+ hal_api_version: HARDWARE_HAL_API_VERSION,
+ id: CAMERA_HARDWARE_MODULE_ID,
+ name: "Camera Module",
+ author: "The Multi-media team from Amlogic SH.",
+ methods: &android::EmulatedCameraFactory::mCameraModuleMethods,
+ dso: NULL,
+ reserved: {0},
+ },
+ get_number_of_cameras: android::EmulatedCameraFactory::get_number_of_cameras,
+ get_camera_info: android::EmulatedCameraFactory::get_camera_info,
+ set_callbacks: android::EmulatedCameraFactory::set_callbacks,
+};
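+
+/*
+ * Illustrative consumer-side sketch (standard libhardware usage, not part of
+ * this module): the framework locates this module by ID and then calls the
+ * factory entry points wired up above.
+ *
+ *   const hw_module_t* module = NULL;
+ *   if (hw_get_module(CAMERA_HARDWARE_MODULE_ID, &module) == 0) {
+ *       const camera_module_t* cam =
+ *           reinterpret_cast<const camera_module_t*>(module);
+ *       int num = cam->get_number_of_cameras();
+ *   }
+ */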
diff --git a/v3/EmulatedCameraHotplugThread.cpp b/v3/EmulatedCameraHotplugThread.cpp
new file mode 100644
index 0000000..0ce2aeb
--- a/dev/null
+++ b/v3/EmulatedCameraHotplugThread.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_HotplugThread"
+#include <cutils/log.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/inotify.h>
+
+#include "EmulatedCameraHotplugThread.h"
+#include "EmulatedCameraFactory.h"
+
+#define FAKE_HOTPLUG_FILE "/data/misc/media/emulator.camera.hotplug"
+
+#define EVENT_SIZE (sizeof(struct inotify_event))
+#define EVENT_BUF_LEN (1024*(EVENT_SIZE+16))
+
+#define SubscriberInfo EmulatedCameraHotplugThread::SubscriberInfo
+
+namespace android {
+
+EmulatedCameraHotplugThread::EmulatedCameraHotplugThread(
+ const int* cameraIdArray,
+ size_t size) :
+ Thread(/*canCallJava*/false) {
+
+ mRunning = true;
+ mInotifyFd = 0;
+
+ for (size_t i = 0; i < size; ++i) {
+ int id = cameraIdArray[i];
+
+ if (createFileIfNotExists(id)) {
+ mSubscribedCameraIds.push_back(id);
+ }
+ }
+}
+
+EmulatedCameraHotplugThread::~EmulatedCameraHotplugThread() {
+}
+
+status_t EmulatedCameraHotplugThread::requestExitAndWait() {
+ ALOGE("%s: Not implemented. Use requestExit + join instead",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+void EmulatedCameraHotplugThread::requestExit() {
+ Mutex::Autolock al(mMutex);
+
+ ALOGV("%s: Requesting thread exit", __FUNCTION__);
+ mRunning = false;
+
+ bool rmWatchFailed = false;
+ Vector<SubscriberInfo>::iterator it;
+ for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+
+ if (inotify_rm_watch(mInotifyFd, it->WatchID) == -1) {
+
+ ALOGE("%s: Could not remove watch for camID '%d',"
+ " error: '%s' (%d)",
+ __FUNCTION__, it->CameraID, strerror(errno),
+ errno);
+
+ rmWatchFailed = true;
+ } else {
+ ALOGV("%s: Removed watch for camID '%d'",
+ __FUNCTION__, it->CameraID);
+ }
+ }
+
+ if (rmWatchFailed) { // unlikely
+ // Give the thread a fighting chance to error out on the next
+ // read
+ if (TEMP_FAILURE_RETRY(close(mInotifyFd)) == -1) {
+ ALOGE("%s: close failure error: '%s' (%d)",
+ __FUNCTION__, strerror(errno), errno);
+ }
+ }
+
+ ALOGV("%s: Request exit complete.", __FUNCTION__);
+}
+
+status_t EmulatedCameraHotplugThread::readyToRun() {
+ Mutex::Autolock al(mMutex);
+
+ mInotifyFd = -1;
+
+ do {
+ ALOGV("%s: Initializing inotify", __FUNCTION__);
+
+ mInotifyFd = inotify_init();
+ if (mInotifyFd == -1) {
+ ALOGE("%s: inotify_init failure error: '%s' (%d)",
+ __FUNCTION__, strerror(errno), errno);
+ mRunning = false;
+ break;
+ }
+
+ /**
+ * For each fake camera file, add a watch for when
+ * the file is closed (if it was written to)
+ */
+ Vector<int>::const_iterator it, end;
+ it = mSubscribedCameraIds.begin();
+ end = mSubscribedCameraIds.end();
+ for (; it != end; ++it) {
+ int cameraId = *it;
+ if (!addWatch(cameraId)) {
+ mRunning = false;
+ break;
+ }
+ }
+ } while (false);
+
+ if (!mRunning) {
+ status_t err = -errno;
+
+ if (mInotifyFd != -1) {
+ TEMP_FAILURE_RETRY(close(mInotifyFd));
+ }
+
+ return err;
+ }
+
+ return OK;
+}
+
+bool EmulatedCameraHotplugThread::threadLoop() {
+
+ // If requestExit was already called, mRunning will be false
+ while (mRunning) {
+ char buffer[EVENT_BUF_LEN];
+ int length = TEMP_FAILURE_RETRY(
+ read(mInotifyFd, buffer, EVENT_BUF_LEN));
+
+ if (length < 0) {
+ ALOGE("%s: Error reading from inotify FD, error: '%s' (%d)",
+ __FUNCTION__, strerror(errno),
+ errno);
+ mRunning = false;
+ break;
+ }
+
+ ALOGV("%s: Read %d bytes from inotify FD", __FUNCTION__, length);
+
+ int i = 0;
+ while (i < length) {
+ inotify_event* event = (inotify_event*) &buffer[i];
+
+ if (event->mask & IN_IGNORED) {
+ Mutex::Autolock al(mMutex);
+ if (!mRunning) {
+ ALOGV("%s: Shutting down thread", __FUNCTION__);
+ break;
+ } else {
+ ALOGE("%s: File was deleted, aborting",
+ __FUNCTION__);
+ mRunning = false;
+ break;
+ }
+ } else if (event->mask & IN_CLOSE_WRITE) {
+ int cameraId = getCameraId(event->wd);
+
+ if (cameraId < 0) {
+ ALOGE("%s: Got bad camera ID from WD '%d",
+ __FUNCTION__, event->wd);
+ } else {
+ // Check the file for the new hotplug event
+ String8 filePath = getFilePath(cameraId);
+ /**
+ * NOTE: reading the file here does not trigger our own
+ * inotify event: we open it read-only, while the watch
+ * fires only on close-after-write (IN_CLOSE_WRITE).
+ */
+ int newStatus = readFile(filePath);
+
+ if (newStatus < 0) {
+ mRunning = false;
+ break;
+ }
+
+ int halStatus = newStatus ?
+ CAMERA_DEVICE_STATUS_PRESENT :
+ CAMERA_DEVICE_STATUS_NOT_PRESENT;
+ gEmulatedCameraFactory.onStatusChanged(cameraId,
+ halStatus);
+ }
+
+ } else {
+ ALOGW("%s: Unknown mask 0x%x",
+ __FUNCTION__, event->mask);
+ }
+
+ i += EVENT_SIZE + event->len;
+ }
+ }
+
+ if (!mRunning) {
+ TEMP_FAILURE_RETRY(close(mInotifyFd));
+ return false;
+ }
+
+ return true;
+}
+
+String8 EmulatedCameraHotplugThread::getFilePath(int cameraId) const {
+ return String8::format(FAKE_HOTPLUG_FILE ".%d", cameraId);
+}
+
+bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const
+{
+ String8 filePath = getFilePath(cameraId);
+ // make sure this file exists and we have access to it
+ int fd = TEMP_FAILURE_RETRY(
+ open(filePath.string(), O_WRONLY | O_CREAT | O_TRUNC,
+ /* mode = ug+rwx */ S_IRWXU | S_IRWXG ));
+ if (fd == -1) {
+ ALOGE("%s: Could not create file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ return false;
+ }
+
+ // File has '1' by default since we are plugged in by default
+ if (TEMP_FAILURE_RETRY(write(fd, "1\n", /*count*/2)) == -1) {
+ ALOGE("%s: Could not write '1' to file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ TEMP_FAILURE_RETRY(close(fd));
+ return false;
+ }
+
+ TEMP_FAILURE_RETRY(close(fd));
+ return true;
+}
+
+int EmulatedCameraHotplugThread::getCameraId(String8 filePath) const {
+ Vector<int>::const_iterator it, end;
+ it = mSubscribedCameraIds.begin();
+ end = mSubscribedCameraIds.end();
+ for (; it != end; ++it) {
+ String8 camPath = getFilePath(*it);
+
+ if (camPath == filePath) {
+ return *it;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+int EmulatedCameraHotplugThread::getCameraId(int wd) const {
+ for (size_t i = 0; i < mSubscribers.size(); ++i) {
+ if (mSubscribers[i].WatchID == wd) {
+ return mSubscribers[i].CameraID;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId)
+{
+ for (size_t i = 0; i < mSubscribers.size(); ++i) {
+ if (mSubscribers[i].CameraID == cameraId) {
+ return (SubscriberInfo*)&mSubscribers[i];
+ }
+ }
+
+ return NULL;
+}
+
+bool EmulatedCameraHotplugThread::addWatch(int cameraId) {
+ String8 camPath = getFilePath(cameraId);
+ int wd = inotify_add_watch(mInotifyFd,
+ camPath.string(),
+ IN_CLOSE_WRITE);
+
+ if (wd == -1) {
+ ALOGE("%s: Could not add watch for '%s', error: '%s' (%d)",
+ __FUNCTION__, camPath.string(), strerror(errno),
+ errno);
+
+ mRunning = false;
+ return false;
+ }
+
+ ALOGV("%s: Watch added for camID='%d', wd='%d'",
+ __FUNCTION__, cameraId, wd);
+
+ SubscriberInfo si = { cameraId, wd };
+ mSubscribers.push_back(si);
+
+ return true;
+}
+
+bool EmulatedCameraHotplugThread::removeWatch(int cameraId) {
+ SubscriberInfo* si = getSubscriberInfo(cameraId);
+
+ if (!si) return false;
+
+ if (inotify_rm_watch(mInotifyFd, si->WatchID) == -1) {
+
+ ALOGE("%s: Could not remove watch for camID '%d', error: '%s' (%d)",
+ __FUNCTION__, cameraId, strerror(errno),
+ errno);
+
+ return false;
+ }
+
+ Vector<SubscriberInfo>::iterator it;
+ for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+ if (it->CameraID == cameraId) {
+ break;
+ }
+ }
+
+ if (it != mSubscribers.end()) {
+ mSubscribers.erase(it);
+ }
+
+ return true;
+}
+
+int EmulatedCameraHotplugThread::readFile(String8 filePath) const {
+
+ int fd = TEMP_FAILURE_RETRY(
+ open(filePath.string(), O_RDONLY, /*mode*/0));
+ if (fd == -1) {
+ ALOGE("%s: Could not open file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ return -1;
+ }
+
+ char buffer[1] = {0};
+ int length;
+
+ length = TEMP_FAILURE_RETRY(
+ read(fd, buffer, sizeof(buffer)));
+
+ int retval;
+
+ ALOGV("%s: Read file '%s', length='%d', buffer='%c'",
+ __FUNCTION__, filePath.string(), length, buffer[0]);
+
+ if (length == 0) { // EOF
+ retval = 0; // empty file is the same thing as 0
+ } else if (buffer[0] == '0') {
+ retval = 0;
+ } else { // anything non-empty that's not beginning with '0'
+ retval = 1;
+ }
+
+ TEMP_FAILURE_RETRY(close(fd));
+
+ return retval;
+}
+
+} //namespace android
diff --git a/v3/EmulatedCameraHotplugThread.h b/v3/EmulatedCameraHotplugThread.h
new file mode 100644
index 0000000..3e26e71
--- a/dev/null
+++ b/v3/EmulatedCameraHotplugThread.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_HOTPLUG_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_HOTPLUG_H
+
+/**
+ * This class emulates hotplug events by watching, via inotify, a file specific
+ * to each camera ID. When the file's contents change between 1 and 0, the
+ * hotplug status toggles between PRESENT and NOT_PRESENT.
+ *
+ * Refer to FAKE_HOTPLUG_FILE in EmulatedCameraHotplugThread.cpp
+ */
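+
+/*
+ * Illustrative trigger from a root adb shell (the ".0" suffix selects camera
+ * ID 0; see getFilePath() in the .cpp):
+ *
+ *   echo 0 > /data/misc/media/emulator.camera.hotplug.0   # report unplugged
+ *   echo 1 > /data/misc/media/emulator.camera.hotplug.0   # report plugged in
+ */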
+
+#include "EmulatedCamera2.h"
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+class EmulatedCameraHotplugThread : public Thread {
+ public:
+ EmulatedCameraHotplugThread(const int* cameraIdArray, size_t size);
+ ~EmulatedCameraHotplugThread();
+
+ virtual void requestExit();
+ virtual status_t requestExitAndWait();
+
+ private:
+
+
+ virtual status_t readyToRun();
+ virtual bool threadLoop();
+
+ struct SubscriberInfo {
+ int CameraID;
+ int WatchID;
+ };
+
+ bool addWatch(int cameraId);
+ bool removeWatch(int cameraId);
+ SubscriberInfo* getSubscriberInfo(int cameraId);
+
+ int getCameraId(String8 filePath) const;
+ int getCameraId(int wd) const;
+
+ String8 getFilePath(int cameraId) const;
+ int readFile(String8 filePath) const;
+
+ bool createFileIfNotExists(int cameraId) const;
+
+ int mInotifyFd;
+ Vector<int> mSubscribedCameraIds;
+ Vector<SubscriberInfo> mSubscribers;
+
+ // variables above are unguarded:
+ // -- accessed in thread loop or in constructor only
+
+ Mutex mMutex;
+
+ bool mRunning; // guarded by mMutex only where it matters
+};
+} // namespace android
+
+#endif
diff --git a/v3/EmulatedFakeCamera.cpp b/v3/EmulatedFakeCamera.cpp
new file mode 100755
index 0000000..457850d
--- a/dev/null
+++ b/v3/EmulatedFakeCamera.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera that encapsulates
+ * functionality of a fake camera.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include "EmulatedFakeCamera.h"
+#include "EmulatedCameraFactory.h"
+
+namespace android {
+
+EmulatedFakeCamera::EmulatedFakeCamera(int cameraId,
+ bool facingBack,
+ struct hw_module_t* module)
+ : EmulatedCamera(cameraId, module),
+ mFacingBack(facingBack),
+ mFakeCameraDevice(this)
+{
+}
+
+EmulatedFakeCamera::~EmulatedFakeCamera()
+{
+}
+
+/****************************************************************************
+ * Public API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera::Initialize()
+{
+ status_t res = mFakeCameraDevice.Initialize();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ const char* facing = mFacingBack ? EmulatedCamera::FACING_BACK :
+ EmulatedCamera::FACING_FRONT;
+
+ mParameters.set(EmulatedCamera::FACING_KEY, facing);
+ ALOGD("%s: Fake camera is facing %s", __FUNCTION__, facing);
+
+ mParameters.set(EmulatedCamera::ORIENTATION_KEY,
+ gEmulatedCameraFactory.getFakeCameraOrientation());
+
+ res = EmulatedCamera::Initialize();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /*
+ * Parameters provided by the camera device.
+ */
+
+ /* 352x288 and 320x240 frame dimensions are required by the framework for
+ * video mode preview and video recording. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
+ "640x480,352x288,320x240");
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
+ "640x480,352x288,320x240");
+ mParameters.setPreviewSize(640, 480);
+ mParameters.setPictureSize(640, 480);
+
+ return NO_ERROR;
+}
+
+EmulatedCameraDevice* EmulatedFakeCamera::getCameraDevice()
+{
+ return &mFakeCameraDevice;
+}
+
+}; /* namespace android */
diff --git a/v3/EmulatedFakeCamera.h b/v3/EmulatedFakeCamera.h
new file mode 100755
index 0000000..4bfbd70
--- a/dev/null
+++ b/v3/EmulatedFakeCamera.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H
+
+/*
+ * Contains declaration of a class EmulatedFakeCamera that encapsulates
+ * functionality of a fake camera. This class is nothing more than a placeholder
+ * for EmulatedFakeCameraDevice instance.
+ */
+
+#include "EmulatedCamera.h"
+#include "EmulatedFakeCameraDevice.h"
+
+namespace android {
+
+/* Encapsulates functionality of a fake camera.
+ * This class is nothing more than a placeholder for EmulatedFakeCameraDevice
+ * instance that emulates a fake camera device.
+ */
+class EmulatedFakeCamera : public EmulatedCamera {
+public:
+ /* Constructs EmulatedFakeCamera instance. */
+ EmulatedFakeCamera(int cameraId, bool facingBack, struct hw_module_t* module);
+
+ /* Destructs EmulatedFakeCamera instance. */
+ ~EmulatedFakeCamera();
+
+ /****************************************************************************
+ * EmulatedCamera virtual overrides.
+ ***************************************************************************/
+
+public:
+ /* Initializes EmulatedFakeCamera instance. */
+ status_t Initialize();
+
+ /****************************************************************************
+ * EmulatedCamera abstract API implementation.
+ ***************************************************************************/
+
+protected:
+ /* Gets the emulated camera device used by this instance of the emulated
+ * camera.
+ */
+ EmulatedCameraDevice* getCameraDevice();
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+protected:
+ /* Facing back (true) or front (false) switch. */
+ bool mFacingBack;
+
+ /* Contained fake camera device object. */
+ EmulatedFakeCameraDevice mFakeCameraDevice;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H */
diff --git a/v3/EmulatedFakeCamera2.cpp b/v3/EmulatedFakeCamera2.cpp
new file mode 100644
index 0000000..4d8edfb
--- a/dev/null
+++ b/v3/EmulatedFakeCamera2.cpp
@@ -0,0 +1,2727 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera2 that encapsulates
+ * functionality of an advanced fake camera.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera2"
+#include <utils/Log.h>
+
+#include "EmulatedFakeCamera2.h"
+#include "EmulatedCameraFactory.h"
+#include <ui/Rect.h>
+#include <ui/GraphicBufferMapper.h>
+#include "../../gralloc/gralloc_priv.h"
+
+#define ERROR_CAMERA_NOT_PRESENT -EPIPE
+
+#define CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT 0xFFFFFFFF
+
+namespace android {
+
+const int64_t USEC = 1000LL;
+const int64_t MSEC = USEC * 1000LL;
+const int64_t SEC = MSEC * 1000LL;
+
+const uint32_t EmulatedFakeCamera2::kAvailableFormats[4] = {
+ HAL_PIXEL_FORMAT_RAW_SENSOR,
+ HAL_PIXEL_FORMAT_BLOB,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ // HAL_PIXEL_FORMAT_YV12,
+ HAL_PIXEL_FORMAT_YCrCb_420_SP
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableRawSizes[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera2::kAvailableRawMinDurations[1] = {
+ Sensor::kFrameDurationRange[0]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableProcessedSizesBack[4] = {
+ 640, 480, 320, 240
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableProcessedSizesFront[4] = {
+ 320, 240, 160, 120
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera2::kAvailableProcessedMinDurations[1] = {
+ Sensor::kFrameDurationRange[0]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableJpegSizesBack[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableJpegSizesFront[2] = {
+ 320, 240
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+
+const uint64_t EmulatedFakeCamera2::kAvailableJpegMinDurations[1] = {
+ Sensor::kFrameDurationRange[0]
+};
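+
+/* Note: the size tables above are flattened (width, height) pairs; e.g.
+ * kAvailableProcessedSizesBack describes 640x480 and 320x240.
+ * allocateStream() below walks them two entries at a time. */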
+
+
+EmulatedFakeCamera2::EmulatedFakeCamera2(int cameraId,
+ bool facingBack,
+ struct hw_module_t* module)
+ : EmulatedCamera2(cameraId,module),
+ mFacingBack(facingBack),
+ mIsConnected(false)
+{
+ ALOGD("Constructing emulated fake camera 2 facing %s",
+ facingBack ? "back" : "front");
+}
+
+EmulatedFakeCamera2::~EmulatedFakeCamera2() {
+ if (mCameraInfo != NULL) {
+ free_camera_metadata(mCameraInfo);
+ }
+}
+
+/****************************************************************************
+ * Public API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera2::Initialize() {
+ status_t res;
+
+ res = constructStaticInfo(&mCameraInfo, true);
+ if (res != OK) {
+ ALOGE("%s: Unable to allocate static info: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ res = constructStaticInfo(&mCameraInfo, false);
+ if (res != OK) {
+ ALOGE("%s: Unable to fill in static info: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ mNextStreamId = 1;
+ mNextReprocessStreamId = 1;
+ mRawStreamCount = 0;
+ mProcessedStreamCount = 0;
+ mJpegStreamCount = 0;
+ mReprocessStreamCount = 0;
+
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera module API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera2::connectCamera(hw_device_t** device) {
+ status_t res;
+ ALOGV("%s", __FUNCTION__);
+
+ {
+ Mutex::Autolock l(mMutex);
+ if (!mStatusPresent) {
+ ALOGE("%s: Camera ID %d is unplugged", __FUNCTION__,
+ mCameraID);
+ return -ENODEV;
+ }
+ }
+
+ mConfigureThread = new ConfigureThread(this);
+ mReadoutThread = new ReadoutThread(this);
+ mControlThread = new ControlThread(this);
+ mSensor = new Sensor();
+ mJpegCompressor = new JpegCompressor();
+
+ mNextStreamId = 1;
+ mNextReprocessStreamId = 1;
+
+ res = mSensor->startUp(mCameraID);
+ if (res != NO_ERROR) return res;
+
+ res = mConfigureThread->run("EmulatedFakeCamera2::configureThread");
+ if (res != NO_ERROR) return res;
+
+ res = mReadoutThread->run("EmulatedFakeCamera2::readoutThread");
+ if (res != NO_ERROR) return res;
+
+ res = mControlThread->run("EmulatedFakeCamera2::controlThread");
+ if (res != NO_ERROR) return res;
+
+ status_t ret = EmulatedCamera2::connectCamera(device);
+
+ if (ret >= 0) {
+ mIsConnected = true;
+ }
+
+ return ret;
+}
+
+status_t EmulatedFakeCamera2::plugCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGI("%s: Plugged back in", __FUNCTION__);
+ mStatusPresent = true;
+ }
+ }
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::unplugCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (mStatusPresent) {
+ ALOGI("%s: Unplugged camera", __FUNCTION__);
+ mStatusPresent = false;
+ }
+ }
+
+ return closeCamera();
+}
+
+camera_device_status_t EmulatedFakeCamera2::getHotplugStatus() {
+ Mutex::Autolock l(mMutex);
+ return mStatusPresent ?
+ CAMERA_DEVICE_STATUS_PRESENT :
+ CAMERA_DEVICE_STATUS_NOT_PRESENT;
+}
+
+
+
+status_t EmulatedFakeCamera2::closeCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ status_t res;
+ ALOGV("%s", __FUNCTION__);
+
+ if (!mIsConnected) {
+ return NO_ERROR;
+ }
+
+ res = mSensor->shutDown();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+ return res;
+ }
+
+ mConfigureThread->requestExit();
+ mReadoutThread->requestExit();
+ mControlThread->requestExit();
+ mJpegCompressor->cancel();
+ }
+
+ // give up the lock since we will now block and the threads
+ // can call back into this object
+ mConfigureThread->join();
+ mReadoutThread->join();
+ mControlThread->join();
+
+ ALOGV("%s exit", __FUNCTION__);
+
+ {
+ Mutex::Autolock l(mMutex);
+ mIsConnected = false;
+ }
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::getCameraInfo(struct camera_info *info) {
+ info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+ info->orientation = gEmulatedCameraFactory.getFakeCameraOrientation();
+ return EmulatedCamera2::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera device API overrides
+ ***************************************************************************/
+
+/** Request input queue */
+
+int EmulatedFakeCamera2::requestQueueNotify() {
+ ALOGV("Request queue notification received");
+
+ ALOG_ASSERT(mRequestQueueSrc != NULL,
+ "%s: Request queue src not set, but received queue notification!",
+ __FUNCTION__);
+ ALOG_ASSERT(mFrameQueueDst != NULL,
+ "%s: Request queue src not set, but received queue notification!",
+ __FUNCTION__);
+ ALOG_ASSERT(mStreams.size() != 0,
+ "%s: No streams allocated, but received queue notification!",
+ __FUNCTION__);
+ return mConfigureThread->newRequestAvailable();
+}
+
+int EmulatedFakeCamera2::getInProgressCount() {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ int requestCount = 0;
+ requestCount += mConfigureThread->getInProgressCount();
+ requestCount += mReadoutThread->getInProgressCount();
+ requestCount += mJpegCompressor->isBusy() ? 1 : 0;
+
+ return requestCount;
+}
+
+int EmulatedFakeCamera2::constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request) {
+
+ if (request == NULL) return BAD_VALUE;
+ if (request_template < 0 || request_template >= CAMERA2_TEMPLATE_COUNT) {
+ return BAD_VALUE;
+ }
+
+ {
+ Mutex::Autolock l(mMutex);
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+ }
+
+ status_t res;
+ // Pass 1, calculate size and allocate
+ res = constructDefaultRequest(request_template,
+ request,
+ true);
+ if (res != OK) {
+ return res;
+ }
+ // Pass 2, build request
+ res = constructDefaultRequest(request_template,
+ request,
+ false);
+ if (res != OK) {
+ ALOGE("Unable to populate new request for template %d",
+ request_template);
+ }
+
+ return res;
+}
+
+int EmulatedFakeCamera2::allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ // Temporary shim until FORMAT_ZSL is removed
+ if (format == CAMERA2_HAL_PIXEL_FORMAT_ZSL) {
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ unsigned int numFormats = sizeof(kAvailableFormats) / sizeof(uint32_t);
+ unsigned int formatIdx = 0;
+ unsigned int sizeOffsetIdx = 0;
+ for (; formatIdx < numFormats; formatIdx++) {
+ if (format == (int)kAvailableFormats[formatIdx]) break;
+ }
+ if (formatIdx == numFormats) {
+ ALOGE("%s: Format 0x%x is not supported", __FUNCTION__, format);
+ return BAD_VALUE;
+ }
+ }
+
+ const uint32_t *availableSizes;
+ size_t availableSizeCount;
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RAW_SENSOR:
+ availableSizes = kAvailableRawSizes;
+ availableSizeCount = sizeof(kAvailableRawSizes)/sizeof(uint32_t);
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+ availableSizes = mFacingBack ?
+ kAvailableJpegSizesBack : kAvailableJpegSizesFront;
+ availableSizeCount = mFacingBack ?
+ sizeof(kAvailableJpegSizesBack)/sizeof(uint32_t) :
+ sizeof(kAvailableJpegSizesFront)/sizeof(uint32_t);
+ break;
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_YV12:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ availableSizes = mFacingBack ?
+ kAvailableProcessedSizesBack : kAvailableProcessedSizesFront;
+ availableSizeCount = mFacingBack ?
+ sizeof(kAvailableProcessedSizesBack)/sizeof(uint32_t) :
+ sizeof(kAvailableProcessedSizesFront)/sizeof(uint32_t);
+ break;
+ default:
+ ALOGE("%s: Unknown format 0x%x", __FUNCTION__, format);
+ return BAD_VALUE;
+ }
+
+ unsigned int resIdx = 0;
+ for (; resIdx < availableSizeCount; resIdx++) {
+ if (availableSizes[resIdx * 2] == width &&
+ availableSizes[resIdx * 2 + 1] == height) break;
+ }
+ if (resIdx == availableSizeCount) {
+ ALOGE("%s: Format 0x%x does not support resolution %d, %d", __FUNCTION__,
+ format, width, height);
+ return BAD_VALUE;
+ }
+
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RAW_SENSOR:
+ if (mRawStreamCount >= kMaxRawStreamCount) {
+ ALOGE("%s: Cannot allocate another raw stream (%d already allocated)",
+ __FUNCTION__, mRawStreamCount);
+ return INVALID_OPERATION;
+ }
+ mRawStreamCount++;
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+ if (mJpegStreamCount >= kMaxJpegStreamCount) {
+ ALOGE("%s: Cannot allocate another JPEG stream (%d already allocated)",
+ __FUNCTION__, mJpegStreamCount);
+ return INVALID_OPERATION;
+ }
+ mJpegStreamCount++;
+ break;
+ default:
+ if (mProcessedStreamCount >= kMaxProcessedStreamCount) {
+ ALOGE("%s: Cannot allocate another processed stream (%d already allocated)",
+ __FUNCTION__, mProcessedStreamCount);
+ return INVALID_OPERATION;
+ }
+ mProcessedStreamCount++;
+ }
+
+ Stream newStream;
+ newStream.ops = stream_ops;
+ newStream.width = width;
+ newStream.height = height;
+ newStream.format = format;
+ // TODO: Query stride from gralloc
+ newStream.stride = width;
+
+ mStreams.add(mNextStreamId, newStream);
+
+ *stream_id = mNextStreamId;
+ if (format_actual) *format_actual = format;
+ *usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+ *max_buffers = kMaxBufferCount;
+
+ ALOGV("Stream allocated: %d, %d x %d, 0x%x. U: %x, B: %d",
+ *stream_id, width, height, format, *usage, *max_buffers);
+
+ mNextStreamId++;
+ return NO_ERROR;
+}
+
+int EmulatedFakeCamera2::registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ ALOGV("%s: Stream %d registering %d buffers", __FUNCTION__,
+ stream_id, num_buffers);
+ // Need to find out what the final concrete pixel format for our stream is
+ // Assumes that all buffers have the same format.
+ if (num_buffers < 1) {
+ ALOGE("%s: Stream %d only has %d buffers!",
+ __FUNCTION__, stream_id, num_buffers);
+ return BAD_VALUE;
+ }
+ const private_handle_t *streamBuffer =
+ reinterpret_cast<const private_handle_t*>(buffers[0]);
+
+ int finalFormat = streamBuffer->format;
+
+ if (finalFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ ALOGE("%s: Stream %d: Bad final pixel format "
+ "HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED; "
+ "concrete pixel format required!", __FUNCTION__, stream_id);
+ return BAD_VALUE;
+ }
+
+ ssize_t streamIndex = mStreams.indexOfKey(stream_id);
+ if (streamIndex < 0) {
+ ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+ return BAD_VALUE;
+ }
+
+ Stream &stream = mStreams.editValueAt(streamIndex);
+
+ ALOGV("%s: Stream %d format set to %x, previously %x",
+ __FUNCTION__, stream_id, finalFormat, stream.format);
+
+ stream.format = finalFormat;
+
+ return NO_ERROR;
+}
+
+int EmulatedFakeCamera2::releaseStream(uint32_t stream_id) {
+ Mutex::Autolock l(mMutex);
+
+ ssize_t streamIndex = mStreams.indexOfKey(stream_id);
+ if (streamIndex < 0) {
+ ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+ return BAD_VALUE;
+ }
+
+ if (isStreamInUse(stream_id)) {
+ ALOGE("%s: Cannot release stream %d; in use!", __FUNCTION__,
+ stream_id);
+ return BAD_VALUE;
+ }
+
+ switch(mStreams.valueAt(streamIndex).format) {
+ case HAL_PIXEL_FORMAT_RAW_SENSOR:
+ mRawStreamCount--;
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+ mJpegStreamCount--;
+ break;
+ default:
+ mProcessedStreamCount--;
+ break;
+ }
+
+ mStreams.removeItemsAt(streamIndex);
+
+ return NO_ERROR;
+}
+
+int EmulatedFakeCamera2::allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *stream_ops,
+ uint32_t *stream_id) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ ssize_t baseStreamIndex = mStreams.indexOfKey(output_stream_id);
+ if (baseStreamIndex < 0) {
+ ALOGE("%s: Unknown output stream id %d!", __FUNCTION__, output_stream_id);
+ return BAD_VALUE;
+ }
+
+ const Stream &baseStream = mStreams[baseStreamIndex];
+
+ // We'll reprocess anything we produced
+
+ if (mReprocessStreamCount >= kMaxReprocessStreamCount) {
+ ALOGE("%s: Cannot allocate another reprocess stream (%d already allocated)",
+ __FUNCTION__, mReprocessStreamCount);
+ return INVALID_OPERATION;
+ }
+ mReprocessStreamCount++;
+
+ ReprocessStream newStream;
+ newStream.ops = stream_ops;
+ newStream.width = baseStream.width;
+ newStream.height = baseStream.height;
+ newStream.format = baseStream.format;
+ newStream.stride = baseStream.stride;
+ newStream.sourceStreamId = output_stream_id;
+
+ *stream_id = mNextReprocessStreamId;
+ mReprocessStreams.add(mNextReprocessStreamId, newStream);
+
+ ALOGV("Reprocess stream allocated: %d: %d, %d, 0x%x. Parent stream: %d",
+ *stream_id, newStream.width, newStream.height, newStream.format,
+ output_stream_id);
+
+ mNextReprocessStreamId++;
+ return NO_ERROR;
+}
+
+int EmulatedFakeCamera2::releaseReprocessStream(uint32_t stream_id) {
+ Mutex::Autolock l(mMutex);
+
+ ssize_t streamIndex = mReprocessStreams.indexOfKey(stream_id);
+ if (streamIndex < 0) {
+ ALOGE("%s: Unknown reprocess stream id %d!", __FUNCTION__, stream_id);
+ return BAD_VALUE;
+ }
+
+ if (isReprocessStreamInUse(stream_id)) {
+ ALOGE("%s: Cannot release reprocessing stream %d; in use!", __FUNCTION__,
+ stream_id);
+ return BAD_VALUE;
+ }
+
+ mReprocessStreamCount--;
+ mReprocessStreams.removeItemsAt(streamIndex);
+
+ return NO_ERROR;
+}
+
+int EmulatedFakeCamera2::triggerAction(uint32_t trigger_id,
+ int32_t ext1,
+ int32_t ext2) {
+ Mutex::Autolock l(mMutex);
+
+ if (trigger_id == CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT) {
+ ALOGI("%s: Disconnect trigger - camera must be closed", __FUNCTION__);
+ mStatusPresent = false;
+
+ gEmulatedCameraFactory.onStatusChanged(
+ mCameraID,
+ CAMERA_DEVICE_STATUS_NOT_PRESENT);
+ }
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ return mControlThread->triggerAction(trigger_id,
+ ext1, ext2);
+}
+
+/** Shutdown and debug methods */
+
+int EmulatedFakeCamera2::dump(int fd) {
+ String8 result;
+
+ result.appendFormat(" Camera HAL device: EmulatedFakeCamera2\n");
+ result.appendFormat(" Streams:\n");
+ for (size_t i = 0; i < mStreams.size(); i++) {
+ int id = mStreams.keyAt(i);
+ const Stream& s = mStreams.valueAt(i);
+ result.appendFormat(
+ " Stream %d: %d x %d, format 0x%x, stride %d\n",
+ id, s.width, s.height, s.format, s.stride);
+ }
+
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+void EmulatedFakeCamera2::signalError() {
+ // TODO: Let parent know so we can shut down cleanly
+ ALOGE("Worker thread is signaling a serious error");
+}
+
+/** Pipeline control worker thread methods */
+
+EmulatedFakeCamera2::ConfigureThread::ConfigureThread(EmulatedFakeCamera2 *parent):
+        Thread(false),
+        mParent(parent),
+        mRequestCount(0),
+        mNextBuffers(NULL) {
+    mRunning = false;
+    // Define the flags threadLoop() reads before the first request arrives;
+    // setupReprocess() never touches mWaitingForReadout, so leaving these
+    // uninitialized would be undefined behavior on a first reprocess request.
+    mWaitingForReadout = false;
+    mNextIsCapture = false;
+    mNextNeedsJpeg = false;
+}
+
+EmulatedFakeCamera2::ConfigureThread::~ConfigureThread() {
+}
+
+status_t EmulatedFakeCamera2::ConfigureThread::readyToRun() {
+ Mutex::Autolock lock(mInputMutex);
+
+ ALOGV("Starting up ConfigureThread");
+ mRequest = NULL;
+ mActive = false;
+ mRunning = true;
+
+ mInputSignal.signal();
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::ConfigureThread::waitUntilRunning() {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mRunning) {
+ ALOGV("Waiting for configure thread to start");
+ mInputSignal.wait(mInputMutex);
+ }
+ return OK;
+}
+
+status_t EmulatedFakeCamera2::ConfigureThread::newRequestAvailable() {
+ waitUntilRunning();
+
+ Mutex::Autolock lock(mInputMutex);
+
+ mActive = true;
+ mInputSignal.signal();
+
+ return OK;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::isStreamInUse(uint32_t id) {
+ Mutex::Autolock lock(mInternalsMutex);
+
+ if (mNextBuffers == NULL) return false;
+ for (size_t i=0; i < mNextBuffers->size(); i++) {
+ if ((*mNextBuffers)[i].streamId == (int)id) return true;
+ }
+ return false;
+}
+
+int EmulatedFakeCamera2::ConfigureThread::getInProgressCount() {
+ Mutex::Autolock lock(mInputMutex);
+ return mRequestCount;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::threadLoop() {
+ status_t res;
+
+ // Check if we're currently processing or just waiting
+ {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mActive) {
+ // Inactive, keep waiting until we've been signaled
+            res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+ if (res != NO_ERROR && res != TIMED_OUT) {
+ ALOGE("%s: Error waiting for input requests: %d",
+ __FUNCTION__, res);
+ return false;
+ }
+ if (!mActive) return true;
+ ALOGV("New request available");
+ }
+ // Active
+ }
+
+ if (mRequest == NULL) {
+ Mutex::Autolock il(mInternalsMutex);
+
+ ALOGV("Configure: Getting next request");
+ res = mParent->mRequestQueueSrc->dequeue_request(
+ mParent->mRequestQueueSrc,
+ &mRequest);
+ if (res != NO_ERROR) {
+ ALOGE("%s: Error dequeuing next request: %d", __FUNCTION__, res);
+ mParent->signalError();
+ return false;
+ }
+ if (mRequest == NULL) {
+ ALOGV("Configure: Request queue empty, going inactive");
+ // No requests available, go into inactive mode
+ Mutex::Autolock lock(mInputMutex);
+ mActive = false;
+ return true;
+ } else {
+ Mutex::Autolock lock(mInputMutex);
+ mRequestCount++;
+ }
+
+ camera_metadata_entry_t type;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_TYPE,
+ &type);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading request type", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+        bool success = false;
+ switch (type.data.u8[0]) {
+ case ANDROID_REQUEST_TYPE_CAPTURE:
+ success = setupCapture();
+ break;
+ case ANDROID_REQUEST_TYPE_REPROCESS:
+ success = setupReprocess();
+ break;
+ default:
+ ALOGE("%s: Unexpected request type %d",
+ __FUNCTION__, type.data.u8[0]);
+ mParent->signalError();
+ break;
+ }
+ if (!success) return false;
+
+ }
+
+ if (mWaitingForReadout) {
+ bool readoutDone;
+ readoutDone = mParent->mReadoutThread->waitForReady(kWaitPerLoop);
+ if (!readoutDone) return true;
+
+ if (mNextNeedsJpeg) {
+ ALOGV("Configure: Waiting for JPEG compressor");
+ } else {
+ ALOGV("Configure: Waiting for sensor");
+ }
+ mWaitingForReadout = false;
+ }
+
+ if (mNextNeedsJpeg) {
+ bool jpegDone;
+ jpegDone = mParent->mJpegCompressor->waitForDone(kWaitPerLoop);
+ if (!jpegDone) return true;
+
+ ALOGV("Configure: Waiting for sensor");
+ mNextNeedsJpeg = false;
+ }
+
+ if (mNextIsCapture) {
+ return configureNextCapture();
+ } else {
+ return configureNextReprocess();
+ }
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::setupCapture() {
+ status_t res;
+
+ mNextIsCapture = true;
+ // Get necessary parameters for sensor config
+ mParent->mControlThread->processRequest(mRequest);
+
+ camera_metadata_entry_t streams;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_OUTPUT_STREAMS,
+ &streams);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading output stream tag", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+
+ mNextBuffers = new Buffers;
+ mNextNeedsJpeg = false;
+ ALOGV("Configure: Setting up buffers for capture");
+ for (size_t i = 0; i < streams.count; i++) {
+ int streamId = streams.data.i32[i];
+ const Stream &s = mParent->getStreamInfo(streamId);
+ if (s.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ ALOGE("%s: Stream %d does not have a concrete pixel format, but "
+ "is included in a request!", __FUNCTION__, streamId);
+ mParent->signalError();
+ return false;
+ }
+ StreamBuffer b;
+        b.streamId = streamId;  // stream list entries are i32; a u8 read truncates IDs
+ b.width = s.width;
+ b.height = s.height;
+ b.format = s.format;
+ b.stride = s.stride;
+ mNextBuffers->push_back(b);
+ ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+ "stride %d",
+ i, b.streamId, b.width, b.height, b.format, b.stride);
+ if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+ mNextNeedsJpeg = true;
+ }
+ }
+
+ camera_metadata_entry_t e;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_FRAME_COUNT,
+ &e);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading frame count tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ mNextFrameNumber = *e.data.i32;
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_SENSOR_EXPOSURE_TIME,
+ &e);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading exposure time tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ mNextExposureTime = *e.data.i64;
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_SENSOR_FRAME_DURATION,
+ &e);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading frame duration tag", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+ mNextFrameDuration = *e.data.i64;
+
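+    // A frame can't be shorter than its exposure plus the sensor's minimum
+    // vertical blanking time, so stretch the duration if needed.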
+ if (mNextFrameDuration <
+ mNextExposureTime + Sensor::kMinVerticalBlank) {
+ mNextFrameDuration = mNextExposureTime + Sensor::kMinVerticalBlank;
+ }
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_SENSOR_SENSITIVITY,
+ &e);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading sensitivity tag", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+ mNextSensitivity = *e.data.i32;
+
+ // Start waiting on readout thread
+ mWaitingForReadout = true;
+ ALOGV("Configure: Waiting for readout thread");
+
+ return true;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::configureNextCapture() {
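+    // Reprogram the sensor only at VSync so the new exposure settings take
+    // effect for a whole frame.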
+ bool vsync = mParent->mSensor->waitForVSync(kWaitPerLoop);
+ if (!vsync) return true;
+
+ Mutex::Autolock il(mInternalsMutex);
+ ALOGV("Configure: Configuring sensor for capture %d", mNextFrameNumber);
+ mParent->mSensor->setExposureTime(mNextExposureTime);
+ mParent->mSensor->setFrameDuration(mNextFrameDuration);
+ mParent->mSensor->setSensitivity(mNextSensitivity);
+
+    if (!getBuffers()) return false;
+
+ ALOGV("Configure: Done configure for capture %d", mNextFrameNumber);
+ mParent->mReadoutThread->setNextOperation(true, mRequest, mNextBuffers);
+ mParent->mSensor->setDestinationBuffers(mNextBuffers);
+
+ mRequest = NULL;
+ mNextBuffers = NULL;
+
+ Mutex::Autolock lock(mInputMutex);
+ mRequestCount--;
+
+ return true;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::setupReprocess() {
+ status_t res;
+
+ mNextNeedsJpeg = true;
+ mNextIsCapture = false;
+
+ camera_metadata_entry_t reprocessStreams;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_INPUT_STREAMS,
+ &reprocessStreams);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading output stream tag", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+
+ mNextBuffers = new Buffers;
+
+ ALOGV("Configure: Setting up input buffers for reprocess");
+ for (size_t i = 0; i < reprocessStreams.count; i++) {
+ int streamId = reprocessStreams.data.i32[i];
+ const ReprocessStream &s = mParent->getReprocessStreamInfo(streamId);
+ if (s.format != HAL_PIXEL_FORMAT_RGB_888) {
+ ALOGE("%s: Only ZSL reprocessing supported!",
+ __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
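+        // Reprocess inputs are tagged with negative stream IDs; getBuffers()
+        // and the readout path use the sign to choose acquire/release instead
+        // of dequeue/enqueue.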
+ StreamBuffer b;
+ b.streamId = -streamId;
+ b.width = s.width;
+ b.height = s.height;
+ b.format = s.format;
+ b.stride = s.stride;
+ mNextBuffers->push_back(b);
+ }
+
+ camera_metadata_entry_t streams;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_OUTPUT_STREAMS,
+ &streams);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading output stream tag", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+
+ ALOGV("Configure: Setting up output buffers for reprocess");
+ for (size_t i = 0; i < streams.count; i++) {
+ int streamId = streams.data.i32[i];
+ const Stream &s = mParent->getStreamInfo(streamId);
+ if (s.format != HAL_PIXEL_FORMAT_BLOB) {
+ // TODO: Support reprocess to YUV
+ ALOGE("%s: Non-JPEG output stream %d for reprocess not supported",
+ __FUNCTION__, streamId);
+ mParent->signalError();
+ return false;
+ }
+ StreamBuffer b;
+        b.streamId = streamId;
+ b.width = s.width;
+ b.height = s.height;
+ b.format = s.format;
+ b.stride = s.stride;
+ mNextBuffers->push_back(b);
+ ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+ "stride %d",
+ i, b.streamId, b.width, b.height, b.format, b.stride);
+ }
+
+ camera_metadata_entry_t e;
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_FRAME_COUNT,
+ &e);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading frame count tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ mNextFrameNumber = *e.data.i32;
+
+ return true;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::configureNextReprocess() {
+ Mutex::Autolock il(mInternalsMutex);
+
+    if (!getBuffers()) return false;
+
+ ALOGV("Configure: Done configure for reprocess %d", mNextFrameNumber);
+ mParent->mReadoutThread->setNextOperation(false, mRequest, mNextBuffers);
+
+ mRequest = NULL;
+ mNextBuffers = NULL;
+
+ Mutex::Autolock lock(mInputMutex);
+ mRequestCount--;
+
+ return true;
+}
+
+bool EmulatedFakeCamera2::ConfigureThread::getBuffers() {
+ status_t res;
+ /** Get buffers to fill for this frame */
+ for (size_t i = 0; i < mNextBuffers->size(); i++) {
+ StreamBuffer &b = mNextBuffers->editItemAt(i);
+
+ if (b.streamId > 0) {
+ Stream s = mParent->getStreamInfo(b.streamId);
+ ALOGV("Configure: Dequeing buffer from stream %d", b.streamId);
+ res = s.ops->dequeue_buffer(s.ops, &(b.buffer) );
+ if (res != NO_ERROR || b.buffer == NULL) {
+ ALOGE("%s: Unable to dequeue buffer from stream %d: %s (%d)",
+ __FUNCTION__, b.streamId, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ /* Lock the buffer from the perspective of the graphics mapper */
+ const Rect rect(s.width, s.height);
+
+ res = GraphicBufferMapper::get().lock(*(b.buffer),
+ GRALLOC_USAGE_HW_CAMERA_WRITE,
+ rect, (void**)&(b.img) );
+
+ if (res != NO_ERROR) {
+ ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ s.ops->cancel_buffer(s.ops,
+ b.buffer);
+ mParent->signalError();
+ return false;
+ }
+ } else {
+ ReprocessStream s = mParent->getReprocessStreamInfo(-b.streamId);
+ ALOGV("Configure: Acquiring buffer from reprocess stream %d",
+ -b.streamId);
+ res = s.ops->acquire_buffer(s.ops, &(b.buffer) );
+ if (res != NO_ERROR || b.buffer == NULL) {
+ ALOGE("%s: Unable to acquire buffer from reprocess stream %d: "
+ "%s (%d)", __FUNCTION__, -b.streamId,
+ strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ /* Lock the buffer from the perspective of the graphics mapper */
+ const Rect rect(s.width, s.height);
+
+ res = GraphicBufferMapper::get().lock(*(b.buffer),
+ GRALLOC_USAGE_HW_CAMERA_READ,
+ rect, (void**)&(b.img) );
+ if (res != NO_ERROR) {
+ ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ s.ops->release_buffer(s.ops,
+ b.buffer);
+ mParent->signalError();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+EmulatedFakeCamera2::ReadoutThread::ReadoutThread(EmulatedFakeCamera2 *parent):
+ Thread(false),
+ mParent(parent),
+ mRunning(false),
+ mActive(false),
+ mRequestCount(0),
+ mRequest(NULL),
+ mBuffers(NULL) {
+ mInFlightQueue = new InFlightQueue[kInFlightQueueSize];
+ mInFlightHead = 0;
+ mInFlightTail = 0;
+}
+
+EmulatedFakeCamera2::ReadoutThread::~ReadoutThread() {
+    delete[] mInFlightQueue;  // matches new[] in the constructor
+}
+
+status_t EmulatedFakeCamera2::ReadoutThread::readyToRun() {
+ Mutex::Autolock lock(mInputMutex);
+ ALOGV("Starting up ReadoutThread");
+ mRunning = true;
+ mInputSignal.signal();
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::ReadoutThread::waitUntilRunning() {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mRunning) {
+ ALOGV("Waiting for readout thread to start");
+ mInputSignal.wait(mInputMutex);
+ }
+ return OK;
+}
+
+bool EmulatedFakeCamera2::ReadoutThread::waitForReady(nsecs_t timeout) {
+ status_t res;
+ Mutex::Autolock lock(mInputMutex);
+ while (!readyForNextCapture()) {
+ res = mReadySignal.waitRelative(mInputMutex, timeout);
+ if (res == TIMED_OUT) return false;
+ if (res != OK) {
+ ALOGE("%s: Error waiting for ready: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return false;
+ }
+ }
+ return true;
+}
+
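+// The in-flight queue is a ring buffer that keeps one slot empty, so a full
+// queue (tail + 1 == head) is distinguishable from an empty one
+// (tail == head).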
+bool EmulatedFakeCamera2::ReadoutThread::readyForNextCapture() {
+ return (mInFlightTail + 1) % kInFlightQueueSize != mInFlightHead;
+}
+
+void EmulatedFakeCamera2::ReadoutThread::setNextOperation(
+ bool isCapture,
+ camera_metadata_t *request,
+ Buffers *buffers) {
+ Mutex::Autolock lock(mInputMutex);
+ if ( !readyForNextCapture() ) {
+ ALOGE("In flight queue full, dropping captures");
+ mParent->signalError();
+ return;
+ }
+ mInFlightQueue[mInFlightTail].isCapture = isCapture;
+ mInFlightQueue[mInFlightTail].request = request;
+ mInFlightQueue[mInFlightTail].buffers = buffers;
+ mInFlightTail = (mInFlightTail + 1) % kInFlightQueueSize;
+ mRequestCount++;
+
+ if (!mActive) {
+ mActive = true;
+ mInputSignal.signal();
+ }
+}
+
+bool EmulatedFakeCamera2::ReadoutThread::isStreamInUse(uint32_t id) {
+    // Acquire in the same order as threadLoop (input, then internals) to
+    // avoid a lock-order inversion.
+    Mutex::Autolock lock(mInputMutex);
+    Mutex::Autolock iLock(mInternalsMutex);
+
+ size_t i = mInFlightHead;
+ while (i != mInFlightTail) {
+ for (size_t j = 0; j < mInFlightQueue[i].buffers->size(); j++) {
+ if ( (*(mInFlightQueue[i].buffers))[j].streamId == (int)id )
+ return true;
+ }
+ i = (i + 1) % kInFlightQueueSize;
+ }
+
+ if (mBuffers != NULL) {
+ for (i = 0; i < mBuffers->size(); i++) {
+ if ( (*mBuffers)[i].streamId == (int)id) return true;
+ }
+ }
+
+ return false;
+}
+
+int EmulatedFakeCamera2::ReadoutThread::getInProgressCount() {
+ Mutex::Autolock lock(mInputMutex);
+
+ return mRequestCount;
+}
+
+bool EmulatedFakeCamera2::ReadoutThread::threadLoop() {
+ static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+ status_t res;
+ int32_t frameNumber;
+
+ // Check if we're currently processing or just waiting
+ {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mActive) {
+ // Inactive, keep waiting until we've been signaled
+ res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+ if (res != NO_ERROR && res != TIMED_OUT) {
+ ALOGE("%s: Error waiting for capture requests: %d",
+ __FUNCTION__, res);
+ mParent->signalError();
+ return false;
+ }
+ if (!mActive) return true;
+ }
+ // Active, see if we need a new request
+ if (mRequest == NULL) {
+ if (mInFlightHead == mInFlightTail) {
+ // Go inactive
+ ALOGV("Waiting for sensor data");
+ mActive = false;
+ return true;
+ } else {
+ Mutex::Autolock iLock(mInternalsMutex);
+ mReadySignal.signal();
+ mIsCapture = mInFlightQueue[mInFlightHead].isCapture;
+ mRequest = mInFlightQueue[mInFlightHead].request;
+ mBuffers = mInFlightQueue[mInFlightHead].buffers;
+ mInFlightQueue[mInFlightHead].request = NULL;
+ mInFlightQueue[mInFlightHead].buffers = NULL;
+ mInFlightHead = (mInFlightHead + 1) % kInFlightQueueSize;
+ ALOGV("Ready to read out request %p, %zu buffers",
+ mRequest, mBuffers->size());
+ }
+ }
+ }
+
+ // Active with request, wait on sensor to complete
+
+ nsecs_t captureTime;
+
+ if (mIsCapture) {
+ bool gotFrame;
+ gotFrame = mParent->mSensor->waitForNewFrame(kWaitPerLoop,
+ &captureTime);
+
+ if (!gotFrame) return true;
+ }
+
+ Mutex::Autolock iLock(mInternalsMutex);
+
+ camera_metadata_entry_t entry;
+ if (!mIsCapture) {
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_SENSOR_TIMESTAMP,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading reprocessing timestamp: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ captureTime = entry.data.i64[0];
+ }
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_FRAME_COUNT,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading frame count tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ frameNumber = *entry.data.i32;
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_METADATA_MODE,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading metadata mode tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ // Got sensor data and request, construct frame and send it out
+ ALOGV("Readout: Constructing metadata and frames for request %d",
+ frameNumber);
+
+ if (*entry.data.u8 == ANDROID_REQUEST_METADATA_MODE_FULL) {
+ ALOGV("Readout: Metadata requested, constructing");
+
+ camera_metadata_t *frame = NULL;
+
+ size_t frame_entries = get_camera_metadata_entry_count(mRequest);
+ size_t frame_data = get_camera_metadata_data_count(mRequest);
+
+ // TODO: Dynamically calculate based on enabled statistics, etc
+ frame_entries += 10;
+ frame_data += 100;
+
+ res = mParent->mFrameQueueDst->dequeue_frame(mParent->mFrameQueueDst,
+ frame_entries, frame_data, &frame);
+
+ if (res != NO_ERROR || frame == NULL) {
+ ALOGE("%s: Unable to dequeue frame metadata buffer", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+
+ res = append_camera_metadata(frame, mRequest);
+ if (res != NO_ERROR) {
+ ALOGE("Unable to append request metadata");
+ }
+
+ if (mIsCapture) {
+ add_camera_metadata_entry(frame,
+ ANDROID_SENSOR_TIMESTAMP,
+ &captureTime,
+ 1);
+
+ collectStatisticsMetadata(frame);
+ // TODO: Collect all final values used from sensor in addition to timestamp
+ }
+
+ ALOGV("Readout: Enqueue frame %d", frameNumber);
+ mParent->mFrameQueueDst->enqueue_frame(mParent->mFrameQueueDst,
+ frame);
+ }
+ ALOGV("Readout: Free request");
+ res = mParent->mRequestQueueSrc->free_request(mParent->mRequestQueueSrc, mRequest);
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to return request buffer to queue: %d",
+ __FUNCTION__, res);
+ mParent->signalError();
+ return false;
+ }
+ mRequest = NULL;
+
+ int compressedBufferIndex = -1;
+ ALOGV("Readout: Processing %zu buffers", mBuffers->size());
+ for (size_t i = 0; i < mBuffers->size(); i++) {
+ const StreamBuffer &b = (*mBuffers)[i];
+ ALOGV("Readout: Buffer %zu: Stream %d, %d x %d, format 0x%x, stride %d",
+ i, b.streamId, b.width, b.height, b.format, b.stride);
+ if (b.streamId > 0) {
+ if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+ // Assumes only one BLOB buffer type per capture
+ compressedBufferIndex = i;
+ } else {
+ ALOGV("Readout: Sending image buffer %zu (%p) to output stream %d",
+ i, (void*)*(b.buffer), b.streamId);
+ GraphicBufferMapper::get().unlock(*(b.buffer));
+ const Stream &s = mParent->getStreamInfo(b.streamId);
+ res = s.ops->enqueue_buffer(s.ops, captureTime, b.buffer);
+ if (res != OK) {
+ ALOGE("Error enqueuing image buffer %p: %s (%d)", b.buffer,
+ strerror(-res), res);
+ mParent->signalError();
+ }
+ }
+ }
+ }
+
+ if (compressedBufferIndex == -1) {
+ delete mBuffers;
+ } else {
+ ALOGV("Readout: Starting JPEG compression for buffer %d, stream %d",
+ compressedBufferIndex,
+ (*mBuffers)[compressedBufferIndex].streamId);
+ mJpegTimestamp = captureTime;
+ // Takes ownership of mBuffers
+ mParent->mJpegCompressor->start(mBuffers, this);
+ }
+ mBuffers = NULL;
+
+ Mutex::Autolock l(mInputMutex);
+ mRequestCount--;
+ ALOGV("Readout: Done with request %d", frameNumber);
+ return true;
+}
+
+void EmulatedFakeCamera2::ReadoutThread::onJpegDone(
+ const StreamBuffer &jpegBuffer, bool success) {
+ status_t res;
+ if (!success) {
+ ALOGE("%s: Error queueing compressed image buffer %p",
+ __FUNCTION__, jpegBuffer.buffer);
+ mParent->signalError();
+ return;
+ }
+
+ // Write to JPEG output stream
+ ALOGV("%s: Compression complete, pushing to stream %d", __FUNCTION__,
+ jpegBuffer.streamId);
+
+ GraphicBufferMapper::get().unlock(*(jpegBuffer.buffer));
+ const Stream &s = mParent->getStreamInfo(jpegBuffer.streamId);
+ res = s.ops->enqueue_buffer(s.ops, mJpegTimestamp, jpegBuffer.buffer);
+}
+
+void EmulatedFakeCamera2::ReadoutThread::onJpegInputDone(
+ const StreamBuffer &inputBuffer) {
+ status_t res;
+ GraphicBufferMapper::get().unlock(*(inputBuffer.buffer));
+ const ReprocessStream &s =
+ mParent->getReprocessStreamInfo(-inputBuffer.streamId);
+ res = s.ops->release_buffer(s.ops, inputBuffer.buffer);
+ if (res != OK) {
+ ALOGE("Error releasing reprocess buffer %p: %s (%d)",
+ inputBuffer.buffer, strerror(-res), res);
+ mParent->signalError();
+ }
+}
+
+status_t EmulatedFakeCamera2::ReadoutThread::collectStatisticsMetadata(
+ camera_metadata_t *frame) {
+ // Completely fake face rectangles, don't correspond to real faces in scene
+ ALOGV("Readout: Collecting statistics metadata");
+
+ status_t res;
+ camera_metadata_entry_t entry;
+ res = find_camera_metadata_entry(frame,
+ ANDROID_STATISTICS_FACE_DETECT_MODE,
+ &entry);
+ if (res != OK) {
+ ALOGE("%s: Unable to find face detect mode!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) return OK;
+
+ // The coordinate system for the face regions is the raw sensor pixel
+ // coordinates. Here, we map from the scene coordinates (0-19 in both axis)
+ // to raw pixels, for the scene defined in fake-pipeline2/Scene.cpp. We
+ // approximately place two faces on top of the windows of the house. No
+ // actual faces exist there, but might one day. Note that this doesn't
+ // account for the offsets used to account for aspect ratio differences, so
+ // the rectangles don't line up quite right.
+ const size_t numFaces = 2;
+ int32_t rects[numFaces * 4] = {
+ Sensor::kResolution[0] * 10 / 20,
+ Sensor::kResolution[1] * 15 / 20,
+ Sensor::kResolution[0] * 12 / 20,
+ Sensor::kResolution[1] * 17 / 20,
+
+ Sensor::kResolution[0] * 16 / 20,
+ Sensor::kResolution[1] * 15 / 20,
+ Sensor::kResolution[0] * 18 / 20,
+ Sensor::kResolution[1] * 17 / 20
+ };
+ // To simulate some kind of real detection going on, we jitter the rectangles on
+ // each frame by a few pixels in each dimension.
+ for (size_t i = 0; i < numFaces * 4; i++) {
+ rects[i] += (int32_t)(((float)rand() / RAND_MAX) * 6 - 3);
+ }
+ // The confidence scores (0-100) are similarly jittered.
+ uint8_t scores[numFaces] = { 85, 95 };
+ for (size_t i = 0; i < numFaces; i++) {
+ scores[i] += (int32_t)(((float)rand() / RAND_MAX) * 10 - 5);
+ }
+
+ res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_RECTANGLES,
+ rects, numFaces * 4);
+ if (res != OK) {
+ ALOGE("%s: Unable to add face rectangles!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_SCORES,
+ scores, numFaces);
+ if (res != OK) {
+ ALOGE("%s: Unable to add face scores!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE) return OK;
+
+ // Advanced face detection options - add eye/mouth coordinates. The
+ // coordinates in order are (leftEyeX, leftEyeY, rightEyeX, rightEyeY,
+ // mouthX, mouthY). The mapping is the same as the face rectangles.
+ int32_t features[numFaces * 6] = {
+ Sensor::kResolution[0] * 10.5 / 20,
+ Sensor::kResolution[1] * 16 / 20,
+ Sensor::kResolution[0] * 11.5 / 20,
+ Sensor::kResolution[1] * 16 / 20,
+ Sensor::kResolution[0] * 11 / 20,
+ Sensor::kResolution[1] * 16.5 / 20,
+
+ Sensor::kResolution[0] * 16.5 / 20,
+ Sensor::kResolution[1] * 16 / 20,
+ Sensor::kResolution[0] * 17.5 / 20,
+ Sensor::kResolution[1] * 16 / 20,
+ Sensor::kResolution[0] * 17 / 20,
+ Sensor::kResolution[1] * 16.5 / 20,
+ };
+ // Jitter these a bit less than the rects
+ for (size_t i = 0; i < numFaces * 6; i++) {
+ features[i] += (int32_t)(((float)rand() / RAND_MAX) * 4 - 2);
+ }
+ // These are unique IDs that are used to identify each face while it's
+ // visible to the detector (if a face went away and came back, it'd get a
+ // new ID).
+ int32_t ids[numFaces] = {
+ 100, 200
+ };
+
+ res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_LANDMARKS,
+ features, numFaces * 6);
+ if (res != OK) {
+ ALOGE("%s: Unable to add face landmarks!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_IDS,
+ ids, numFaces);
+ if (res != OK) {
+ ALOGE("%s: Unable to add face scores!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ return OK;
+}
+
+EmulatedFakeCamera2::ControlThread::ControlThread(EmulatedFakeCamera2 *parent):
+ Thread(false),
+ mParent(parent) {
+ mRunning = false;
+}
+
+EmulatedFakeCamera2::ControlThread::~ControlThread() {
+}
+
+status_t EmulatedFakeCamera2::ControlThread::readyToRun() {
+ Mutex::Autolock lock(mInputMutex);
+
+ ALOGV("Starting up ControlThread");
+ mRunning = true;
+ mStartAf = false;
+ mCancelAf = false;
+ mStartPrecapture = false;
+
+ mControlMode = ANDROID_CONTROL_MODE_AUTO;
+
+ mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ mSceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+
+ mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ mAfModeChange = false;
+
+ mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+ mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+
+ mAfTriggerId = 0;
+ mPrecaptureTriggerId = 0;
+
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+
+    mExposureTime = kNormalExposureTime;
+
+    // Scan bookkeeping is read by threadLoop() before any trigger fires, so
+    // give it defined values up front.
+    mAfScanDuration = 0;
+    mAeScanDuration = 0;
+    mLockAfterPassiveScan = false;
+    mAeLock = false;
+
+ mInputSignal.signal();
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::ControlThread::waitUntilRunning() {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mRunning) {
+ ALOGV("Waiting for control thread to start");
+ mInputSignal.wait(mInputMutex);
+ }
+ return OK;
+}
+
+// Override android.control.* fields with 3A values before sending request to sensor
+status_t EmulatedFakeCamera2::ControlThread::processRequest(camera_metadata_t *request) {
+ Mutex::Autolock lock(mInputMutex);
+ // TODO: Add handling for all android.control.* fields here
+ camera_metadata_entry_t mode;
+ status_t res;
+
+#define READ_IF_OK(res, what, def) \
+ (((res) == OK) ? (what) : (uint8_t)(def))
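+    // Falls back to the default whenever the tag is missing from the request
+    // instead of reading a stale entry.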
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_MODE,
+ &mode);
+ mControlMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_MODE_OFF);
+
+ // disable all 3A
+ if (mControlMode == ANDROID_CONTROL_MODE_OFF) {
+ mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ mSceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+ mAfMode = ANDROID_CONTROL_AF_MODE_OFF;
+ mAeLock = ANDROID_CONTROL_AE_LOCK_ON;
+ mAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+ mAfModeChange = true;
+ mStartAf = false;
+ mCancelAf = true;
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ mAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+ return res;
+ }
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_EFFECT_MODE,
+ &mode);
+ mEffectMode = READ_IF_OK(res, mode.data.u8[0],
+ ANDROID_CONTROL_EFFECT_MODE_OFF);
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_SCENE_MODE,
+ &mode);
+ mSceneMode = READ_IF_OK(res, mode.data.u8[0],
+ ANDROID_CONTROL_SCENE_MODE_DISABLED);
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_AF_MODE,
+ &mode);
+    if (res == OK && mAfMode != mode.data.u8[0]) {
+ ALOGV("AF new mode: %d, old mode %d", mode.data.u8[0], mAfMode);
+ mAfMode = mode.data.u8[0];
+ mAfModeChange = true;
+ mStartAf = false;
+ mCancelAf = false;
+ }
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_AE_MODE,
+ &mode);
+ mAeMode = READ_IF_OK(res, mode.data.u8[0],
+ ANDROID_CONTROL_AE_MODE_OFF);
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_AE_LOCK,
+ &mode);
+ uint8_t aeLockVal = READ_IF_OK(res, mode.data.u8[0],
+ ANDROID_CONTROL_AE_LOCK_ON);
+ bool aeLock = (aeLockVal == ANDROID_CONTROL_AE_LOCK_ON);
+ if (mAeLock && !aeLock) {
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ }
+ mAeLock = aeLock;
+
+ res = find_camera_metadata_entry(request,
+ ANDROID_CONTROL_AWB_MODE,
+ &mode);
+ mAwbMode = READ_IF_OK(res, mode.data.u8[0],
+ ANDROID_CONTROL_AWB_MODE_OFF);
+
+ // TODO: Override more control fields
+
+ if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
+ camera_metadata_entry_t exposureTime;
+ res = find_camera_metadata_entry(request,
+ ANDROID_SENSOR_EXPOSURE_TIME,
+ &exposureTime);
+ if (res == OK) {
+ exposureTime.data.i64[0] = mExposureTime;
+ }
+ }
+
+#undef READ_IF_OK
+
+ return OK;
+}
+
+status_t EmulatedFakeCamera2::ControlThread::triggerAction(uint32_t msgType,
+ int32_t ext1, int32_t ext2) {
+ ALOGV("%s: Triggering %d (%d, %d)", __FUNCTION__, msgType, ext1, ext2);
+ Mutex::Autolock lock(mInputMutex);
+ switch (msgType) {
+ case CAMERA2_TRIGGER_AUTOFOCUS:
+ mAfTriggerId = ext1;
+ mStartAf = true;
+ mCancelAf = false;
+ break;
+ case CAMERA2_TRIGGER_CANCEL_AUTOFOCUS:
+ mAfTriggerId = ext1;
+ mStartAf = false;
+ mCancelAf = true;
+ break;
+ case CAMERA2_TRIGGER_PRECAPTURE_METERING:
+ mPrecaptureTriggerId = ext1;
+ mStartPrecapture = true;
+ break;
+ default:
+ ALOGE("%s: Unknown action triggered: %d (arguments %d %d)",
+ __FUNCTION__, msgType, ext1, ext2);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+const nsecs_t EmulatedFakeCamera2::ControlThread::kControlCycleDelay = 100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAfDuration = 500 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAfDuration = 900 * MSEC;
+const float EmulatedFakeCamera2::ControlThread::kAfSuccessRate = 0.9;
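+// The scan-start constants below are per-cycle probabilities: with a 100 ms
+// control cycle, a rate of cycleDelay/interval makes the expected gap between
+// spontaneous scans roughly the commented interval.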
+ // Once every 5 seconds
+const float EmulatedFakeCamera2::ControlThread::kContinuousAfStartRate =
+    kControlCycleDelay / (5.0 * SEC);
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAeDuration = 500 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAeDuration = 2 * SEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinPrecaptureAeDuration = 100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxPrecaptureAeDuration = 400 * MSEC;
+ // Once every 3 seconds
+const float EmulatedFakeCamera2::ControlThread::kAeScanStartRate =
+    kControlCycleDelay / (3.0 * SEC);
+
+const nsecs_t EmulatedFakeCamera2::ControlThread::kNormalExposureTime = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kExposureJump = 2 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinExposureTime = 1 * MSEC;
+
+bool EmulatedFakeCamera2::ControlThread::threadLoop() {
+ bool afModeChange = false;
+ bool afTriggered = false;
+ bool afCancelled = false;
+ uint8_t afState;
+ uint8_t afMode;
+ int32_t afTriggerId;
+ bool precaptureTriggered = false;
+ uint8_t aeState;
+ uint8_t aeMode;
+ bool aeLock;
+ int32_t precaptureTriggerId;
+ nsecs_t nextSleep = kControlCycleDelay;
+
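+    // Snapshot trigger flags and 3A state under the input lock, then run the
+    // state machines without holding it so processRequest() stays unblocked.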
+ {
+ Mutex::Autolock lock(mInputMutex);
+ if (mStartAf) {
+ ALOGD("Starting AF trigger processing");
+ afTriggered = true;
+ mStartAf = false;
+ } else if (mCancelAf) {
+ ALOGD("Starting cancel AF trigger processing");
+ afCancelled = true;
+ mCancelAf = false;
+ }
+ afState = mAfState;
+ afMode = mAfMode;
+ afModeChange = mAfModeChange;
+ mAfModeChange = false;
+
+ afTriggerId = mAfTriggerId;
+
+        if (mStartPrecapture) {
+ ALOGD("Starting precapture trigger processing");
+ precaptureTriggered = true;
+ mStartPrecapture = false;
+ }
+ aeState = mAeState;
+ aeMode = mAeMode;
+ aeLock = mAeLock;
+ precaptureTriggerId = mPrecaptureTriggerId;
+ }
+
+ if (afCancelled || afModeChange) {
+ ALOGV("Resetting AF state due to cancel/mode change");
+ afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ updateAfState(afState, afTriggerId);
+ mAfScanDuration = 0;
+ mLockAfterPassiveScan = false;
+ }
+
+ uint8_t oldAfState = afState;
+
+ if (afTriggered) {
+ afState = processAfTrigger(afMode, afState);
+ }
+
+ afState = maybeStartAfScan(afMode, afState);
+ afState = updateAfScan(afMode, afState, &nextSleep);
+ updateAfState(afState, afTriggerId);
+
+ if (precaptureTriggered) {
+ aeState = processPrecaptureTrigger(aeMode, aeState);
+ }
+
+ aeState = maybeStartAeScan(aeMode, aeLock, aeState);
+ aeState = updateAeScan(aeMode, aeLock, aeState, &nextSleep);
+ updateAeState(aeState, precaptureTriggerId);
+
+ int ret;
+ timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = nextSleep;
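+    // nanosleep() writes the unslept remainder back into t on interruption,
+    // so looping until it returns 0 completes the full delay.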
+ do {
+ ret = nanosleep(&t, &t);
+ } while (ret != 0);
+
+ if (mAfScanDuration > 0) {
+ mAfScanDuration -= nextSleep;
+ }
+ if (mAeScanDuration > 0) {
+ mAeScanDuration -= nextSleep;
+ }
+
+ return true;
+}
+
+int EmulatedFakeCamera2::ControlThread::processAfTrigger(uint8_t afMode,
+ uint8_t afState) {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_OFF:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
+ // Do nothing
+ break;
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ switch (afState) {
+ case ANDROID_CONTROL_AF_STATE_INACTIVE:
+ case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+ case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+ // Start new focusing cycle
+ mAfScanDuration = ((double)rand() / RAND_MAX) *
+ (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
+ afState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+ ALOGV("%s: AF scan start, duration %" PRId64 " ms",
+ __FUNCTION__, mAfScanDuration / 1000000);
+ break;
+ case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+ // Ignore new request, already scanning
+ break;
+ default:
+ ALOGE("Unexpected AF state in AUTO/MACRO AF mode: %d",
+ afState);
+ }
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ switch (afState) {
+ // Picture mode waits for passive scan to complete
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+ mLockAfterPassiveScan = true;
+ break;
+ case ANDROID_CONTROL_AF_STATE_INACTIVE:
+ afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ break;
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+ afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ break;
+ case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+ case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+ // Must cancel to get out of these states
+ break;
+ default:
+ ALOGE("Unexpected AF state in CONTINUOUS_PICTURE AF mode: %d",
+ afState);
+ }
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ switch (afState) {
+ // Video mode does not wait for passive scan to complete
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+ case ANDROID_CONTROL_AF_STATE_INACTIVE:
+ afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ break;
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+ afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ break;
+ case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+ case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+ // Must cancel to get out of these states
+ break;
+ default:
+ ALOGE("Unexpected AF state in CONTINUOUS_VIDEO AF mode: %d",
+ afState);
+ }
+ break;
+ default:
+ break;
+ }
+ return afState;
+}
+
+int EmulatedFakeCamera2::ControlThread::maybeStartAfScan(uint8_t afMode,
+ uint8_t afState) {
+ if ((afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO ||
+ afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE) &&
+ (afState == ANDROID_CONTROL_AF_STATE_INACTIVE ||
+ afState == ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)) {
+
+ bool startScan = ((double)rand() / RAND_MAX) < kContinuousAfStartRate;
+ if (startScan) {
+ // Start new passive focusing cycle
+ mAfScanDuration = ((double)rand() / RAND_MAX) *
+ (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
+ afState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
+ ALOGV("%s: AF passive scan start, duration %" PRId64 " ms",
+ __FUNCTION__, mAfScanDuration / 1000000);
+ }
+ }
+ return afState;
+}
+
+int EmulatedFakeCamera2::ControlThread::updateAfScan(uint8_t afMode,
+ uint8_t afState, nsecs_t *maxSleep) {
+ if (! (afState == ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN ||
+ afState == ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN ) ) {
+ return afState;
+ }
+
+ if (mAfScanDuration <= 0) {
+ ALOGV("%s: AF scan done", __FUNCTION__);
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ case ANDROID_CONTROL_AF_MODE_AUTO: {
+ bool success = ((double)rand() / RAND_MAX) < kAfSuccessRate;
+ if (success) {
+ afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ } else {
+ afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ }
+ break;
+ }
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ if (mLockAfterPassiveScan) {
+ afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ mLockAfterPassiveScan = false;
+ } else {
+ afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+ }
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+ break;
+ default:
+ ALOGE("Unexpected AF mode in scan state");
+ }
+ } else {
+ if (mAfScanDuration <= *maxSleep) {
+ *maxSleep = mAfScanDuration;
+ }
+ }
+ return afState;
+}
+
+void EmulatedFakeCamera2::ControlThread::updateAfState(uint8_t newState,
+ int32_t triggerId) {
+ Mutex::Autolock lock(mInputMutex);
+ if (mAfState != newState) {
+ ALOGV("%s: Autofocus state now %d, id %d", __FUNCTION__,
+ newState, triggerId);
+ mAfState = newState;
+ mParent->sendNotification(CAMERA2_MSG_AUTOFOCUS,
+ newState, triggerId, 0);
+ }
+}
+
+int EmulatedFakeCamera2::ControlThread::processPrecaptureTrigger(uint8_t aeMode,
+ uint8_t aeState) {
+ switch (aeMode) {
+ case ANDROID_CONTROL_AE_MODE_OFF:
+ // Don't do anything for these
+ return aeState;
+ case ANDROID_CONTROL_AE_MODE_ON:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+ // Trigger a precapture cycle
+ aeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
+ mAeScanDuration = ((double)rand() / RAND_MAX) *
+ (kMaxPrecaptureAeDuration - kMinPrecaptureAeDuration) +
+ kMinPrecaptureAeDuration;
+ ALOGD("%s: AE precapture scan start, duration %" PRId64 " ms",
+ __FUNCTION__, mAeScanDuration / 1000000);
+
+ }
+ return aeState;
+}
+
+int EmulatedFakeCamera2::ControlThread::maybeStartAeScan(uint8_t aeMode,
+ bool aeLocked,
+ uint8_t aeState) {
+ if (aeLocked) return aeState;
+ switch (aeMode) {
+ case ANDROID_CONTROL_AE_MODE_OFF:
+ break;
+ case ANDROID_CONTROL_AE_MODE_ON:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: {
+ if (aeState != ANDROID_CONTROL_AE_STATE_INACTIVE &&
+ aeState != ANDROID_CONTROL_AE_STATE_CONVERGED) break;
+
+ bool startScan = ((double)rand() / RAND_MAX) < kAeScanStartRate;
+ if (startScan) {
+ mAeScanDuration = ((double)rand() / RAND_MAX) *
+ (kMaxAeDuration - kMinAeDuration) + kMinAeDuration;
+ aeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
+ ALOGV("%s: AE scan start, duration %" PRId64 " ms",
+ __FUNCTION__, mAeScanDuration / 1000000);
+ }
+ }
+ }
+
+ return aeState;
+}
+
+int EmulatedFakeCamera2::ControlThread::updateAeScan(uint8_t aeMode,
+ bool aeLock, uint8_t aeState, nsecs_t *maxSleep) {
+ if (aeLock && aeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ mAeScanDuration = 0;
+ aeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+ } else if ((aeState == ANDROID_CONTROL_AE_STATE_SEARCHING) ||
+ (aeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE ) ) {
+ if (mAeScanDuration <= 0) {
+ ALOGV("%s: AE scan done", __FUNCTION__);
+ aeState = aeLock ?
+ ANDROID_CONTROL_AE_STATE_LOCKED :ANDROID_CONTROL_AE_STATE_CONVERGED;
+
+ Mutex::Autolock lock(mInputMutex);
+ mExposureTime = kNormalExposureTime;
+ } else {
+ if (mAeScanDuration <= *maxSleep) {
+ *maxSleep = mAeScanDuration;
+ }
+
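+            // Random-walk the exposure while scanning to mimic AE hunting;
+            // clamped to kMinExposureTime below.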
+ int64_t exposureDelta =
+ ((double)rand() / RAND_MAX) * 2 * kExposureJump -
+ kExposureJump;
+ Mutex::Autolock lock(mInputMutex);
+ mExposureTime = mExposureTime + exposureDelta;
+ if (mExposureTime < kMinExposureTime) mExposureTime = kMinExposureTime;
+ }
+ }
+
+ return aeState;
+}
+
+
+void EmulatedFakeCamera2::ControlThread::updateAeState(uint8_t newState,
+ int32_t triggerId) {
+ Mutex::Autolock lock(mInputMutex);
+ if (mAeState != newState) {
+ ALOGV("%s: Autoexposure state now %d, id %d", __FUNCTION__,
+ newState, triggerId);
+ mAeState = newState;
+ mParent->sendNotification(CAMERA2_MSG_AUTOEXPOSURE,
+ newState, triggerId, 0);
+ }
+}
+
+/** Private methods */
+
+status_t EmulatedFakeCamera2::constructStaticInfo(
+ camera_metadata_t **info,
+ bool sizeRequest) const {
+
+ size_t entryCount = 0;
+ size_t dataCount = 0;
+ status_t ret;
+
+#define ADD_OR_SIZE( tag, data, count ) \
+ if ( ( ret = addOrSize(*info, sizeRequest, &entryCount, &dataCount, \
+ tag, data, count) ) != OK ) return ret
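+    // Two-pass scheme: when sizeRequest is true this only tallies entry and
+    // data counts; the caller then allocates the metadata buffer and calls
+    // again to populate it.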
+
+ // android.lens
+
+ // 5 cm min focus distance for back camera, infinity (fixed focus) for front
+ const float minFocusDistance = mFacingBack ? 1.0/0.05 : 0.0;
+ ADD_OR_SIZE(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ &minFocusDistance, 1);
+ // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
+ const float hyperFocalDistance = mFacingBack ? 1.0/5.0 : 0.0;
+    ADD_OR_SIZE(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+            &hyperFocalDistance, 1);
+
+ static const float focalLength = 3.30f; // mm
+ ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ &focalLength, 1);
+ static const float aperture = 2.8f;
+ ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ &aperture, 1);
+ static const float filterDensity = 0;
+ ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+ &filterDensity, 1);
+ static const uint8_t availableOpticalStabilization =
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ &availableOpticalStabilization, 1);
+
+ static const int32_t lensShadingMapSize[] = {1, 1};
+ ADD_OR_SIZE(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+ sizeof(lensShadingMapSize)/sizeof(int32_t));
+
+ int32_t lensFacing = mFacingBack ?
+ ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+ ADD_OR_SIZE(ANDROID_LENS_FACING, &lensFacing, 1);
+
+ float lensPosition[3];
+ if (mFacingBack) {
+ // Back-facing camera is center-top on device
+ lensPosition[0] = 0;
+ lensPosition[1] = 20;
+ lensPosition[2] = -5;
+ } else {
+ // Front-facing camera is center-right on device
+ lensPosition[0] = 20;
+ lensPosition[1] = 20;
+ lensPosition[2] = 0;
+ }
+ ADD_OR_SIZE(ANDROID_LENS_POSITION, lensPosition, sizeof(lensPosition)/
+ sizeof(float));
+
+ // android.sensor
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ Sensor::kExposureTimeRange, 2);
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ &Sensor::kFrameDurationRange[1], 1);
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ Sensor::kSensitivityRange,
+ sizeof(Sensor::kSensitivityRange)
+ /sizeof(int32_t));
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ &Sensor::kColorFilterArrangement, 1);
+
+ static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ sensorPhysicalSize, 2);
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ Sensor::kResolution, 2);
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ Sensor::kResolution, 2);
+
+ ADD_OR_SIZE(ANDROID_SENSOR_INFO_WHITE_LEVEL,
+ &Sensor::kMaxRawValue, 1);
+
+ static const int32_t blackLevelPattern[4] = {
+ Sensor::kBlackLevel, Sensor::kBlackLevel,
+ Sensor::kBlackLevel, Sensor::kBlackLevel
+ };
+ ADD_OR_SIZE(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
+ blackLevelPattern, sizeof(blackLevelPattern)/sizeof(int32_t));
+
+ //TODO: sensor color calibration fields
+
+ // android.flash
+ static const uint8_t flashAvailable = 0;
+ ADD_OR_SIZE(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+
+ static const int64_t flashChargeDuration = 0;
+ ADD_OR_SIZE(ANDROID_FLASH_INFO_CHARGE_DURATION, &flashChargeDuration, 1);
+
+ // android.tonemap
+
+ static const int32_t tonemapCurvePoints = 128;
+ ADD_OR_SIZE(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+
+ // android.scaler
+
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_FORMATS,
+ kAvailableFormats,
+ sizeof(kAvailableFormats)/sizeof(uint32_t));
+
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+ kAvailableRawSizes,
+ sizeof(kAvailableRawSizes)/sizeof(uint32_t));
+
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+ kAvailableRawMinDurations,
+ sizeof(kAvailableRawMinDurations)/sizeof(uint64_t));
+
+ if (mFacingBack) {
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+ kAvailableProcessedSizesBack,
+ sizeof(kAvailableProcessedSizesBack)/sizeof(uint32_t));
+ } else {
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+ kAvailableProcessedSizesFront,
+ sizeof(kAvailableProcessedSizesFront)/sizeof(uint32_t));
+ }
+
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
+ kAvailableProcessedMinDurations,
+ sizeof(kAvailableProcessedMinDurations)/sizeof(uint64_t));
+
+ if (mFacingBack) {
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+ kAvailableJpegSizesBack,
+ sizeof(kAvailableJpegSizesBack)/sizeof(uint32_t));
+ } else {
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+ kAvailableJpegSizesFront,
+ sizeof(kAvailableJpegSizesFront)/sizeof(uint32_t));
+ }
+
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
+ kAvailableJpegMinDurations,
+ sizeof(kAvailableJpegMinDurations)/sizeof(uint64_t));
+
+ static const float maxZoom = 10;
+ ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ &maxZoom, 1);
+
+ // android.jpeg
+
+ static const int32_t jpegThumbnailSizes[] = {
+ 0, 0,
+ 160, 120,
+ 320, 240
+ };
+ ADD_OR_SIZE(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ jpegThumbnailSizes, sizeof(jpegThumbnailSizes)/sizeof(int32_t));
+
+ static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+ ADD_OR_SIZE(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+
+ // android.stats
+
+ static const uint8_t availableFaceDetectModes[] = {
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+ ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+ ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
+ };
+
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ availableFaceDetectModes,
+ sizeof(availableFaceDetectModes));
+
+ static const int32_t maxFaceCount = 8;
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ &maxFaceCount, 1);
+
+ static const int32_t histogramSize = 64;
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+ &histogramSize, 1);
+
+ static const int32_t maxHistogramCount = 1000;
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+ &maxHistogramCount, 1);
+
+ static const int32_t sharpnessMapSize[2] = {64, 64};
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+ sharpnessMapSize, sizeof(sharpnessMapSize)/sizeof(int32_t));
+
+ static const int32_t maxSharpnessMapValue = 1000;
+ ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+ &maxSharpnessMapValue, 1);
+
+ // android.control
+
+ static const uint8_t availableSceneModes[] = {
+ ANDROID_CONTROL_SCENE_MODE_DISABLED
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ availableSceneModes, sizeof(availableSceneModes));
+
+ static const uint8_t availableEffects[] = {
+ ANDROID_CONTROL_EFFECT_MODE_OFF
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ availableEffects, sizeof(availableEffects));
+
+ static const int32_t max3aRegions[] = {/*AE*/ 0,/*AWB*/ 0,/*AF*/ 0};
+ ADD_OR_SIZE(ANDROID_CONTROL_MAX_REGIONS,
+ max3aRegions, sizeof(max3aRegions)/sizeof(max3aRegions[0]));
+
+ static const uint8_t availableAeModes[] = {
+ ANDROID_CONTROL_AE_MODE_OFF,
+ ANDROID_CONTROL_AE_MODE_ON
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ availableAeModes, sizeof(availableAeModes));
+
+ static const camera_metadata_rational exposureCompensationStep = {
+ 1, 3
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ &exposureCompensationStep, 1);
+
+ int32_t exposureCompensationRange[] = {-9, 9};
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ exposureCompensationRange,
+ sizeof(exposureCompensationRange)/sizeof(int32_t));
+
+ static const int32_t availableTargetFpsRanges[] = {
+ 5, 30, 15, 30
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ availableTargetFpsRanges,
+ sizeof(availableTargetFpsRanges)/sizeof(int32_t));
+
+ static const uint8_t availableAntibandingModes[] = {
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ availableAntibandingModes, sizeof(availableAntibandingModes));
+
+ static const uint8_t availableAwbModes[] = {
+ ANDROID_CONTROL_AWB_MODE_OFF,
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+ ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+ ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+ ANDROID_CONTROL_AWB_MODE_SHADE
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ availableAwbModes, sizeof(availableAwbModes));
+
+ static const uint8_t availableAfModesBack[] = {
+ ANDROID_CONTROL_AF_MODE_OFF,
+ ANDROID_CONTROL_AF_MODE_AUTO,
+ ANDROID_CONTROL_AF_MODE_MACRO,
+ ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+ ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
+ };
+
+ static const uint8_t availableAfModesFront[] = {
+ ANDROID_CONTROL_AF_MODE_OFF
+ };
+
+ if (mFacingBack) {
+ ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModesBack, sizeof(availableAfModesBack));
+ } else {
+ ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModesFront, sizeof(availableAfModesFront));
+ }
+
+ static const uint8_t availableVstabModes[] = {
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ availableVstabModes, sizeof(availableVstabModes));
+
+#undef ADD_OR_SIZE
+ /** Allocate metadata if sizing */
+ if (sizeRequest) {
+ ALOGV("Allocating %zu entries, %zu extra bytes for "
+ "static camera info",
+ entryCount, dataCount);
+ *info = allocate_camera_metadata(entryCount, dataCount);
+ if (*info == NULL) {
+ ALOGE("Unable to allocate camera static info"
+ "(%zu entries, %zu bytes extra data)",
+ entryCount, dataCount);
+ return NO_MEMORY;
+ }
+ }
+ return OK;
+}
+
+status_t EmulatedFakeCamera2::constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request,
+ bool sizeRequest) const {
+
+ size_t entryCount = 0;
+ size_t dataCount = 0;
+ status_t ret;
+
+#define ADD_OR_SIZE( tag, data, count ) \
+ if ( ( ret = addOrSize(*request, sizeRequest, &entryCount, &dataCount, \
+ tag, data, count) ) != OK ) return ret
+
+ /** android.request */
+
+ static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
+ ADD_OR_SIZE(ANDROID_REQUEST_TYPE, &requestType, 1);
+
+ static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+ ADD_OR_SIZE(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+ static const int32_t id = 0;
+ ADD_OR_SIZE(ANDROID_REQUEST_ID, &id, 1);
+
+ static const int32_t frameCount = 0;
+ ADD_OR_SIZE(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+ // OUTPUT_STREAMS set by user
+ entryCount += 1;
+ dataCount += 5; // TODO: Should be maximum stream number
+
+ /** android.lens */
+
+ static const float focusDistance = 0;
+ ADD_OR_SIZE(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+ static const float aperture = 2.8f;
+ ADD_OR_SIZE(ANDROID_LENS_APERTURE, &aperture, 1);
+
+    // Match the single advertised focal length in the static metadata
+    static const float focalLength = 3.30f;
+    ADD_OR_SIZE(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+ static const float filterDensity = 0;
+ ADD_OR_SIZE(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+ static const uint8_t opticalStabilizationMode =
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ &opticalStabilizationMode, 1);
+
+ // FOCUS_RANGE set only in frame
+
+ /** android.sensor */
+
+ static const int64_t exposureTime = 10 * MSEC;
+ ADD_OR_SIZE(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+
+ static const int64_t frameDuration = 33333333L; // 1/30 s
+ ADD_OR_SIZE(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+
+ static const int32_t sensitivity = 100;
+ ADD_OR_SIZE(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+
+ // TIMESTAMP set only in frame
+
+ /** android.flash */
+
+ static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_FLASH_MODE, &flashMode, 1);
+
+ static const uint8_t flashPower = 10;
+ ADD_OR_SIZE(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+ static const int64_t firingTime = 0;
+ ADD_OR_SIZE(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+
+ /** Processing block modes */
+ uint8_t hotPixelMode = 0;
+ uint8_t demosaicMode = 0;
+ uint8_t noiseMode = 0;
+ uint8_t shadingMode = 0;
+ uint8_t colorMode = 0;
+ uint8_t tonemapMode = 0;
+ uint8_t edgeMode = 0;
+ switch (request_template) {
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ // fall-through
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ // fall-through
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+ shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+ tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+ edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+ break;
+ case CAMERA2_TEMPLATE_PREVIEW:
+ // fall-through
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ // fall-through
+ default:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+ shadingMode = ANDROID_SHADING_MODE_FAST;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+ tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+ edgeMode = ANDROID_EDGE_MODE_FAST;
+ break;
+ }
+ ADD_OR_SIZE(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+ ADD_OR_SIZE(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+ ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+ ADD_OR_SIZE(ANDROID_SHADING_MODE, &shadingMode, 1);
+ ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+ ADD_OR_SIZE(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+ ADD_OR_SIZE(ANDROID_EDGE_MODE, &edgeMode, 1);
+
+ /** android.noise */
+ static const uint8_t noiseStrength = 5;
+ ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_STRENGTH, &noiseStrength, 1);
+
+ /** android.color */
+ static const float colorTransform[9] = {
+ 1.0f, 0.f, 0.f,
+ 0.f, 1.f, 0.f,
+ 0.f, 0.f, 1.f
+ };
+ ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+ /** android.tonemap */
+ static const float tonemapCurve[4] = {
+ 0.f, 0.f,
+ 1.f, 1.f
+ };
+ ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+ ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+ ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+
+ /** android.edge */
+ static const uint8_t edgeStrength = 5;
+ ADD_OR_SIZE(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
+
+ /** android.scaler */
+ static const int32_t cropRegion[3] = {
+ 0, 0, Sensor::kResolution[0]
+ };
+ ADD_OR_SIZE(ANDROID_SCALER_CROP_REGION, cropRegion, 3);
+
+ /** android.jpeg */
+ static const int32_t jpegQuality = 80;
+ ADD_OR_SIZE(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+ static const int32_t thumbnailSize[2] = {
+ 640, 480
+ };
+ ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+ static const int32_t thumbnailQuality = 80;
+ ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+ static const double gpsCoordinates[2] = {
+ 0, 0
+ };
+ ADD_OR_SIZE(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+ static const uint8_t gpsProcessingMethod[32] = "None";
+ ADD_OR_SIZE(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+ static const int64_t gpsTimestamp = 0;
+ ADD_OR_SIZE(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+ static const int32_t jpegOrientation = 0;
+ ADD_OR_SIZE(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+
+ /** android.stats */
+
+ static const uint8_t faceDetectMode =
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+ static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
+
+ static const uint8_t sharpnessMapMode =
+ ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
+
+ // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+ // sharpnessMap only in frames
+
+ /** android.control */
+
+ uint8_t controlIntent = 0;
+ switch (request_template) {
+ case CAMERA2_TEMPLATE_PREVIEW:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ break;
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+ break;
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ break;
+ default:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+ break;
+ }
+ ADD_OR_SIZE(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+ static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+ ADD_OR_SIZE(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+ static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+ static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+ ADD_OR_SIZE(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+ static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH;
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+ static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+ static const int32_t controlRegions[5] = {
+ 0, 0, Sensor::kResolution[0], Sensor::kResolution[1], 1000
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+ static const int32_t aeExpCompensation = 0;
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+ static const int32_t aeTargetFpsRange[2] = {
+ 10, 30
+ };
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+ static const uint8_t aeAntibandingMode =
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ ADD_OR_SIZE(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+ static const uint8_t awbMode =
+ ANDROID_CONTROL_AWB_MODE_AUTO;
+ ADD_OR_SIZE(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+ static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+ ADD_OR_SIZE(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+ ADD_OR_SIZE(ANDROID_CONTROL_AWB_REGIONS, controlRegions, 5);
+
+ uint8_t afMode = 0;
+ switch (request_template) {
+ case CAMERA2_TEMPLATE_PREVIEW:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ break;
+ default:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ }
+ ADD_OR_SIZE(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+ ADD_OR_SIZE(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+ static const uint8_t vstabMode =
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ ADD_OR_SIZE(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+ // aeState, awbState, afState only in frame
+
+ /** Allocate metadata if sizing */
+ if (sizeRequest) {
+ ALOGV("Allocating %zu entries, %zu extra bytes for "
+ "request template type %d",
+ entryCount, dataCount, request_template);
+ *request = allocate_camera_metadata(entryCount, dataCount);
+ if (*request == NULL) {
+ ALOGE("Unable to allocate new request template type %d "
+ "(%zu entries, %zu bytes extra data)", request_template,
+ entryCount, dataCount);
+ return NO_MEMORY;
+ }
+ }
+ return OK;
+#undef ADD_OR_SIZE
+}
+
+status_t EmulatedFakeCamera2::addOrSize(camera_metadata_t *request,
+ bool sizeRequest,
+ size_t *entryCount,
+ size_t *dataCount,
+ uint32_t tag,
+ const void *entryData,
+ size_t entryDataCount) {
+ status_t res;
+ if (!sizeRequest) {
+ return add_camera_metadata_entry(request, tag, entryData,
+ entryDataCount);
+ } else {
+ int type = get_camera_metadata_tag_type(tag);
+        if (type < 0) return BAD_VALUE;
+ (*entryCount)++;
+ (*dataCount) += calculate_camera_metadata_entry_data_size(type,
+ entryDataCount);
+ return OK;
+ }
+}
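+
+/* A minimal sketch of the two-pass pattern addOrSize() enables (illustrative
+ * only; the EmulatedCamera2 framework glue drives the real calls):
+ *
+ *   camera_metadata_t *req = NULL;
+ *   constructDefaultRequest(tmpl, &req, true);  // pass 1: count entries and
+ *                                               // data, then allocate *req
+ *   constructDefaultRequest(tmpl, &req, false); // pass 2: fill the entries
+ *
+ * Pass 1 never dereferences *req until the final allocate_camera_metadata()
+ * call; pass 2 fits by construction because it repeats the exact same
+ * sequence of ADD_OR_SIZE() invocations against the sized buffer.
+ */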
+
+bool EmulatedFakeCamera2::isStreamInUse(uint32_t id) {
+ // Assumes mMutex is locked; otherwise new requests could enter
+ // configureThread while readoutThread is being checked
+
+ // Order of isStreamInUse calls matters
+ if (mConfigureThread->isStreamInUse(id) ||
+ mReadoutThread->isStreamInUse(id) ||
+ mJpegCompressor->isStreamInUse(id) ) {
+ ALOGE("%s: Stream %d is in use in active requests!",
+ __FUNCTION__, id);
+ return true;
+ }
+ return false;
+}
+
+bool EmulatedFakeCamera2::isReprocessStreamInUse(uint32_t id) {
+ // TODO: implement
+ return false;
+}
+
+const Stream& EmulatedFakeCamera2::getStreamInfo(uint32_t streamId) {
+ Mutex::Autolock lock(mMutex);
+
+ return mStreams.valueFor(streamId);
+}
+
+const ReprocessStream& EmulatedFakeCamera2::getReprocessStreamInfo(uint32_t streamId) {
+ Mutex::Autolock lock(mMutex);
+
+ return mReprocessStreams.valueFor(streamId);
+}
+
+}; /* namespace android */
diff --git a/v3/EmulatedFakeCamera2.h b/v3/EmulatedFakeCamera2.h
new file mode 100644
index 0000000..64c8667
--- a/dev/null
+++ b/v3/EmulatedFakeCamera2.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H
+
+/*
+ * Contains declaration of a class EmulatedFakeCamera2 that encapsulates
+ * functionality of a fake camera that implements version 2 of the camera device
+ * interface.
+ */
+
+#include "EmulatedCamera2.h"
+#include "fake-pipeline2/Base.h"
+#include "fake-pipeline2/Sensor.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include <utils/Condition.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/String16.h>
+
+namespace android {
+
+/* Encapsulates functionality of an advanced fake camera. This camera contains
+ * a simple simulation of a scene, sensor, and image processing pipeline.
+ */
+class EmulatedFakeCamera2 : public EmulatedCamera2 {
+public:
+ /* Constructs EmulatedFakeCamera instance. */
+ EmulatedFakeCamera2(int cameraId, bool facingBack, struct hw_module_t* module);
+
+ /* Destructs EmulatedFakeCamera instance. */
+ ~EmulatedFakeCamera2();
+
+ /****************************************************************************
+ * EmulatedCamera2 virtual overrides.
+ ***************************************************************************/
+
+public:
+ /* Initializes EmulatedFakeCamera2 instance. */
+ status_t Initialize();
+
+ /****************************************************************************
+ * Camera Module API and generic hardware device API implementation
+ ***************************************************************************/
+public:
+
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t plugCamera();
+ virtual status_t unplugCamera();
+ virtual camera_device_status_t getHotplugStatus();
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info *info);
+
+ /****************************************************************************
+ * EmulatedCamera2 abstract API implementation.
+ ***************************************************************************/
+protected:
+ /** Request input queue */
+
+ virtual int requestQueueNotify();
+
+ /** Count of requests in flight */
+ virtual int getInProgressCount();
+
+ /** Cancel all captures in flight */
+ //virtual int flushCapturesInProgress();
+
+ /** Construct default request */
+ virtual int constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request);
+
+ virtual int allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers);
+
+ virtual int registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers);
+
+ virtual int releaseStream(uint32_t stream_id);
+
+ // virtual int allocateReprocessStream(
+ // uint32_t width,
+ // uint32_t height,
+ // uint32_t format,
+ // const camera2_stream_ops_t *stream_ops,
+ // uint32_t *stream_id,
+ // uint32_t *format_actual,
+ // uint32_t *usage,
+ // uint32_t *max_buffers);
+
+ virtual int allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *stream_ops,
+ uint32_t *stream_id);
+
+ virtual int releaseReprocessStream(uint32_t stream_id);
+
+ virtual int triggerAction(uint32_t trigger_id,
+ int32_t ext1,
+ int32_t ext2);
+
+ /** Debug methods */
+
+ virtual int dump(int fd);
+
+public:
+ /****************************************************************************
+ * Utility methods called by configure/readout threads and pipeline
+ ***************************************************************************/
+
+ // Get information about a given stream. Will lock mMutex
+ const Stream &getStreamInfo(uint32_t streamId);
+ const ReprocessStream &getReprocessStreamInfo(uint32_t streamId);
+
+ // Notifies rest of camera subsystem of serious error
+ void signalError();
+
+private:
+ /****************************************************************************
+ * Utility methods
+ ***************************************************************************/
+ /** Construct static camera metadata, two-pass */
+ status_t constructStaticInfo(
+ camera_metadata_t **info,
+ bool sizeRequest) const;
+
+ /** Two-pass implementation of constructDefaultRequest */
+ status_t constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request,
+ bool sizeRequest) const;
+ /** Helper function for constructDefaultRequest */
+ static status_t addOrSize( camera_metadata_t *request,
+ bool sizeRequest,
+ size_t *entryCount,
+ size_t *dataCount,
+ uint32_t tag,
+ const void *entry_data,
+ size_t entry_count);
+
+ /** Determine if the stream id is listed in any currently-in-flight
+ * requests. Assumes mMutex is locked */
+ bool isStreamInUse(uint32_t streamId);
+
+ /** Determine if the reprocess stream id is listed in any
+ * currently-in-flight requests. Assumes mMutex is locked */
+ bool isReprocessStreamInUse(uint32_t streamId);
+
+ /****************************************************************************
+ * Pipeline controller threads
+ ***************************************************************************/
+
+ class ConfigureThread: public Thread {
+ public:
+ ConfigureThread(EmulatedFakeCamera2 *parent);
+ ~ConfigureThread();
+
+ status_t waitUntilRunning();
+ status_t newRequestAvailable();
+ status_t readyToRun();
+
+ bool isStreamInUse(uint32_t id);
+ int getInProgressCount();
+ private:
+ EmulatedFakeCamera2 *mParent;
+ static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+
+ bool mRunning;
+ bool threadLoop();
+
+ bool setupCapture();
+ bool setupReprocess();
+
+ bool configureNextCapture();
+ bool configureNextReprocess();
+
+ bool getBuffers();
+
+ Mutex mInputMutex; // Protects mActive, mRequestCount
+ Condition mInputSignal;
+ bool mActive; // Whether we're waiting for input requests or actively
+ // working on them
+ size_t mRequestCount;
+
+ camera_metadata_t *mRequest;
+
+ Mutex mInternalsMutex; // Lock before accessing below members.
+ bool mWaitingForReadout;
+ bool mNextNeedsJpeg;
+ bool mNextIsCapture;
+ int32_t mNextFrameNumber;
+ int64_t mNextExposureTime;
+ int64_t mNextFrameDuration;
+ int32_t mNextSensitivity;
+ Buffers *mNextBuffers;
+ };
+
+ class ReadoutThread: public Thread, private JpegCompressor::JpegListener {
+ public:
+ ReadoutThread(EmulatedFakeCamera2 *parent);
+ ~ReadoutThread();
+
+ status_t readyToRun();
+
+ // Input
+ status_t waitUntilRunning();
+ bool waitForReady(nsecs_t timeout);
+ void setNextOperation(bool isCapture,
+ camera_metadata_t *request,
+ Buffers *buffers);
+ bool isStreamInUse(uint32_t id);
+ int getInProgressCount();
+ private:
+ EmulatedFakeCamera2 *mParent;
+
+ bool mRunning;
+ bool threadLoop();
+
+ bool readyForNextCapture();
+ status_t collectStatisticsMetadata(camera_metadata_t *frame);
+
+ // Inputs
+ Mutex mInputMutex; // Protects mActive, mInFlightQueue, mRequestCount
+ Condition mInputSignal;
+ Condition mReadySignal;
+
+ bool mActive;
+
+ static const int kInFlightQueueSize = 4;
+ struct InFlightQueue {
+ bool isCapture;
+ camera_metadata_t *request;
+ Buffers *buffers;
+ } *mInFlightQueue;
+
+ size_t mInFlightHead;
+ size_t mInFlightTail;
+
+ size_t mRequestCount;
+
+ // Internals
+ Mutex mInternalsMutex;
+
+ bool mIsCapture;
+ camera_metadata_t *mRequest;
+ Buffers *mBuffers;
+
+ // Jpeg completion listeners
+ void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+ void onJpegInputDone(const StreamBuffer &inputBuffer);
+ nsecs_t mJpegTimestamp;
+ };
+
+ // 3A management thread (auto-exposure, focus, white balance)
+ class ControlThread: public Thread {
+ public:
+ ControlThread(EmulatedFakeCamera2 *parent);
+ ~ControlThread();
+
+ status_t readyToRun();
+
+ status_t waitUntilRunning();
+
+ // Interpret request's control parameters and override
+ // capture settings as needed
+ status_t processRequest(camera_metadata_t *request);
+
+ status_t triggerAction(uint32_t msgType,
+ int32_t ext1, int32_t ext2);
+ private:
+ ControlThread(const ControlThread &t);
+ ControlThread& operator=(const ControlThread &t);
+
+ // Constants controlling fake 3A behavior
+ static const nsecs_t kControlCycleDelay;
+ static const nsecs_t kMinAfDuration;
+ static const nsecs_t kMaxAfDuration;
+ static const float kAfSuccessRate;
+ static const float kContinuousAfStartRate;
+
+ static const float kAeScanStartRate;
+ static const nsecs_t kMinAeDuration;
+ static const nsecs_t kMaxAeDuration;
+ static const nsecs_t kMinPrecaptureAeDuration;
+ static const nsecs_t kMaxPrecaptureAeDuration;
+
+ static const nsecs_t kNormalExposureTime;
+ static const nsecs_t kExposureJump;
+ static const nsecs_t kMinExposureTime;
+
+ EmulatedFakeCamera2 *mParent;
+
+ bool mRunning;
+ bool threadLoop();
+
+ Mutex mInputMutex; // Protects input methods
+ Condition mInputSignal;
+
+ // Trigger notifications
+ bool mStartAf;
+ bool mCancelAf;
+ bool mStartPrecapture;
+
+ // Latest state for 3A request fields
+ uint8_t mControlMode;
+
+ uint8_t mEffectMode;
+ uint8_t mSceneMode;
+
+ uint8_t mAfMode;
+ bool mAfModeChange;
+
+ uint8_t mAwbMode;
+ uint8_t mAeMode;
+
+ // Latest trigger IDs
+ int32_t mAfTriggerId;
+ int32_t mPrecaptureTriggerId;
+
+ // Current state for 3A algorithms
+ uint8_t mAfState;
+ uint8_t mAeState;
+ uint8_t mAwbState;
+ bool mAeLock;
+
+ // Current control parameters
+ nsecs_t mExposureTime;
+
+ // Private to threadLoop and its utility methods
+
+ nsecs_t mAfScanDuration;
+ nsecs_t mAeScanDuration;
+ bool mLockAfterPassiveScan;
+
+ // Utility methods for AF
+ int processAfTrigger(uint8_t afMode, uint8_t afState);
+ int maybeStartAfScan(uint8_t afMode, uint8_t afState);
+ int updateAfScan(uint8_t afMode, uint8_t afState, nsecs_t *maxSleep);
+ void updateAfState(uint8_t newState, int32_t triggerId);
+
+ // Utility methods for precapture trigger
+ int processPrecaptureTrigger(uint8_t aeMode, uint8_t aeState);
+ int maybeStartAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState);
+ int updateAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState,
+ nsecs_t *maxSleep);
+ void updateAeState(uint8_t newState, int32_t triggerId);
+ };
+
+ /****************************************************************************
+ * Static configuration information
+ ***************************************************************************/
+private:
+ static const uint32_t kMaxRawStreamCount = 1;
+ static const uint32_t kMaxProcessedStreamCount = 3;
+ static const uint32_t kMaxJpegStreamCount = 1;
+ static const uint32_t kMaxReprocessStreamCount = 2;
+ static const uint32_t kMaxBufferCount = 4;
+ static const uint32_t kAvailableFormats[];
+ static const uint32_t kAvailableRawSizes[];
+ static const uint64_t kAvailableRawMinDurations[];
+ static const uint32_t kAvailableProcessedSizesBack[];
+ static const uint32_t kAvailableProcessedSizesFront[];
+ static const uint64_t kAvailableProcessedMinDurations[];
+ static const uint32_t kAvailableJpegSizesBack[];
+ static const uint32_t kAvailableJpegSizesFront[];
+ static const uint64_t kAvailableJpegMinDurations[];
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+protected:
+ /* Facing back (true) or front (false) switch. */
+ bool mFacingBack;
+
+private:
+ bool mIsConnected;
+
+ /** Stream manipulation */
+ uint32_t mNextStreamId;
+ uint32_t mRawStreamCount;
+ uint32_t mProcessedStreamCount;
+ uint32_t mJpegStreamCount;
+
+ uint32_t mNextReprocessStreamId;
+ uint32_t mReprocessStreamCount;
+
+ KeyedVector<uint32_t, Stream> mStreams;
+ KeyedVector<uint32_t, ReprocessStream> mReprocessStreams;
+
+ /** Simulated hardware interfaces */
+ sp<Sensor> mSensor;
+ sp<JpegCompressor> mJpegCompressor;
+
+ /** Pipeline control threads */
+ sp<ConfigureThread> mConfigureThread;
+ sp<ReadoutThread> mReadoutThread;
+ sp<ControlThread> mControlThread;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H */
diff --git a/v3/EmulatedFakeCamera3.cpp b/v3/EmulatedFakeCamera3.cpp
new file mode 100755
index 0000000..83f09f8
--- a/dev/null
+++ b/v3/EmulatedFakeCamera3.cpp
@@ -0,0 +1,2347 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera3 that encapsulates
+ * functionality of an advanced fake camera.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera3"
+#include <utils/Log.h>
+
+#include "EmulatedFakeCamera3.h"
+#include "EmulatedCameraFactory.h"
+#include <ui/Fence.h>
+#include <ui/Rect.h>
+#include <ui/GraphicBufferMapper.h>
+#include <sys/types.h>
+
+#include <cutils/properties.h>
+#include "fake-pipeline2/Sensor.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include <cmath>
+#include "../../gralloc/gralloc_priv.h"
+
+#if defined(LOG_NNDEBUG) && LOG_NNDEBUG == 0
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+namespace android {
+
+/**
+ * Constants for camera capabilities
+ */
+
+const int64_t USEC = 1000LL;
+const int64_t MSEC = USEC * 1000LL;
+const int64_t SEC = MSEC * 1000LL;
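+// These are in nanoseconds, the base unit of nsecs_t: USEC = 1 us, MSEC =
+// 1 ms, SEC = 1 s. So kNormalExposureTime below, 10 * MSEC, is
+// 10,000,000 ns, i.e. a 10 ms exposure.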
+
+const int32_t EmulatedFakeCamera3::kAvailableFormats[] = {
+ //HAL_PIXEL_FORMAT_RAW_SENSOR,
+ HAL_PIXEL_FORMAT_BLOB,
+ //HAL_PIXEL_FORMAT_RGBA_8888,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ // These are handled by YCbCr_420_888
+ HAL_PIXEL_FORMAT_YV12,
+ HAL_PIXEL_FORMAT_YCrCb_420_SP,
+ //HAL_PIXEL_FORMAT_YCbCr_420_888
+};
+
+const uint32_t EmulatedFakeCamera3::kAvailableRawSizes[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera3::kAvailableRawMinDurations[1] = {
+ (const uint64_t)Sensor::kFrameDurationRange[0]
+};
+
+const uint32_t EmulatedFakeCamera3::kAvailableProcessedSizesBack[6] = {
+ 640, 480, 320, 240,// 1280, 720
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera3::kAvailableProcessedSizesFront[4] = {
+ 640, 480, 320, 240
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera3::kAvailableProcessedMinDurations[1] = {
+ (const uint64_t)Sensor::kFrameDurationRange[0]
+};
+
+const uint32_t EmulatedFakeCamera3::kAvailableJpegSizesBack[2] = {
+ 1280,720
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera3::kAvailableJpegSizesFront[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+
+const uint64_t EmulatedFakeCamera3::kAvailableJpegMinDurations[1] = {
+ (const uint64_t)Sensor::kFrameDurationRange[0]
+};
+
+/**
+ * 3A constants
+ */
+
+// Default exposure and gain targets for different scenarios
+const nsecs_t EmulatedFakeCamera3::kNormalExposureTime = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera3::kFacePriorityExposureTime = 30 * MSEC;
+const int EmulatedFakeCamera3::kNormalSensitivity = 100;
+const int EmulatedFakeCamera3::kFacePrioritySensitivity = 400;
+const float EmulatedFakeCamera3::kExposureTrackRate = 0.1;
+const int EmulatedFakeCamera3::kPrecaptureMinFrames = 10;
+const int EmulatedFakeCamera3::kStableAeMaxFrames = 100;
+const float EmulatedFakeCamera3::kExposureWanderMin = -2;
+const float EmulatedFakeCamera3::kExposureWanderMax = 1;
+
+/**
+ * Camera device lifecycle methods
+ */
+
+EmulatedFakeCamera3::EmulatedFakeCamera3(int cameraId, bool facingBack,
+ struct hw_module_t* module) :
+ EmulatedCamera3(cameraId, module),
+ mFacingBack(facingBack) {
+ ALOGI("Constructing emulated fake camera 3 facing %s, cameraID:%d",
+ facingBack ? "back" : "front", mCameraID);
+
+ for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
+ mDefaultTemplates[i] = NULL;
+ }
+
+ /**
+ * Front cameras = limited mode
+ * Back cameras = full mode
+ */
+ //TODO limited or full mode, read this from camera driver
+ //mFullMode = facingBack;
+ mFullMode = 1;
+}
+
+EmulatedFakeCamera3::~EmulatedFakeCamera3() {
+ for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
+ if (mDefaultTemplates[i] != NULL) {
+ free_camera_metadata(mDefaultTemplates[i]);
+ }
+ }
+}
+
+status_t EmulatedFakeCamera3::Initialize() {
+ DBG_LOGB("mCameraID=%d,mStatus=%d,ddd\n", mCameraID, mStatus);
+ status_t res;
+
+#ifdef HAVE_VERSION_INFO
+ CAMHAL_LOGIB("\n--------------------------------\n"
+ "author:aml.sh multi-media team\n"
+ "branch name: %s\n"
+ "git version: %s \n"
+ "last changed: %s\n"
+ "build-time: %s\n"
+ "build-name: %s\n"
+ "uncommitted-file-num:%d\n"
+ "ssh user@%s, cd %s\n"
+ "hostname %s\n"
+ "--------------------------------\n",
+ CAMHAL_BRANCH_NAME,
+ CAMHAL_GIT_VERSION,
+ CAMHAL_LAST_CHANGED,
+ CAMHAL_BUILD_TIME,
+ CAMHAL_BUILD_NAME,
+ CAMHAL_GIT_UNCOMMIT_FILE_NUM,
+ CAMHAL_IP, CAMHAL_PATH, CAMHAL_HOSTNAME
+ );
+#endif
+
+
+ if (mStatus != STATUS_ERROR) {
+ ALOGE("%s: Already initialized!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ res = constructStaticInfo();
+ if (res != OK) {
+ ALOGE("%s: Unable to allocate static info: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ return EmulatedCamera3::Initialize();
+}
+
+status_t EmulatedFakeCamera3::connectCamera(hw_device_t** device) {
+ ALOGV("%s: E", __FUNCTION__);
+ DBG_LOGA("ddd");
+ Mutex::Autolock l(mLock);
+ status_t res;
+
+ if (mStatus != STATUS_CLOSED) {
+ ALOGE("%s: Can't connect in state %d", __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ mSensor = new Sensor();
+ mSensor->setSensorListener(this);
+
+ res = mSensor->startUp(mCameraID);
+ DBG_LOGB("mSensor startUp, mCameraID=%d\n", mCameraID);
+ if (res != NO_ERROR) return res;
+
+ mReadoutThread = new ReadoutThread(this);
+ mJpegCompressor = new JpegCompressor();
+
+ res = mReadoutThread->run("EmuCam3::readoutThread");
+ if (res != NO_ERROR) return res;
+
+ // Initialize fake 3A
+
+ mControlMode = ANDROID_CONTROL_MODE_AUTO;
+ mFacePriority = false;
+ mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+ mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+ mAfTriggerId = 0;
+ mAeTriggerId = 0;
+ mAeCurrentExposureTime = kNormalExposureTime;
+ mAeCurrentSensitivity = kNormalSensitivity;
+
+ return EmulatedCamera3::connectCamera(device);
+}
+
+status_t EmulatedFakeCamera3::closeCamera() {
+ ALOGV("%s: E", __FUNCTION__);
+ status_t res;
+ {
+ Mutex::Autolock l(mLock);
+ if (mStatus == STATUS_CLOSED) return OK;
+ res = mSensor->streamOff();
+
+ res = mSensor->shutDown();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+ return res;
+ }
+ mSensor.clear();
+
+ mReadoutThread->requestExit();
+ }
+
+ mReadoutThread->join();
+
+ {
+ Mutex::Autolock l(mLock);
+ // Clear out private stream information
+ for (StreamIterator s = mStreams.begin(); s != mStreams.end(); s++) {
+ PrivateStreamInfo *privStream =
+ static_cast<PrivateStreamInfo*>((*s)->priv);
+ delete privStream;
+ (*s)->priv = NULL;
+ }
+ mStreams.clear();
+ mReadoutThread.clear();
+ }
+
+ return EmulatedCamera3::closeCamera();
+}
+
+status_t EmulatedFakeCamera3::getCameraInfo(struct camera_info *info) {
+ char property[PROPERTY_VALUE_MAX];
+ info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+
+ if (mFacingBack) {
+ property_get("ro.camera.orientation.back", property, "270");
+ } else {
+ property_get("ro.camera.orientation.front", property, "90");
+ }
+ info->orientation = atoi(property);
+ return EmulatedCamera3::getCameraInfo(info);
+}
+
+/**
+ * Camera3 interface methods
+ */
+
+status_t EmulatedFakeCamera3::configureStreams(
+ camera3_stream_configuration *streamList) {
+ Mutex::Autolock l(mLock);
+    int width = 0, height = 0, pixelfmt = 0; // pixelfmt stays 0 if no non-BLOB output stream is found
+ bool isRestart = false;
+ DBG_LOGB("%s: %d streams", __FUNCTION__, streamList->num_streams);
+
+ if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) {
+ ALOGE("%s: Cannot configure streams in state %d",
+ __FUNCTION__, mStatus);
+ return NO_INIT;
+ }
+
+ /**
+ * Sanity-check input list.
+ */
+ if (streamList == NULL) {
+ ALOGE("%s: NULL stream configuration", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (streamList->streams == NULL) {
+ ALOGE("%s: NULL stream list", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (streamList->num_streams < 1) {
+ ALOGE("%s: Bad number of streams requested: %d", __FUNCTION__,
+ streamList->num_streams);
+ return BAD_VALUE;
+ }
+
+ camera3_stream_t *inputStream = NULL;
+ for (size_t i = 0; i < streamList->num_streams; i++) {
+ camera3_stream_t *newStream = streamList->streams[i];
+
+ if (newStream == NULL) {
+ ALOGE("%s: Stream index %zu was NULL",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+
+ if (newStream->max_buffers <= 0) {
+ isRestart = true;//mSensor->isNeedRestart(newStream->width, newStream->height, newStream->format);
+ DBG_LOGB("format=%x, w*h=%dx%d, stream_type=%d, max_buffers=%d, isRestart=%d\n",
+ newStream->format, newStream->width, newStream->height,
+ newStream->stream_type, newStream->max_buffers,
+ isRestart);
+ }
+ ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
+ __FUNCTION__, newStream, i, newStream->stream_type,
+ newStream->usage,
+ newStream->format);
+
+ if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
+ newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
+ if (inputStream != NULL) {
+
+ ALOGE("%s: Multiple input streams requested!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ inputStream = newStream;
+ }
+
+ bool validFormat = false;
+ for (size_t f = 0;
+ f < sizeof(kAvailableFormats)/sizeof(kAvailableFormats[0]);
+ f++) {
+ if (newStream->format == kAvailableFormats[f]) {
+ validFormat = true;
+ //HAL_PIXEL_FORMAT_YCrCb_420_SP,
+ if (HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED == newStream->format)
+ newStream->format = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+ break;
+ }
+ DBG_LOGB("stream_type=%d\n", newStream->stream_type);
+ }
+ if (!validFormat) {
+ ALOGE("%s: Unsupported stream format 0x%x requested",
+ __FUNCTION__, newStream->format);
+ return BAD_VALUE;
+ }
+ }
+ mInputStream = inputStream;
+ width = 0;
+ height = 0;
+ for (size_t i = 0; i < streamList->num_streams; i++) {
+ camera3_stream_t *newStream = streamList->streams[i];
+ DBG_LOGB("find propert width and height, format=%x, w*h=%dx%d, stream_type=%d, max_buffers=%d\n",
+ newStream->format, newStream->width, newStream->height, newStream->stream_type, newStream->max_buffers);
+ if ((HAL_PIXEL_FORMAT_BLOB != newStream->format) &&
+ (CAMERA3_STREAM_OUTPUT == newStream->stream_type)) {
+
+ if (width < newStream->width)
+ width = newStream->width;
+
+ if (height < newStream->height)
+ height = newStream->height;
+
+ pixelfmt = newStream->format;
+ }
+
+ }
+
+ //TODO modify this ugly code
+ if (isRestart) {
+ isRestart = mSensor->isNeedRestart(width, height, pixelfmt);
+ }
+
+ if (isRestart) {
+ mSensor->streamOff();
+ pixelfmt = mSensor->getOutputFormat();
+ mSensor->setOutputFormat(width, height, pixelfmt);
+ mSensor->streamOn();
+ DBG_LOGB("width=%d, height=%d, pixelfmt=%.4s\n",
+ width, height, (char*)&pixelfmt);
+ }
+
+ /**
+ * Initially mark all existing streams as not alive
+ */
+ for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) {
+ PrivateStreamInfo *privStream =
+ static_cast<PrivateStreamInfo*>((*s)->priv);
+ privStream->alive = false;
+ }
+
+ /**
+ * Find new streams and mark still-alive ones
+ */
+ for (size_t i = 0; i < streamList->num_streams; i++) {
+ camera3_stream_t *newStream = streamList->streams[i];
+ if (newStream->priv == NULL) {
+ // New stream, construct info
+ PrivateStreamInfo *privStream = new PrivateStreamInfo();
+ privStream->alive = true;
+ privStream->registered = false;
+
+ switch (newStream->stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
+ newStream->usage = (GRALLOC_USAGE_HW_TEXTURE
+ | GRALLOC_USAGE_HW_RENDER
+ | GRALLOC_USAGE_SW_READ_RARELY
+ | GRALLOC_USAGE_PRIVATE_1
+ | GRALLOC_USAGE_SW_WRITE_NEVER
+ | GRALLOC_USAGE_HW_CAMERA_WRITE);
+ } else {
+ newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE
+ | GRALLOC_USAGE_PRIVATE_1;
+ }
+ break;
+ case CAMERA3_STREAM_INPUT:
+ newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
+ break;
+ case CAMERA3_STREAM_BIDIRECTIONAL:
+ newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ |
+ GRALLOC_USAGE_HW_CAMERA_WRITE;
+ break;
+ }
+ DBG_LOGB("stream_type=%d\n", newStream->stream_type);
+ newStream->max_buffers = kMaxBufferCount;
+ newStream->priv = privStream;
+ mStreams.push_back(newStream);
+ } else {
+ // Existing stream, mark as still alive.
+ PrivateStreamInfo *privStream =
+ static_cast<PrivateStreamInfo*>(newStream->priv);
+ privStream->alive = true;
+ }
+ DBG_LOGB("%d, newStream=%p, stream_type=%d, usage=%x, priv=%p\n",
+ i, newStream, newStream->stream_type, newStream->usage, newStream->priv);
+ }
+
+ /**
+ * Reap the dead streams
+ */
+ for (StreamIterator s = mStreams.begin(); s != mStreams.end();) {
+ PrivateStreamInfo *privStream =
+ static_cast<PrivateStreamInfo*>((*s)->priv);
+ if (!privStream->alive) {
+ DBG_LOGA("delete not alive streams");
+ (*s)->priv = NULL;
+ delete privStream;
+ s = mStreams.erase(s);
+ } else {
+ ++s;
+ }
+ }
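+
+    // The three loops above form a simple mark-and-sweep over mStreams:
+    // every known stream starts out marked dead, streams present in
+    // streamList are created or re-marked alive, and the remaining dead
+    // entries have just been freed and erased.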
+
+ /**
+ * Can't reuse settings across configure call
+ */
+ mPrevSettings.clear();
+
+ return OK;
+}
+
+status_t EmulatedFakeCamera3::registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) {
+ DBG_LOGB("%s: E", __FUNCTION__);
+ Mutex::Autolock l(mLock);
+
+ /**
+ * Sanity checks
+ */
+ DBG_LOGA("==========sanity checks\n");
+
+ // OK: register streams at any time during configure
+ // (but only once per stream)
+ if (mStatus != STATUS_READY && mStatus != STATUS_ACTIVE) {
+ ALOGE("%s: Cannot register buffers in state %d",
+ __FUNCTION__, mStatus);
+ return NO_INIT;
+ }
+
+ if (bufferSet == NULL) {
+ ALOGE("%s: NULL buffer set!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ StreamIterator s = mStreams.begin();
+ for (; s != mStreams.end(); ++s) {
+ if (bufferSet->stream == *s) break;
+ }
+ if (s == mStreams.end()) {
+ ALOGE("%s: Trying to register buffers for a non-configured stream!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ /**
+ * Register the buffers. This doesn't mean anything to the emulator besides
+ * marking them off as registered.
+ */
+
+ PrivateStreamInfo *privStream =
+ static_cast<PrivateStreamInfo*>((*s)->priv);
+
+#if 0
+ if (privStream->registered) {
+ ALOGE("%s: Illegal to register buffer more than once", __FUNCTION__);
+ return BAD_VALUE;
+ }
+#endif
+
+ privStream->registered = true;
+
+ return OK;
+}
+
+const camera_metadata_t* EmulatedFakeCamera3::constructDefaultRequestSettings(
+ int type) {
+ DBG_LOGB("%s: E", __FUNCTION__);
+ Mutex::Autolock l(mLock);
+
+    if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) {
+ ALOGE("%s: Unknown request settings template: %d",
+ __FUNCTION__, type);
+ return NULL;
+ }
+
+ /**
+ * Cache is not just an optimization - pointer returned has to live at
+ * least as long as the camera device instance does.
+ */
+ if (mDefaultTemplates[type] != NULL) {
+ return mDefaultTemplates[type];
+ }
+
+ CameraMetadata settings;
+
+ /** android.request */
+
+ static const int android_sync_max_latency = ANDROID_SYNC_MAX_LATENCY_UNKNOWN;
+ settings.update(ANDROID_SYNC_MAX_LATENCY, &android_sync_max_latency, 1);
+
+ static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
+ settings.update(ANDROID_REQUEST_TYPE, &requestType, 1);
+
+ static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+ settings.update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+ static const int32_t id = 0;
+ settings.update(ANDROID_REQUEST_ID, &id, 1);
+
+ static const int32_t frameCount = 0;
+ settings.update(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+ /** android.lens */
+
+ static const float focusDistance = 0;
+ settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+ static const float aperture = 2.8f;
+ settings.update(ANDROID_LENS_APERTURE, &aperture, 1);
+
+ static const float focalLength = 5.0f;
+ settings.update(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+ static const float filterDensity = 0;
+ settings.update(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+ static const uint8_t opticalStabilizationMode =
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ &opticalStabilizationMode, 1);
+
+ // FOCUS_RANGE set only in frame
+
+ /** android.sensor */
+
+ static const int64_t exposureTime = 10 * MSEC;
+ settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+
+ static const int64_t frameDuration = 33333333L; // 1/30 s
+ settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+
+ static const int32_t sensitivity = 100;
+ settings.update(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+
+ // TIMESTAMP set only in frame
+
+ /** android.flash */
+
+ static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+ settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
+
+ static const uint8_t flashPower = 10;
+ settings.update(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+ static const int64_t firingTime = 0;
+ settings.update(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+
+ /** Processing block modes */
+ uint8_t hotPixelMode = 0;
+ uint8_t demosaicMode = 0;
+ uint8_t noiseMode = 0;
+ uint8_t shadingMode = 0;
+ uint8_t colorMode = 0;
+ uint8_t tonemapMode = 0;
+ uint8_t edgeMode = 0;
+ switch (type) {
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ // fall-through
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ // fall-through
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+ shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+ tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+ edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+ break;
+ case CAMERA2_TEMPLATE_PREVIEW:
+ // fall-through
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ // fall-through
+ default:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+ shadingMode = ANDROID_SHADING_MODE_FAST;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+ tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+ edgeMode = ANDROID_EDGE_MODE_FAST;
+ break;
+ }
+ settings.update(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+ settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+ settings.update(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+ settings.update(ANDROID_SHADING_MODE, &shadingMode, 1);
+ settings.update(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+ settings.update(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+ settings.update(ANDROID_EDGE_MODE, &edgeMode, 1);
+
+ /** android.noise */
+ static const uint8_t noiseStrength = 5;
+ settings.update(ANDROID_NOISE_REDUCTION_STRENGTH, &noiseStrength, 1);
+
+ /** android.color */
+ static const float colorTransform[9] = {
+ 1.0f, 0.f, 0.f,
+ 0.f, 1.f, 0.f,
+ 0.f, 0.f, 1.f
+ };
+ settings.update(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+ /** android.tonemap */
+ static const float tonemapCurve[4] = {
+ 0.f, 0.f,
+ 1.f, 1.f
+ };
+ settings.update(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+ settings.update(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+ settings.update(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+
+ /** android.edge */
+ static const uint8_t edgeStrength = 5;
+ settings.update(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
+
+ /** android.scaler */
+ static const int32_t cropRegion[] = {
+ 0, 0, (int32_t)Sensor::kResolution[0], (int32_t)Sensor::kResolution[1],
+ };
+ settings.update(ANDROID_SCALER_CROP_REGION, cropRegion, 4);
+
+ /** android.jpeg */
+ static const uint8_t jpegQuality = 80;
+ settings.update(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+ static const int32_t thumbnailSize[2] = {
+ 640, 480
+ };
+ settings.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+ static const uint8_t thumbnailQuality = 80;
+ settings.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+ static const double gpsCoordinates[2] = {
+ 0, 0
+ };
+ settings.update(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+ static const uint8_t gpsProcessingMethod[32] = "None";
+ settings.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+ static const int64_t gpsTimestamp = 0;
+ settings.update(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+ static const int32_t jpegOrientation = 0;
+ settings.update(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+
+ /** android.stats */
+
+ static const uint8_t faceDetectMode =
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+ static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
+
+ static const uint8_t sharpnessMapMode =
+ ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
+
+ // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+ // sharpnessMap only in frames
+
+ /** android.control */
+
+ uint8_t controlIntent = 0;
+ switch (type) {
+ case CAMERA2_TEMPLATE_PREVIEW:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ break;
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+ break;
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ break;
+ default:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+ break;
+ }
+ settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+ static const uint8_t controlMode = ANDROID_CONTROL_MODE_OFF;
+ settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+ static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+ static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+ settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+ static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
+ settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+ static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+ settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+ static const int32_t controlRegions[5] = {
+ 0, 0, (int32_t)Sensor::kResolution[0], (int32_t)Sensor::kResolution[1],
+ 1000
+ };
+ settings.update(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+ //static const int32_t aeExpCompensation = 0;
+ //settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+ static const int32_t aeTargetFpsRange[2] = {
+ 10, 30
+ };
+ settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+ static const uint8_t aeAntibandingMode =
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+ static const uint8_t awbMode =
+ ANDROID_CONTROL_AWB_MODE_AUTO;
+ settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+ static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+ settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+ settings.update(ANDROID_CONTROL_AWB_REGIONS, controlRegions, 5);
+
+ uint8_t afMode = 0;
+ switch (type) {
+ case CAMERA2_TEMPLATE_PREVIEW:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ case CAMERA2_TEMPLATE_STILL_CAPTURE:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_RECORD:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ break;
+ default:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ }
+ settings.update(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+ settings.update(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+ static const uint8_t vstabMode =
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+ // aeState, awbState, afState only in frame
+
+ mDefaultTemplates[type] = settings.release();
+
+ return mDefaultTemplates[type];
+}
+
+status_t EmulatedFakeCamera3::processCaptureRequest(
+ camera3_capture_request *request) {
+
+ Mutex::Autolock l(mLock);
+ status_t res;
+
+ /** Validation */
+
+ if (mStatus < STATUS_READY) {
+ ALOGE("%s: Can't submit capture requests in state %d", __FUNCTION__,
+ mStatus);
+ return INVALID_OPERATION;
+ }
+
+ if (request == NULL) {
+ ALOGE("%s: NULL request!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ uint32_t frameNumber = request->frame_number;
+
+ if (request->settings == NULL && mPrevSettings.isEmpty()) {
+ ALOGE("%s: Request %d: NULL settings for first request after"
+ "configureStreams()", __FUNCTION__, frameNumber);
+ return BAD_VALUE;
+ }
+
+ if (request->input_buffer != NULL &&
+ request->input_buffer->stream != mInputStream) {
+ DBG_LOGB("%s: Request %d: Input buffer not from input stream!",
+ __FUNCTION__, frameNumber);
+ DBG_LOGB("%s: Bad stream %p, expected: %p",
+ __FUNCTION__, request->input_buffer->stream,
+ mInputStream);
+ DBG_LOGB("%s: Bad stream type %d, expected stream type %d",
+ __FUNCTION__, request->input_buffer->stream->stream_type,
+ mInputStream ? mInputStream->stream_type : -1);
+
+ return BAD_VALUE;
+ }
+
+ if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
+ ALOGE("%s: Request %d: No output buffers provided!",
+ __FUNCTION__, frameNumber);
+ return BAD_VALUE;
+ }
+
+ // Validate all buffers, starting with input buffer if it's given
+
+ ssize_t idx;
+ const camera3_stream_buffer_t *b;
+ if (request->input_buffer != NULL) {
+ idx = -1;
+ b = request->input_buffer;
+ } else {
+ idx = 0;
+ b = request->output_buffers;
+ }
+ do {
+ PrivateStreamInfo *priv =
+ static_cast<PrivateStreamInfo*>(b->stream->priv);
+ if (priv == NULL) {
+ ALOGE("%s: Request %d: Buffer %zu: Unconfigured stream!",
+ __FUNCTION__, frameNumber, idx);
+ return BAD_VALUE;
+ }
+ if (!priv->alive || !priv->registered) {
+ ALOGE("%s: Request %d: Buffer %zu: Unregistered or dead stream!",
+ __FUNCTION__, frameNumber, idx);
+ return BAD_VALUE;
+ }
+ if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+ ALOGE("%s: Request %d: Buffer %zu: Status not OK!",
+ __FUNCTION__, frameNumber, idx);
+ return BAD_VALUE;
+ }
+ if (b->release_fence != -1) {
+ ALOGE("%s: Request %d: Buffer %zu: Has a release fence!",
+ __FUNCTION__, frameNumber, idx);
+ return BAD_VALUE;
+ }
+ if (b->buffer == NULL) {
+ ALOGE("%s: Request %d: Buffer %zu: NULL buffer handle!",
+ __FUNCTION__, frameNumber, idx);
+ return BAD_VALUE;
+ }
+ idx++;
+ b = &(request->output_buffers[idx]);
+ } while (idx < (ssize_t)request->num_output_buffers);
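+
+    // Note on the validation loop: when an input buffer is present, idx
+    // starts at -1 so the input buffer is checked first; the idx++ at the
+    // bottom then walks output_buffers[0 .. num_output_buffers-1].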
+
+ // TODO: Validate settings parameters
+
+ /**
+ * Start processing this request
+ */
+
+ mStatus = STATUS_ACTIVE;
+
+ CameraMetadata settings;
+ camera_metadata_entry e;
+
+ if (request->settings == NULL) {
+ settings.acquire(mPrevSettings);
+ uint8_t controlMode = ANDROID_CONTROL_MODE_OFF;
+ settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+ } else {
+ settings = request->settings;
+
+ uint8_t antiBanding = 0;
+ uint8_t effectMode = 0;
+ int exposureCmp = 0;
+
+ e = settings.find(ANDROID_CONTROL_AE_ANTIBANDING_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No antibanding entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ antiBanding = e.data.u8[0];
+ mSensor->setAntiBanding(antiBanding);
+
+ e = settings.find(ANDROID_CONTROL_EFFECT_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No antibanding entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ effectMode = e.data.u8[0];
+ mSensor->setEffect(effectMode);
+
+
+ e = settings.find(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION);
+ if (e.count == 0) {
+ ALOGE("%s: No exposure entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ exposureCmp = e.data.i32[0];
+ DBG_LOGB("set expsore compensaton %d\n", exposureCmp);
+ mSensor->setExposure(exposureCmp);
+ }
+
+ res = process3A(settings);
+ if (res != OK) {
+ ALOGE("%s: process3A failed!", __FUNCTION__);
+ //return res;
+ }
+
+ // TODO: Handle reprocessing
+
+ /**
+ * Get ready for sensor config
+ */
+
+ nsecs_t exposureTime;
+ nsecs_t frameDuration;
+ uint32_t sensitivity;
+ bool needJpeg = false;
+ struct ExifInfo info;
+
+ exposureTime = settings.find(ANDROID_SENSOR_EXPOSURE_TIME).data.i64[0];
+ frameDuration = settings.find(ANDROID_SENSOR_FRAME_DURATION).data.i64[0];
+ sensitivity = settings.find(ANDROID_SENSOR_SENSITIVITY).data.i32[0];
+
+ Buffers *sensorBuffers = new Buffers();
+ HalBufferVector *buffers = new HalBufferVector();
+
+ sensorBuffers->setCapacity(request->num_output_buffers);
+ buffers->setCapacity(request->num_output_buffers);
+
+ // Process all the buffers we got for output, constructing internal buffer
+ // structures for them, and lock them for writing.
+ for (size_t i = 0; i < request->num_output_buffers; i++) {
+ const camera3_stream_buffer &srcBuf = request->output_buffers[i];
+ const private_handle_t *privBuffer =
+ (const private_handle_t*)(*srcBuf.buffer);
+ StreamBuffer destBuf;
+ destBuf.streamId = kGenericStreamId;
+ destBuf.width = srcBuf.stream->width;
+ destBuf.height = srcBuf.stream->height;
+ destBuf.format = privBuffer->format; // Use real private format
+ destBuf.stride = srcBuf.stream->width; // TODO: query from gralloc
+ destBuf.buffer = srcBuf.buffer;
+
+ if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) {
+ needJpeg = true;
+ memset(&info,0,sizeof(struct ExifInfo));
+ info.orientation = settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0];
+ if ((info.orientation==90)||(info.orientation==270)) {
+ info.mainwidth = srcBuf.stream->height;
+ info.mainheight = srcBuf.stream->width;
+ } else {
+ info.mainwidth = srcBuf.stream->width;
+ info.mainheight = srcBuf.stream->height;
+ }
+ mSensor->setOutputFormat(info.mainwidth,info.mainheight,V4L2_PIX_FMT_RGB24);
+ }
+
+ // Wait on fence
+ sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
+ res = bufferAcquireFence->wait(kFenceTimeoutMs);
+ if (res == TIMED_OUT) {
+ ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
+ __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
+ }
+ if (res == OK) {
+ // Lock buffer for writing
+ const Rect rect(destBuf.width, destBuf.height);
+ if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ if (privBuffer->format == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
+ android_ycbcr ycbcr = android_ycbcr();
+ res = GraphicBufferMapper::get().lockYCbCr(
+ *(destBuf.buffer),
+ GRALLOC_USAGE_HW_CAMERA_WRITE, rect,
+ &ycbcr);
+ // This is only valid because we know that emulator's
+ // YCbCr_420_888 is really contiguous NV21 under the hood
+ destBuf.img = static_cast<uint8_t*>(ycbcr.y);
+ } else {
+ ALOGE("Unexpected private format for flexible YUV: 0x%x",
+ privBuffer->format);
+ res = INVALID_OPERATION;
+ }
+ } else {
+ res = GraphicBufferMapper::get().lock(*(destBuf.buffer),
+ GRALLOC_USAGE_HW_CAMERA_WRITE, rect,
+ (void**)&(destBuf.img));
+ }
+ if (res != OK) {
+ ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
+ __FUNCTION__, frameNumber, i);
+ }
+ }
+
+ if (res != OK) {
+ // Either waiting or locking failed. Unlock locked buffers and bail
+ // out.
+ for (size_t j = 0; j < i; j++) {
+ GraphicBufferMapper::get().unlock(
+                    *(request->output_buffers[j].buffer));
+ }
+ ALOGE("line:%d, format for this usage: %d x %d, usage %x, format=%x, returned\n",
+ __LINE__, destBuf.width, destBuf.height, privBuffer->usage, privBuffer->format);
+ return NO_INIT;
+ }
+
+ sensorBuffers->push_back(destBuf);
+ buffers->push_back(srcBuf);
+ }
+
+ if (needJpeg){
+ if ((info.orientation==90)||(info.orientation==270)) {
+ info.thumbwidth = settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
+ info.thumbheight = settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
+ } else {
+ info.thumbwidth = settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
+ info.thumbheight = settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
+ }
+// info.latitude = settings.find(ANDROID_JPEG_GPS_COORDINATES).data.d[0];
+// info.longitude = settings.find(ANDROID_JPEG_GPS_COORDINATES).data.d[1];
+// info.gpsProcessingMethod = settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).data.u8;
+// info.gpsTimestamp = settings.find(ANDROID_JPEG_GPS_TIMESTAMP).data.i64[0];
+// info.focallen = settings.find(ANDROID_LENS_FOCAL_LENGTH).data.f[0];
+// info.orientation = settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0];
+ mJpegCompressor->SetExifInfo(info);
+ mSensor->setPictureRotate(info.orientation);
+ DBG_LOGB("%s::thumbnailSize_width=%d,thumbnailSize_height=%d,mainsize_width=%d,mainsize_height=%d,jpegOrientation=%d",__FUNCTION__,
+ info.thumbwidth,info.thumbheight,info.mainwidth,info.mainheight,info.orientation);
+ }
+ /**
+ * Wait for JPEG compressor to not be busy, if needed
+ */
+ if (needJpeg) {
+ bool ready = mJpegCompressor->waitForDone(kFenceTimeoutMs);
+ if (!ready) {
+ ALOGE("%s: Timeout waiting for JPEG compression to complete!",
+ __FUNCTION__);
+ return NO_INIT;
+ }
+ }
+
+ /**
+ * Wait until the in-flight queue has room
+ */
+ res = mReadoutThread->waitForReadout();
+ if (res != OK) {
+ ALOGE("%s: Timeout waiting for previous requests to complete!",
+ __FUNCTION__);
+ return NO_INIT;
+ }
+
+ /**
+ * Wait until sensor's ready. This waits for lengthy amounts of time with
+ * mLock held, but the interface spec is that no other calls may by done to
+ * the HAL by the framework while process_capture_request is happening.
+ */
+ int syncTimeoutCount = 0;
+ while(!mSensor->waitForVSync(kSyncWaitTimeout)) {
+ if (mStatus == STATUS_ERROR) {
+ return NO_INIT;
+ }
+ if (syncTimeoutCount == kMaxSyncTimeoutCount) {
+ ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
+ __FUNCTION__, frameNumber,
+ kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
+ return NO_INIT;
+ }
+ syncTimeoutCount++;
+ }
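+    // Worst case this loop blocks with mLock held for kSyncWaitTimeout *
+    // kMaxSyncTimeoutCount (10 ms * 1000 = 10 s) before giving up.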
+
+ /**
+ * Configure sensor and queue up the request to the readout thread
+ */
+ mSensor->setExposureTime(exposureTime);
+ mSensor->setFrameDuration(frameDuration);
+ mSensor->setSensitivity(sensitivity);
+ mSensor->setDestinationBuffers(sensorBuffers);
+ mSensor->setFrameNumber(request->frame_number);
+
+ ReadoutThread::Request r;
+ r.frameNumber = request->frame_number;
+ r.settings = settings;
+ r.sensorBuffers = sensorBuffers;
+ r.buffers = buffers;
+
+ mReadoutThread->queueCaptureRequest(r);
+ ALOGVV("%s: Queued frame %d", __FUNCTION__, request->frame_number);
+
+ // Cache the settings for next time
+ mPrevSettings.acquire(settings);
+
+ return OK;
+}
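+
+/*
+ * Sketch of the per-request flow implemented above:
+ *
+ *   processCaptureRequest()
+ *     -> wait on acquire fences, lock output buffers
+ *     -> (BLOB stream) set EXIF info and RGB24 sensor output
+ *     -> wait for JPEG compressor, readout queue room, sensor VSync
+ *     -> program exposure/duration/sensitivity and destination buffers
+ *     -> ReadoutThread::queueCaptureRequest()
+ *
+ * ReadoutThread::threadLoop() then waits for the sensor frame, hands BLOB
+ * buffers to the JpegCompressor, and returns everything else through
+ * sendCaptureResult().
+ */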
+
+/** Debug methods */
+
+void EmulatedFakeCamera3::dump(int fd) {
+ DBG_LOGA("dump\n");
+
+}
+
+/** Tag query methods */
+const char* EmulatedFakeCamera3::getVendorSectionName(uint32_t tag) {
+ return NULL;
+}
+
+const char* EmulatedFakeCamera3::getVendorTagName(uint32_t tag) {
+ return NULL;
+}
+
+int EmulatedFakeCamera3::getVendorTagType(uint32_t tag) {
+ return 0;
+}
+
+/**
+ * Private methods
+ */
+
+status_t EmulatedFakeCamera3::constructStaticInfo() {
+
+ CameraMetadata info;
+ int32_t picSizes[10];
+ int count;
+ uint8_t maxCount = 10;
+
+ sp<Sensor> s = new Sensor();
+ s->startUp(mCameraID);
+ // android.lens
+
+ // 5 cm min focus distance for back camera, infinity (fixed focus) for front
+ // TODO read this ioctl from camera driver
+ DBG_LOGB("mCameraID=%d,mCameraInfo=%p\n", mCameraID, mCameraInfo);
+ const float minFocusDistance = mFacingBack ? 1.0/0.05 : 0.0;
+ info.update(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ &minFocusDistance, 1);
+
+ // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
+ const float hyperFocalDistance = mFacingBack ? 1.0/5.0 : 0.0;
+    info.update(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+            &hyperFocalDistance, 1);
+
+ static const float focalLength = 3.30f; // mm
+ info.update(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ &focalLength, 1);
+ static const float aperture = 2.8f;
+ info.update(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ &aperture, 1);
+ static const float filterDensity = 0;
+ info.update(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+ &filterDensity, 1);
+ static const uint8_t availableOpticalStabilization =
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ info.update(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ &availableOpticalStabilization, 1);
+
+ static const int32_t lensShadingMapSize[] = {1, 1};
+ info.update(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+ sizeof(lensShadingMapSize)/sizeof(int32_t));
+
+ uint8_t lensFacing = mFacingBack ?
+ ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+ info.update(ANDROID_LENS_FACING, &lensFacing, 1);
+
+ float lensPosition[3];
+ if (mFacingBack) {
+ // Back-facing camera is center-top on device
+ lensPosition[0] = 0;
+ lensPosition[1] = 20;
+ lensPosition[2] = -5;
+ } else {
+ // Front-facing camera is center-right on device
+ lensPosition[0] = 20;
+ lensPosition[1] = 20;
+ lensPosition[2] = 0;
+ }
+ info.update(ANDROID_LENS_POSITION, lensPosition, sizeof(lensPosition)/
+ sizeof(float));
+
+ // android.sensor
+
+ info.update(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ Sensor::kExposureTimeRange, 2);
+
+ info.update(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ &Sensor::kFrameDurationRange[1], 1);
+
+ info.update(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ Sensor::kSensitivityRange,
+ sizeof(Sensor::kSensitivityRange)
+ /sizeof(int32_t));
+
+ info.update(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ &Sensor::kColorFilterArrangement, 1);
+
+ static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm
+ info.update(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ sensorPhysicalSize, 2);
+
+ info.update(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ (int32_t*)Sensor::kResolution, 2);
+
+ int32_t full_size[] = {640, 480, 1600, 1200};
+ info.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ (int32_t*)full_size, 4);
+ //(int32_t*)Sensor::kResolution, 2);
+
+ info.update(ANDROID_SENSOR_INFO_WHITE_LEVEL,
+ (int32_t*)&Sensor::kMaxRawValue, 1);
+
+ static const int32_t blackLevelPattern[4] = {
+ (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel,
+ (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel
+ };
+ info.update(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
+ blackLevelPattern, sizeof(blackLevelPattern)/sizeof(int32_t));
+
+ static const int32_t orientation = 0; // unrotated (0 degrees)
+ info.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+
+ //TODO: sensor color calibration fields
+
+ // android.flash
+ static const uint8_t flashAvailable = 0;
+ info.update(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+
+ static const int64_t flashChargeDuration = 0;
+ info.update(ANDROID_FLASH_INFO_CHARGE_DURATION, &flashChargeDuration, 1);
+
+ // android.tonemap
+
+ static const int32_t tonemapCurvePoints = 128;
+ info.update(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+
+ // android.scaler
+
+ info.update(ANDROID_SCALER_AVAILABLE_FORMATS,
+ kAvailableFormats,
+ sizeof(kAvailableFormats)/sizeof(int32_t));
+ DBG_LOGB("jiyu.yang, sizeof(kAvailableFormats)/sizeof(int32_t))=%d\n",
+ sizeof(kAvailableFormats)/sizeof(int32_t));
+
+#if 0
+ info.update(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+ (int32_t*)kAvailableRawSizes,
+ sizeof(kAvailableRawSizes)/sizeof(uint32_t));
+#endif
+
+ info.update(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+ (int64_t*)kAvailableRawMinDurations,
+ sizeof(kAvailableRawMinDurations)/sizeof(uint64_t));
+
+ //for version 3.2 ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+ if (mFacingBack) {
+ count = sizeof(picSizes)/sizeof(picSizes[0]);
+ DBG_LOGB("count=%d\n", count);
+ count = s->getPictureSizes(picSizes, count, true);
+ info.update(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+ picSizes, count);
+ for (;count >0; count -=2){
+ DBG_LOGB("preview size:%dx%d\n", picSizes[count-2], picSizes[count-1]);
+ }
+ } else {
+ info.update(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+ (int32_t*)kAvailableProcessedSizesFront,
+ sizeof(kAvailableProcessedSizesFront)/sizeof(uint32_t));
+ }
+
+ info.update(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
+ (int64_t*)kAvailableProcessedMinDurations,
+ sizeof(kAvailableProcessedMinDurations)/sizeof(uint64_t));
+
+#if 0
+ if (mFacingBack) {
+ info.update(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+ (int32_t*)kAvailableJpegSizesBack,
+ sizeof(kAvailableJpegSizesBack)/sizeof(uint32_t));
+ } else {
+ info.update(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+ (int32_t*)kAvailableJpegSizesFront,
+ sizeof(kAvailableJpegSizesFront)/sizeof(uint32_t));
+ }
+#endif
+#if 1
+ count = sizeof(picSizes)/sizeof(picSizes[0]);
+ DBG_LOGB("count=%d\n", count);
+ count = s->getPictureSizes(picSizes, count, false);
+ info.update(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, picSizes, count);
+ for (;count >0; count -=2){
+ DBG_LOGB("size%dx%d\n", picSizes[count-2], picSizes[count-1]);
+ }
+#endif
+
+
+ info.update(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
+ (int64_t*)kAvailableJpegMinDurations,
+ sizeof(kAvailableJpegMinDurations)/sizeof(uint64_t));
+
+ static const float maxZoom = 10;
+ info.update(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ &maxZoom, 1);
+
+ // android.jpeg
+
+ static const int32_t jpegThumbnailSizes[] = {
+ 0, 0,
+ 160, 120,
+ 320, 240
+ };
+ info.update(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ jpegThumbnailSizes, sizeof(jpegThumbnailSizes)/sizeof(int32_t));
+
+ static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+ info.update(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+
+ // android.stats
+
+ static const uint8_t availableFaceDetectModes[] = {
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+ ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+ ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
+ };
+
+ info.update(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ availableFaceDetectModes,
+ sizeof(availableFaceDetectModes));
+
+ static const int32_t maxFaceCount = 8;
+ info.update(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ &maxFaceCount, 1);
+
+ static const int32_t histogramSize = 64;
+ info.update(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+ &histogramSize, 1);
+
+ static const int32_t maxHistogramCount = 1000;
+ info.update(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+ &maxHistogramCount, 1);
+
+ static const int32_t sharpnessMapSize[2] = {64, 64};
+ info.update(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+ sharpnessMapSize, sizeof(sharpnessMapSize)/sizeof(int32_t));
+
+ static const int32_t maxSharpnessMapValue = 1000;
+ info.update(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+ &maxSharpnessMapValue, 1);
+
+ // android.control
+
+ static const uint8_t availableSceneModes[] = {
+ ANDROID_CONTROL_SCENE_MODE_DISABLED
+ };
+ info.update(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ availableSceneModes, sizeof(availableSceneModes));
+
+ static const uint8_t availableEffects[] = {
+ ANDROID_CONTROL_EFFECT_MODE_OFF
+ };
+ info.update(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ availableEffects, sizeof(availableEffects));
+
+ static const int32_t max3aRegions[] = {/*AE*/ 0,/*AWB*/ 0,/*AF*/ 0};
+ info.update(ANDROID_CONTROL_MAX_REGIONS,
+ max3aRegions, sizeof(max3aRegions)/sizeof(max3aRegions[0]));
+
+ static const uint8_t availableAeModes[] = {
+ ANDROID_CONTROL_AE_MODE_OFF,
+ ANDROID_CONTROL_AE_MODE_ON
+ };
+ info.update(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ availableAeModes, sizeof(availableAeModes));
+
+ //static const camera_metadata_rational exposureCompensationStep = {
+ // 1, 3
+ //};
+ //info.update(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ // &exposureCompensationStep, 1);
+
+ //int32_t exposureCompensationRange[] = {-9, 9};
+ //info.update(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ // exposureCompensationRange,
+ // sizeof(exposureCompensationRange)/sizeof(int32_t));
+
+ static const int32_t availableTargetFpsRanges[] = {
+        5, 15, 15, 30
+ };
+ info.update(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ availableTargetFpsRanges,
+ sizeof(availableTargetFpsRanges)/sizeof(int32_t));
+
+ static const uint8_t availableAntibandingModes[] = {
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
+ };
+ info.update(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ availableAntibandingModes, sizeof(availableAntibandingModes));
+
+ uint8_t awbModes[maxCount];
+ count = s->getAWB(awbModes, maxCount);
+ if (count < 0) {
+ static const uint8_t availableAwbModes[] = {
+ ANDROID_CONTROL_AWB_MODE_OFF,
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+ ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+ ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+ ANDROID_CONTROL_AWB_MODE_SHADE
+ };
+ info.update(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ availableAwbModes, sizeof(availableAwbModes));
+ } else {
+ DBG_LOGB("getAWB %d ",count);
+ info.update(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ awbModes, count);
+ }
+
+ static const uint8_t availableAfModesFront[] = {
+ ANDROID_CONTROL_AF_MODE_OFF
+ };
+
+ if (mFacingBack) {
+ uint8_t afMode[maxCount];
+ count = s->getAutoFocus(afMode, maxCount);
+ if (count < 0) {
+ static const uint8_t availableAfModesBack[] = {
+ ANDROID_CONTROL_AF_MODE_OFF,
+ //ANDROID_CONTROL_AF_MODE_AUTO,
+ //ANDROID_CONTROL_AF_MODE_MACRO,
+ //ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+ //ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
+ };
+
+ info.update(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModesBack, sizeof(availableAfModesBack));
+ } else {
+ info.update(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ afMode, count);
+ }
+ } else {
+ info.update(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModesFront, sizeof(availableAfModesFront));
+ }
+
+ uint8_t antiBanding[maxCount];
+ count = s->getAntiBanding(antiBanding, maxCount);
+ if (count < 0) {
+ static const uint8_t availableAntiBanding[] = {
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+ };
+        info.update(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+                availableAntiBanding, sizeof(availableAntiBanding));
+ } else {
+ info.update(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ antiBanding, count);
+ }
+
+ camera_metadata_rational step;
+ int maxExp, minExp, def, ret;
+ ret = s->getExposure(&maxExp, &minExp, &def, &step);
+ if (ret < 0) {
+ static const int32_t aeExpCompensation = 0;
+ info.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+ static const camera_metadata_rational exposureCompensationStep = {
+ 1, 3
+ };
+ info.update(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ &exposureCompensationStep, 1);
+
+ int32_t exposureCompensationRange[] = {0, 0};
+ info.update(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ exposureCompensationRange,
+ sizeof(exposureCompensationRange)/sizeof(int32_t));
+ } else {
+ DBG_LOGB("exposure compensation support:(%d, %d)\n", minExp, maxExp);
+ int32_t exposureCompensationRange[] = {minExp, maxExp};
+ info.update(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ exposureCompensationRange,
+ sizeof(exposureCompensationRange)/sizeof(int32_t));
+ info.update(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ &step, 1);
+ info.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &def, 1);
+ }
+
+
+ static const uint8_t availableVstabModes[] = {
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF
+ };
+ info.update(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ availableVstabModes, sizeof(availableVstabModes));
+
+ // android.info
+ const uint8_t supportedHardwareLevel =
+ mFullMode ? ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL :
+ ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ info.update(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+ &supportedHardwareLevel,
+ /*count*/1);
+#if 0
+
+ CameraMetadata mDeviceInfo = info.static_camera_characteristics;
+ camera_metadata_ro_entry availableJpegSizes =
+ mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+ if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) {
+ DBG_LOGA("errno entry\n");
+ }
+
+ // Get max jpeg size (area-wise).
+ for (size_t i = 0; i < availableJpegSizes.count; i += 2) {
+ DBG_LOGB("size=%dx%d\n",
+ availableJpegSizes.data.i32[i], availableJpegSizes.data.i32[i + 1]);
+ }
+
+#endif
+
+ mCameraInfo = info.release();
+ DBG_LOGB("mCameraID=%d,mCameraInfo=%p\n", mCameraID, mCameraInfo);
+
+
+ s->shutDown();
+ s.clear();
+
+ return OK;
+}
+
+status_t EmulatedFakeCamera3::process3A(CameraMetadata &settings) {
+ /**
+ * Extract top-level 3A controls
+ */
+ status_t res;
+
+ bool facePriority = false;
+
+ camera_metadata_entry e;
+
+ e = settings.find(ANDROID_CONTROL_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No control mode entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ uint8_t controlMode = e.data.u8[0];
+
+ e = settings.find(ANDROID_CONTROL_SCENE_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No scene mode entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ uint8_t sceneMode = e.data.u8[0];
+
+ if (controlMode == ANDROID_CONTROL_MODE_OFF) {
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+ update3A(settings);
+ return OK;
+ } else if (controlMode == ANDROID_CONTROL_MODE_USE_SCENE_MODE) {
+ switch(sceneMode) {
+ case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
+ mFacePriority = true;
+ break;
+ default:
+ ALOGE("%s: Emulator doesn't support scene mode %d",
+ __FUNCTION__, sceneMode);
+ return BAD_VALUE;
+ }
+ } else {
+ mFacePriority = false;
+ }
+
+    // controlMode == AUTO or sceneMode == FACE_PRIORITY
+ // Process individual 3A controls
+
+ res = doFakeAE(settings);
+ if (res != OK) return res;
+
+ res = doFakeAF(settings);
+ if (res != OK) return res;
+
+ res = doFakeAWB(settings);
+ if (res != OK) return res;
+
+ update3A(settings);
+ return OK;
+}
+
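+/*
+ * Fake AE model: the current exposure converges toward the target with a
+ * first-order filter,
+ *     current += (target - current) * kExposureTrackRate,
+ * and after kStableAeMaxFrames converged frames the target wanders by a
+ * random number of stops in [kExposureWanderMin, kExposureWanderMax]
+ * (target *= 2^step), restarting the SEARCHING state.
+ */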
+status_t EmulatedFakeCamera3::doFakeAE(CameraMetadata &settings) {
+ camera_metadata_entry e;
+
+ e = settings.find(ANDROID_CONTROL_AE_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No AE mode entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ uint8_t aeMode = e.data.u8[0];
+
+ switch (aeMode) {
+ case ANDROID_CONTROL_AE_MODE_OFF:
+ // AE is OFF
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ return OK;
+ case ANDROID_CONTROL_AE_MODE_ON:
+ // OK for AUTO modes
+ break;
+ default:
+ ALOGE("%s: Emulator doesn't support AE mode %d",
+ __FUNCTION__, aeMode);
+ return BAD_VALUE;
+ }
+
+ e = settings.find(ANDROID_CONTROL_AE_LOCK);
+ if (e.count == 0) {
+ ALOGE("%s: No AE lock entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ bool aeLocked = (e.data.u8[0] == ANDROID_CONTROL_AE_LOCK_ON);
+
+ e = settings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
+ bool precaptureTrigger = false;
+ if (e.count != 0) {
+ precaptureTrigger =
+ (e.data.u8[0] == ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START);
+ }
+
+ if (precaptureTrigger) {
+ ALOGV("%s: Pre capture trigger = %d", __FUNCTION__, precaptureTrigger);
+ } else if (e.count > 0) {
+ ALOGV("%s: Pre capture trigger was present? %zu",
+ __FUNCTION__,
+ e.count);
+ }
+
+ // If we have an aePrecaptureTrigger, aePrecaptureId should be set too
+ if (e.count != 0) {
+ e = settings.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
+
+ if (e.count == 0) {
+ ALOGE("%s: When android.control.aePrecaptureTrigger is set "
+ " in the request, aePrecaptureId needs to be set as well",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ mAeTriggerId = e.data.i32[0];
+ }
+
+ if (precaptureTrigger || mAeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ // Run precapture sequence
+ if (mAeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ mAeCounter = 0;
+ }
+
+ if (mFacePriority) {
+ mAeTargetExposureTime = kFacePriorityExposureTime;
+ } else {
+ mAeTargetExposureTime = kNormalExposureTime;
+ }
+
+ if (mAeCounter > kPrecaptureMinFrames &&
+ (mAeTargetExposureTime - mAeCurrentExposureTime) <
+ mAeTargetExposureTime / 10) {
+ // Done with precapture
+ mAeCounter = 0;
+ mAeState = aeLocked ? ANDROID_CONTROL_AE_STATE_LOCKED :
+ ANDROID_CONTROL_AE_STATE_CONVERGED;
+ } else {
+ // Converge some more
+ mAeCurrentExposureTime +=
+ (mAeTargetExposureTime - mAeCurrentExposureTime) *
+ kExposureTrackRate;
+ mAeCounter++;
+ mAeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
+ }
+
+ } else if (!aeLocked) {
+ // Run standard occasional AE scan
+ switch (mAeState) {
+ case ANDROID_CONTROL_AE_STATE_CONVERGED:
+ case ANDROID_CONTROL_AE_STATE_INACTIVE:
+ mAeCounter++;
+ if (mAeCounter > kStableAeMaxFrames) {
+ mAeTargetExposureTime =
+ mFacePriority ? kFacePriorityExposureTime :
+ kNormalExposureTime;
+ float exposureStep = ((double)rand() / RAND_MAX) *
+ (kExposureWanderMax - kExposureWanderMin) +
+ kExposureWanderMin;
+ mAeTargetExposureTime *= std::pow(2, exposureStep);
+ mAeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
+ }
+ break;
+ case ANDROID_CONTROL_AE_STATE_SEARCHING:
+ mAeCurrentExposureTime +=
+ (mAeTargetExposureTime - mAeCurrentExposureTime) *
+ kExposureTrackRate;
+            if (llabs(mAeTargetExposureTime - mAeCurrentExposureTime) <
+ mAeTargetExposureTime / 10) {
+ // Close enough
+ mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
+ mAeCounter = 0;
+ }
+ break;
+ case ANDROID_CONTROL_AE_STATE_LOCKED:
+ mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
+ mAeCounter = 0;
+ break;
+ default:
+ ALOGE("%s: Emulator in unexpected AE state %d",
+ __FUNCTION__, mAeState);
+ return INVALID_OPERATION;
+ }
+ } else {
+ // AE is locked
+ mAeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+ }
+
+ return OK;
+}
+
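+/*
+ * Fake AF model: at most one state transition per frame. A START trigger
+ * moves AUTO/MACRO into ACTIVE_SCAN and the continuous modes straight to a
+ * locked state; scans "finish" instantly with a 2-in-3 chance of focusing,
+ * and CANCEL always returns to INACTIVE.
+ */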
+status_t EmulatedFakeCamera3::doFakeAF(CameraMetadata &settings) {
+ camera_metadata_entry e;
+
+ e = settings.find(ANDROID_CONTROL_AF_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No AF mode entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ uint8_t afMode = e.data.u8[0];
+
+ e = settings.find(ANDROID_CONTROL_AF_TRIGGER);
+ typedef camera_metadata_enum_android_control_af_trigger af_trigger_t;
+ af_trigger_t afTrigger;
+ // If we have an afTrigger, afTriggerId should be set too
+ if (e.count != 0) {
+ afTrigger = static_cast<af_trigger_t>(e.data.u8[0]);
+
+ e = settings.find(ANDROID_CONTROL_AF_TRIGGER_ID);
+
+ if (e.count == 0) {
+ ALOGE("%s: When android.control.afTrigger is set "
+ " in the request, afTriggerId needs to be set as well",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ mAfTriggerId = e.data.i32[0];
+
+ ALOGV("%s: AF trigger set to 0x%x", __FUNCTION__, afTrigger);
+ ALOGV("%s: AF trigger ID set to 0x%x", __FUNCTION__, mAfTriggerId);
+ ALOGV("%s: AF mode is 0x%x", __FUNCTION__, afMode);
+ } else {
+ afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ }
+
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_OFF:
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ return OK;
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ if (!mFacingBack) {
+ ALOGE("%s: Front camera doesn't support AF mode %d",
+ __FUNCTION__, afMode);
+ return BAD_VALUE;
+ }
+ mSensor->setAutoFocuas(afMode);
+ // OK, handle transitions lower on
+ break;
+ default:
+ ALOGE("%s: Emulator doesn't support AF mode %d",
+ __FUNCTION__, afMode);
+ return BAD_VALUE;
+ }
+
+ e = settings.find(ANDROID_CONTROL_AF_REGIONS);
+ if (e.count == 0) {
+ ALOGE("%s:Get ANDROID_CONTROL_AF_REGIONS failed\n", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ int32_t x0 = e.data.i32[0];
+ int32_t y0 = e.data.i32[1];
+ int32_t x1 = e.data.i32[2];
+ int32_t y1 = e.data.i32[3];
+ mSensor->setFocuasArea(x0, y0, x1, y1);
+ DBG_LOGB(" x0:%d, y0:%d,x1:%d,y1:%d,\n", x0, y0, x1, y1);
+
+
+ bool afModeChanged = mAfMode != afMode;
+ mAfMode = afMode;
+
+ /**
+ * Simulate AF triggers. Transition at most 1 state per frame.
+ * - Focusing always succeeds (goes into locked, or PASSIVE_SCAN).
+ */
+
+ bool afTriggerStart = false;
+ bool afTriggerCancel = false;
+ switch (afTrigger) {
+ case ANDROID_CONTROL_AF_TRIGGER_IDLE:
+ break;
+ case ANDROID_CONTROL_AF_TRIGGER_START:
+ afTriggerStart = true;
+ break;
+ case ANDROID_CONTROL_AF_TRIGGER_CANCEL:
+ afTriggerCancel = true;
+ // Cancel trigger always transitions into INACTIVE
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+
+ ALOGV("%s: AF State transition to STATE_INACTIVE", __FUNCTION__);
+
+ // Stay in 'inactive' until at least next frame
+ return OK;
+ default:
+ ALOGE("%s: Unknown af trigger value %d", __FUNCTION__, afTrigger);
+ return BAD_VALUE;
+ }
+
+ // If we get down here, we're either in an autofocus mode
+ // or in a continuous focus mode (and no other modes)
+
+ int oldAfState = mAfState;
+ switch (mAfState) {
+ case ANDROID_CONTROL_AF_STATE_INACTIVE:
+ if (afTriggerStart) {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ break;
+ }
+ } else {
+ // At least one frame stays in INACTIVE
+ if (!afModeChanged) {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
+ break;
+ }
+ }
+ }
+ break;
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+ /**
+ * When the AF trigger is activated, the algorithm should finish
+ * its PASSIVE_SCAN if active, and then transition into AF_FOCUSED
+ * or AF_NOT_FOCUSED as appropriate
+ */
+ if (afTriggerStart) {
+ // Randomly transition to focused or not focused
+ if (rand() % 3) {
+ mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ } else {
+ mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ }
+ }
+ /**
+ * When the AF trigger is not involved, the AF algorithm should
+ * start in INACTIVE state, and then transition into PASSIVE_SCAN
+ * and PASSIVE_FOCUSED states
+ */
+ else if (!afTriggerCancel) {
+ // Randomly transition to passive focus
+ if (rand() % 3 == 0) {
+ mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+ }
+ }
+
+ break;
+ case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+ if (afTriggerStart) {
+ // Randomly transition to focused or not focused
+ if (rand() % 3) {
+ mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ } else {
+ mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ }
+ }
+ // TODO: initiate passive scan (PASSIVE_SCAN)
+ break;
+ case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+ // Simulate AF sweep completing instantaneously
+
+ // Randomly transition to focused or not focused
+ if (rand() % 3) {
+ mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+ } else {
+ mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+ }
+ break;
+ case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+ if (afTriggerStart) {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ // continuous autofocus => trigger start has no effect
+ break;
+ }
+ }
+ break;
+ case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+ if (afTriggerStart) {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ // fall-through
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ // continuous autofocus => trigger start has no effect
+ break;
+ }
+ }
+ break;
+ default:
+ ALOGE("%s: Bad af state %d", __FUNCTION__, mAfState);
+ }
+
+ {
+ char afStateString[100] = {0,};
+ camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE,
+ oldAfState,
+ afStateString,
+ sizeof(afStateString));
+
+ char afNewStateString[100] = {0,};
+ camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE,
+ mAfState,
+ afNewStateString,
+ sizeof(afNewStateString));
+ ALOGVV("%s: AF state transitioned from %s to %s",
+ __FUNCTION__, afStateString, afNewStateString);
+ }
+
+
+ return OK;
+}
+
+status_t EmulatedFakeCamera3::doFakeAWB(CameraMetadata &settings) {
+ camera_metadata_entry e;
+
+ e = settings.find(ANDROID_CONTROL_AWB_MODE);
+ if (e.count == 0) {
+ ALOGE("%s: No AWB mode entry!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ uint8_t awbMode = e.data.u8[0];
+ //DBG_LOGB(" awbMode%d\n", awbMode);
+
+ // TODO: Add white balance simulation
+
+ switch (awbMode) {
+ case ANDROID_CONTROL_AWB_MODE_OFF:
+ mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+ return OK;
+ case ANDROID_CONTROL_AWB_MODE_AUTO:
+ case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
+ case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
+ case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
+ case ANDROID_CONTROL_AWB_MODE_SHADE:
+            // OK: delegate the simulated AWB mode to the sensor
+            return mSensor->setAWB(awbMode);
+ default:
+ ALOGE("%s: Emulator doesn't support AWB mode %d",
+ __FUNCTION__, awbMode);
+ return BAD_VALUE;
+ }
+
+ return OK;
+}
+
+
+void EmulatedFakeCamera3::update3A(CameraMetadata &settings) {
+ if (mAeState != ANDROID_CONTROL_AE_STATE_INACTIVE) {
+ settings.update(ANDROID_SENSOR_EXPOSURE_TIME,
+ &mAeCurrentExposureTime, 1);
+ settings.update(ANDROID_SENSOR_SENSITIVITY,
+ &mAeCurrentSensitivity, 1);
+ }
+
+ settings.update(ANDROID_CONTROL_AE_STATE,
+ &mAeState, 1);
+ settings.update(ANDROID_CONTROL_AF_STATE,
+ &mAfState, 1);
+ settings.update(ANDROID_CONTROL_AWB_STATE,
+ &mAwbState, 1);
+ /**
+ * TODO: Trigger IDs need a think-through
+ */
+ settings.update(ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ &mAeTriggerId, 1);
+ settings.update(ANDROID_CONTROL_AF_TRIGGER_ID,
+ &mAfTriggerId, 1);
+}
+
+void EmulatedFakeCamera3::signalReadoutIdle() {
+ Mutex::Autolock l(mLock);
+    // Need to check isIdle again because waiting on mLock may have allowed
+ // something to be placed in the in-flight queue.
+ if (mStatus == STATUS_ACTIVE && mReadoutThread->isIdle()) {
+ ALOGV("Now idle");
+ mStatus = STATUS_READY;
+ }
+}
+
+void EmulatedFakeCamera3::onSensorEvent(uint32_t frameNumber, Event e,
+ nsecs_t timestamp) {
+ switch(e) {
+ case Sensor::SensorListener::EXPOSURE_START: {
+ ALOGVV("%s: Frame %d: Sensor started exposure at %lld",
+ __FUNCTION__, frameNumber, timestamp);
+ // Trigger shutter notify to framework
+ camera3_notify_msg_t msg;
+ msg.type = CAMERA3_MSG_SHUTTER;
+ msg.message.shutter.frame_number = frameNumber;
+ msg.message.shutter.timestamp = timestamp;
+ sendNotify(&msg);
+ break;
+ }
+ default:
+ ALOGW("%s: Unexpected sensor event %d at %" PRId64, __FUNCTION__,
+ e, timestamp);
+ break;
+ }
+}
+
+EmulatedFakeCamera3::ReadoutThread::ReadoutThread(EmulatedFakeCamera3 *parent) :
+ mParent(parent), mJpegWaiting(false) {
+}
+
+EmulatedFakeCamera3::ReadoutThread::~ReadoutThread() {
+ for (List<Request>::iterator i = mInFlightQueue.begin();
+ i != mInFlightQueue.end(); i++) {
+ delete i->buffers;
+ delete i->sensorBuffers;
+ }
+}
+
+void EmulatedFakeCamera3::ReadoutThread::queueCaptureRequest(const Request &r) {
+ Mutex::Autolock l(mLock);
+
+ mInFlightQueue.push_back(r);
+ mInFlightSignal.signal();
+}
+
+bool EmulatedFakeCamera3::ReadoutThread::isIdle() {
+ Mutex::Autolock l(mLock);
+ return mInFlightQueue.empty() && !mThreadActive;
+}
+
+status_t EmulatedFakeCamera3::ReadoutThread::waitForReadout() {
+ status_t res;
+ Mutex::Autolock l(mLock);
+ int loopCount = 0;
+ while (mInFlightQueue.size() >= kMaxQueueSize) {
+ res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+ if (res != OK && res != TIMED_OUT) {
+ ALOGE("%s: Error waiting for in-flight queue to shrink",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if (loopCount == kMaxWaitLoops) {
+ ALOGE("%s: Timed out waiting for in-flight queue to shrink",
+ __FUNCTION__);
+ return TIMED_OUT;
+ }
+ loopCount++;
+ }
+ return OK;
+}
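+// Note on waitForReadout(): the in-flight queue is capped at kMaxQueueSize
+// (2) requests, and the wait gives up after kWaitPerLoop * kMaxWaitLoops
+// (10 ms * 1000 = 10 s).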
+
+bool EmulatedFakeCamera3::ReadoutThread::threadLoop() {
+ status_t res;
+
+ ALOGVV("%s: ReadoutThread waiting for request", __FUNCTION__);
+
+ // First wait for a request from the in-flight queue
+
+ if (mCurrentRequest.settings.isEmpty()) {
+ Mutex::Autolock l(mLock);
+ if (mInFlightQueue.empty()) {
+ res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+ if (res == TIMED_OUT) {
+ ALOGVV("%s: ReadoutThread: Timed out waiting for request",
+ __FUNCTION__);
+ return true;
+ } else if (res != NO_ERROR) {
+ ALOGE("%s: Error waiting for capture requests: %d",
+ __FUNCTION__, res);
+ return false;
+ }
+ }
+ mCurrentRequest.frameNumber = mInFlightQueue.begin()->frameNumber;
+ mCurrentRequest.settings.acquire(mInFlightQueue.begin()->settings);
+ mCurrentRequest.buffers = mInFlightQueue.begin()->buffers;
+ mCurrentRequest.sensorBuffers = mInFlightQueue.begin()->sensorBuffers;
+ mInFlightQueue.erase(mInFlightQueue.begin());
+ mInFlightSignal.signal();
+ mThreadActive = true;
+ ALOGVV("%s: Beginning readout of frame %d", __FUNCTION__,
+ mCurrentRequest.frameNumber);
+ }
+
+ // Then wait for it to be delivered from the sensor
+ ALOGVV("%s: ReadoutThread: Wait for frame to be delivered from sensor",
+ __FUNCTION__);
+
+ nsecs_t captureTime;
+ bool gotFrame =
+ mParent->mSensor->waitForNewFrame(kWaitPerLoop, &captureTime);
+ if (!gotFrame) {
+ ALOGVV("%s: ReadoutThread: Timed out waiting for sensor frame",
+ __FUNCTION__);
+ return true;
+ }
+
+ ALOGVV("Sensor done with readout for frame %d, captured at %lld ",
+ mCurrentRequest.frameNumber, captureTime);
+
+ // Check if we need to JPEG encode a buffer, and send it for async
+ // compression if so. Otherwise prepare the buffer for return.
+ bool needJpeg = false;
+ HalBufferVector::iterator buf = mCurrentRequest.buffers->begin();
+ while(buf != mCurrentRequest.buffers->end()) {
+ bool goodBuffer = true;
+ if ( buf->stream->format ==
+ HAL_PIXEL_FORMAT_BLOB) {
+ Mutex::Autolock jl(mJpegLock);
+ if (mJpegWaiting) {
+ // This shouldn't happen, because processCaptureRequest should
+ // be stalling until JPEG compressor is free.
+ ALOGE("%s: Already processing a JPEG!", __FUNCTION__);
+ goodBuffer = false;
+ }
+ if (goodBuffer) {
+ // Compressor takes ownership of sensorBuffers here
+ res = mParent->mJpegCompressor->start(mCurrentRequest.sensorBuffers,
+ this);
+ goodBuffer = (res == OK);
+ }
+ if (goodBuffer) {
+ needJpeg = true;
+
+ mJpegHalBuffer = *buf;
+ mJpegFrameNumber = mCurrentRequest.frameNumber;
+ mJpegWaiting = true;
+
+ mCurrentRequest.sensorBuffers = NULL;
+ buf = mCurrentRequest.buffers->erase(buf);
+
+ continue;
+ }
+ ALOGE("%s: Error compressing output buffer: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ // fallthrough for cleanup
+ }
+ GraphicBufferMapper::get().unlock(*(buf->buffer));
+
+ buf->status = goodBuffer ? CAMERA3_BUFFER_STATUS_OK :
+ CAMERA3_BUFFER_STATUS_ERROR;
+ buf->acquire_fence = -1;
+ buf->release_fence = -1;
+
+ ++buf;
+ } // end while
+
+ // Construct result for all completed buffers and results
+
+ camera3_capture_result result;
+
+ mCurrentRequest.settings.update(ANDROID_SENSOR_TIMESTAMP,
+ &captureTime, 1);
+
+ memset(&result, 0, sizeof(result));
+ result.frame_number = mCurrentRequest.frameNumber;
+ result.result = mCurrentRequest.settings.getAndLock();
+ result.num_output_buffers = mCurrentRequest.buffers->size();
+ result.output_buffers = mCurrentRequest.buffers->array();
+
+ // Go idle if queue is empty, before sending result
+ bool signalIdle = false;
+ {
+ Mutex::Autolock l(mLock);
+ if (mInFlightQueue.empty()) {
+ mThreadActive = false;
+ signalIdle = true;
+ }
+ }
+ if (signalIdle) mParent->signalReadoutIdle();
+
+ // Send it off to the framework
+ ALOGVV("%s: ReadoutThread: Send result to framework",
+ __FUNCTION__);
+ mParent->sendCaptureResult(&result);
+
+ // Clean up
+ mCurrentRequest.settings.unlock(result.result);
+
+ delete mCurrentRequest.buffers;
+ mCurrentRequest.buffers = NULL;
+ if (!needJpeg) {
+ delete mCurrentRequest.sensorBuffers;
+ mCurrentRequest.sensorBuffers = NULL;
+ }
+ mCurrentRequest.settings.clear();
+
+ return true;
+}
+
+void EmulatedFakeCamera3::ReadoutThread::onJpegDone(
+ const StreamBuffer &jpegBuffer, bool success) {
+ Mutex::Autolock jl(mJpegLock);
+
+ GraphicBufferMapper::get().unlock(*(jpegBuffer.buffer));
+
+ mJpegHalBuffer.status = success ?
+ CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR;
+ mJpegHalBuffer.acquire_fence = -1;
+ mJpegHalBuffer.release_fence = -1;
+ mJpegWaiting = false;
+
+ camera3_capture_result result;
+ result.frame_number = mJpegFrameNumber;
+ result.result = NULL;
+ result.num_output_buffers = 1;
+ result.output_buffers = &mJpegHalBuffer;
+
+ if (!success) {
+ ALOGE("%s: Compression failure, returning error state buffer to"
+ " framework", __FUNCTION__);
+ } else {
+ DBG_LOGB("%s: Compression complete, returning buffer to framework",
+ __FUNCTION__);
+ }
+
+ mParent->sendCaptureResult(&result);
+}
+
+void EmulatedFakeCamera3::ReadoutThread::onJpegInputDone(
+ const StreamBuffer &inputBuffer) {
+ // Should never get here, since the input buffer has to be returned
+ // by end of processCaptureRequest
+ ALOGE("%s: Unexpected input buffer from JPEG compressor!", __FUNCTION__);
+}
+
+
+}; // namespace android
diff --git a/v3/EmulatedFakeCamera3.h b/v3/EmulatedFakeCamera3.h
new file mode 100644
index 0000000..0889813
--- a/dev/null
+++ b/v3/EmulatedFakeCamera3.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA3_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA3_H
+
+/**
+ * Contains declaration of a class EmulatedFakeCamera3 that encapsulates
+ * functionality of a fake camera that implements version 3 of the camera
+ * device interface.
+ */
+
+#include "EmulatedCamera3.h"
+#include "fake-pipeline2/Base.h"
+#include "fake-pipeline2/Sensor.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include <camera/CameraMetadata.h>
+#include <utils/List.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+/**
+ * Encapsulates functionality for a v3 HAL camera which produces synthetic data.
+ *
+ * Note that EmulatedCameraFactory instantiates an object of this class just
+ * once, when EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to
+ * connectDevice(), and closeCamera() methods of this class that are invoked in
+ * response to hw_module_methods_t::open, and camera_device::close callbacks.
+ */
+class EmulatedFakeCamera3 : public EmulatedCamera3,
+ private Sensor::SensorListener {
+public:
+
+ EmulatedFakeCamera3(int cameraId, bool facingBack,
+ struct hw_module_t* module);
+
+ virtual ~EmulatedFakeCamera3();
+
+ /****************************************************************************
+ * EmulatedCamera3 virtual overrides
+ ***************************************************************************/
+
+public:
+
+ virtual status_t Initialize();
+
+ /****************************************************************************
+ * Camera module API and generic hardware device API implementation
+ ***************************************************************************/
+
+public:
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info *info);
+
+ /****************************************************************************
+ * EmulatedCamera3 abstract API implementation
+ ***************************************************************************/
+
+protected:
+
+ virtual status_t configureStreams(
+ camera3_stream_configuration *streamList);
+
+ virtual status_t registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) ;
+
+ virtual const camera_metadata_t* constructDefaultRequestSettings(
+ int type);
+
+ virtual status_t processCaptureRequest(camera3_capture_request *request);
+
+ /** Debug methods */
+
+ virtual void dump(int fd);
+
+ /** Tag query methods */
+ virtual const char *getVendorSectionName(uint32_t tag);
+
+ virtual const char *getVendorTagName(uint32_t tag);
+
+ virtual int getVendorTagType(uint32_t tag);
+
+private:
+
+ /**
+ * Build the static info metadata buffer for this device
+ */
+ status_t constructStaticInfo();
+
+ /**
+ * Run the fake 3A algorithms as needed. May override/modify settings
+ * values.
+ */
+ status_t process3A(CameraMetadata &settings);
+
+ status_t doFakeAE(CameraMetadata &settings);
+ status_t doFakeAF(CameraMetadata &settings);
+ status_t doFakeAWB(CameraMetadata &settings);
+ void update3A(CameraMetadata &settings);
+
+ /** Signal from readout thread that it doesn't have anything to do */
+ void signalReadoutIdle();
+
+ /** Handle interrupt events from the sensor */
+ void onSensorEvent(uint32_t frameNumber, Event e, nsecs_t timestamp);
+
+ /****************************************************************************
+ * Static configuration information
+ ***************************************************************************/
+private:
+ static const uint32_t kMaxRawStreamCount = 1;
+ static const uint32_t kMaxProcessedStreamCount = 3;
+ static const uint32_t kMaxJpegStreamCount = 1;
+ static const uint32_t kMaxReprocessStreamCount = 2;
+ static const uint32_t kMaxBufferCount = 4;
+ // We need a positive stream ID to distinguish external buffers from
+ // sensor-generated buffers which use a nonpositive ID. Otherwise, HAL3 has
+ // no concept of a stream id.
+ static const uint32_t kGenericStreamId = 1;
+ static const int32_t kAvailableFormats[];
+ static const uint32_t kAvailableRawSizes[];
+ static const uint64_t kAvailableRawMinDurations[];
+ static const uint32_t kAvailableProcessedSizesBack[];
+ static const uint32_t kAvailableProcessedSizesFront[];
+ static const uint64_t kAvailableProcessedMinDurations[];
+ static const uint32_t kAvailableJpegSizesBack[];
+ static const uint32_t kAvailableJpegSizesFront[];
+ static const uint64_t kAvailableJpegMinDurations[];
+
+ static const int64_t kSyncWaitTimeout = 10000000; // 10 ms
+ static const int32_t kMaxSyncTimeoutCount = 1000; // 1000 kSyncWaitTimeouts
+ static const uint32_t kFenceTimeoutMs = 2000; // 2 s
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+ /* HAL interface serialization lock. */
+ Mutex mLock;
+
+ /* Facing back (true) or front (false) switch. */
+ bool mFacingBack;
+
+ /* Full mode (true) or limited mode (false) switch */
+ bool mFullMode;
+
+ /**
+ * Cache for default templates. Once one is requested, the pointer must be
+ * valid at least until close() is called on the device
+ */
+ camera_metadata_t *mDefaultTemplates[CAMERA3_TEMPLATE_COUNT];
+
+ /**
+ * Private stream information, stored in camera3_stream_t->priv.
+ */
+ struct PrivateStreamInfo {
+ bool alive;
+ bool registered;
+ };
+
+ // Shortcut to the input stream
+ camera3_stream_t* mInputStream;
+
+ typedef List<camera3_stream_t*> StreamList;
+ typedef List<camera3_stream_t*>::iterator StreamIterator;
+ typedef Vector<camera3_stream_buffer> HalBufferVector;
+
+ // All streams, including input stream
+ StreamList mStreams;
+
+ // Cached settings from latest submitted request
+ CameraMetadata mPrevSettings;
+
+ /** Fake hardware interfaces */
+ sp<Sensor> mSensor;
+ sp<JpegCompressor> mJpegCompressor;
+ friend class JpegCompressor;
+
+ /** Processing thread for sending out results */
+
+ class ReadoutThread : public Thread, private JpegCompressor::JpegListener {
+ public:
+ ReadoutThread(EmulatedFakeCamera3 *parent);
+ ~ReadoutThread();
+
+ struct Request {
+ uint32_t frameNumber;
+ CameraMetadata settings;
+ HalBufferVector *buffers;
+ Buffers *sensorBuffers;
+ };
+
+ /**
+ * Interface to parent class
+ */
+
+ // Place request in the in-flight queue to wait for sensor capture
+ void queueCaptureRequest(const Request &r);
+
+ // Test if the readout thread is idle (no in-flight requests, not
+        // currently reading out anything)
+ bool isIdle();
+
+ // Wait until isIdle is true
+ status_t waitForReadout();
+
+ private:
+ static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+ static const nsecs_t kMaxWaitLoops = 1000;
+ static const size_t kMaxQueueSize = 2;
+
+ EmulatedFakeCamera3 *mParent;
+ Mutex mLock;
+
+ List<Request> mInFlightQueue;
+ Condition mInFlightSignal;
+ bool mThreadActive;
+
+ virtual bool threadLoop();
+
+ // Only accessed by threadLoop
+
+ Request mCurrentRequest;
+
+ // Jpeg completion callbacks
+
+ Mutex mJpegLock;
+ bool mJpegWaiting;
+ camera3_stream_buffer mJpegHalBuffer;
+ uint32_t mJpegFrameNumber;
+ virtual void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+ virtual void onJpegInputDone(const StreamBuffer &inputBuffer);
+ };
+
+ sp<ReadoutThread> mReadoutThread;
+
+ /** Fake 3A constants */
+
+ static const nsecs_t kNormalExposureTime;
+ static const nsecs_t kFacePriorityExposureTime;
+ static const int kNormalSensitivity;
+ static const int kFacePrioritySensitivity;
+ // Rate of converging AE to new target value, as fraction of difference between
+ // current and target value.
+ static const float kExposureTrackRate;
+ // Minimum duration for precapture state. May be longer if slow to converge
+ // to target exposure
+ static const int kPrecaptureMinFrames;
+ // How often to restart AE 'scanning'
+ static const int kStableAeMaxFrames;
+ // Maximum stop below 'normal' exposure time that we'll wander to while
+ // pretending to converge AE. In powers of 2. (-2 == 1/4 as bright)
+ static const float kExposureWanderMin;
+ // Maximum stop above 'normal' exposure time that we'll wander to while
+ // pretending to converge AE. In powers of 2. (2 == 4x as bright)
+ static const float kExposureWanderMax;
+
+ /** Fake 3A state */
+
+ uint8_t mControlMode;
+ bool mFacePriority;
+ uint8_t mAeState;
+ uint8_t mAfState;
+ uint8_t mAwbState;
+ uint8_t mAeMode;
+ uint8_t mAfMode;
+ uint8_t mAwbMode;
+ int mAfTriggerId;
+ int mAeTriggerId;
+
+ int mAeCounter;
+ nsecs_t mAeCurrentExposureTime;
+ nsecs_t mAeTargetExposureTime;
+ int mAeCurrentSensitivity;
+
+};
+
+} // namespace android
+
+#endif  // HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA3_H
diff --git a/v3/EmulatedFakeCameraDevice.cpp b/v3/EmulatedFakeCameraDevice.cpp
new file mode 100755
index 0000000..4afadc1
--- a/dev/null
+++ b/v3/EmulatedFakeCameraDevice.cpp
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCameraDevice that encapsulates
+ * fake camera device.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeDevice"
+#include <cutils/log.h>
+#include "EmulatedFakeCamera.h"
+#include "EmulatedFakeCameraDevice.h"
+
+namespace android {
+
+EmulatedFakeCameraDevice::EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal)
+ : EmulatedCameraDevice(camera_hal),
+ mBlackYUV(kBlack32),
+ mWhiteYUV(kWhite32),
+ mRedYUV(kRed8),
+ mGreenYUV(kGreen8),
+ mBlueYUV(kBlue8),
+ mLastRedrawn(0),
+ mCheckX(0),
+ mCheckY(0),
+ mCcounter(0)
+#if EFCD_ROTATE_FRAME
+ , mLastRotatedAt(0),
+ mCurrentFrameType(0),
+ mCurrentColor(&mWhiteYUV)
+#endif // EFCD_ROTATE_FRAME
+{
+ // Makes the image with the original exposure compensation darker.
+ // So the effects of changing the exposure compensation can be seen.
+ mBlackYUV.Y = mBlackYUV.Y / 2;
+ mWhiteYUV.Y = mWhiteYUV.Y / 2;
+ mRedYUV.Y = mRedYUV.Y / 2;
+ mGreenYUV.Y = mGreenYUV.Y / 2;
+ mBlueYUV.Y = mBlueYUV.Y / 2;
+}
+
+EmulatedFakeCameraDevice::~EmulatedFakeCameraDevice()
+{
+}
+
+/****************************************************************************
+ * Emulated camera device abstract interface implementation.
+ ***************************************************************************/
+
+status_t EmulatedFakeCameraDevice::connectDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isInitialized()) {
+ ALOGE("%s: Fake camera device is not initialized.", __FUNCTION__);
+ return EINVAL;
+ }
+ if (isConnected()) {
+ ALOGW("%s: Fake camera device is already connected.", __FUNCTION__);
+ return NO_ERROR;
+ }
+
+ /* There is no device to connect to. */
+ mState = ECDS_CONNECTED;
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCameraDevice::disconnectDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isConnected()) {
+ ALOGW("%s: Fake camera device is already disconnected.", __FUNCTION__);
+ return NO_ERROR;
+ }
+ if (isStarted()) {
+ ALOGE("%s: Cannot disconnect from the started device.", __FUNCTION__);
+ return EINVAL;
+ }
+
+ /* There is no device to disconnect from. */
+ mState = ECDS_INITIALIZED;
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCameraDevice::startDevice(int width,
+ int height,
+ uint32_t pix_fmt)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isConnected()) {
+ ALOGE("%s: Fake camera device is not connected.", __FUNCTION__);
+ return EINVAL;
+ }
+ if (isStarted()) {
+ ALOGE("%s: Fake camera device is already started.", __FUNCTION__);
+ return EINVAL;
+ }
+
+ /* Initialize the base class. */
+ const status_t res =
+ EmulatedCameraDevice::commonStartDevice(width, height, pix_fmt);
+ if (res == NO_ERROR) {
+ /* Calculate U/V panes inside the framebuffer. */
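+        /*
+         * For a width x height frame, every supported format stores
+         * width*height luma bytes first, then width*height/2 chroma bytes:
+         * two separate quarter-size planes for YUV420/YVU420 (mUVStep == 1),
+         * or one interleaved CrCb/CbCr plane for NV21/NV12 (mUVStep == 2).
+         */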
+ switch (mPixelFormat) {
+ case V4L2_PIX_FMT_YVU420:
+ mFrameV = mCurrentFrame + mTotalPixels;
+                mFrameU = mFrameV + mTotalPixels / 4;
+ mUVStep = 1;
+ mUVTotalNum = mTotalPixels / 4;
+ break;
+
+ case V4L2_PIX_FMT_YUV420:
+ mFrameU = mCurrentFrame + mTotalPixels;
+ mFrameV = mFrameU + mTotalPixels / 4;
+ mUVStep = 1;
+ mUVTotalNum = mTotalPixels / 4;
+ break;
+
+ case V4L2_PIX_FMT_NV21:
+ /* Interleaved UV pane, V first. */
+ mFrameV = mCurrentFrame + mTotalPixels;
+ mFrameU = mFrameV + 1;
+ mUVStep = 2;
+ mUVTotalNum = mTotalPixels / 4;
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ /* Interleaved UV pane, U first. */
+ mFrameU = mCurrentFrame + mTotalPixels;
+ mFrameV = mFrameU + 1;
+ mUVStep = 2;
+ mUVTotalNum = mTotalPixels / 4;
+ break;
+
+ default:
+ ALOGE("%s: Unknown pixel format %.4s", __FUNCTION__,
+ reinterpret_cast<const char*>(&mPixelFormat));
+ return EINVAL;
+ }
+ /* Number of items in a single row inside U/V panes. */
+ mUVInRow = (width / 2) * mUVStep;
+ mState = ECDS_STARTED;
+ } else {
+ ALOGE("%s: commonStartDevice failed", __FUNCTION__);
+ }
+
+ return res;
+}
+
+status_t EmulatedFakeCameraDevice::stopDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isStarted()) {
+ ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
+ return NO_ERROR;
+ }
+
+ mFrameU = mFrameV = NULL;
+ EmulatedCameraDevice::commonStopDevice();
+ mState = ECDS_CONNECTED;
+
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Worker thread management overrides.
+ ***************************************************************************/
+
+bool EmulatedFakeCameraDevice::inWorkerThread()
+{
+ /* Wait till FPS timeout expires, or thread exit message is received. */
+ WorkerThread::SelectRes res =
+ getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
+ if (res == WorkerThread::EXIT_THREAD) {
+ ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
+ return false;
+ }
+
+ /* Lets see if we need to generate a new frame. */
+ if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRedrawn) >= mRedrawAfter) {
+ /*
+ * Time to generate a new frame.
+ */
+
+#if EFCD_ROTATE_FRAME
+ const int frame_type = rotateFrame();
+ switch (frame_type) {
+ case 0:
+ drawCheckerboard();
+ break;
+ case 1:
+ drawStripes();
+ break;
+ case 2:
+ drawSolid(mCurrentColor);
+ break;
+ }
+#else
+ /* Draw the checker board. */
+ drawCheckerboard();
+
+#endif // EFCD_ROTATE_FRAME
+
+ mLastRedrawn = systemTime(SYSTEM_TIME_MONOTONIC);
+ }
+
+ /* Timestamp the current frame, and notify the camera HAL about new frame. */
+ mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+ mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
+
+ return true;
+}
+
+/****************************************************************************
+ * Fake camera device private API
+ ***************************************************************************/
+
+void EmulatedFakeCameraDevice::drawCheckerboard()
+{
+ const int size = mFrameWidth / 10;
+ bool black = true;
+
+ if (size == 0) {
+ // When this happens, it happens at a very high rate,
+ // so don't log any messages and just return.
+ return;
+ }
+
+
+ if((mCheckX / size) & 1)
+ black = false;
+ if((mCheckY / size) & 1)
+ black = !black;
+
+ int county = mCheckY % size;
+ int checkxremainder = mCheckX % size;
+ uint8_t* Y = mCurrentFrame;
+ uint8_t* U_pos = mFrameU;
+ uint8_t* V_pos = mFrameV;
+ uint8_t* U = U_pos;
+ uint8_t* V = V_pos;
+
+ YUVPixel adjustedWhite = YUVPixel(mWhiteYUV);
+ changeWhiteBalance(adjustedWhite.Y, adjustedWhite.U, adjustedWhite.V);
+
+ for(int y = 0; y < mFrameHeight; y++) {
+ int countx = checkxremainder;
+ bool current = black;
+ for(int x = 0; x < mFrameWidth; x += 2) {
+ if (current) {
+ mBlackYUV.get(Y, U, V);
+ } else {
+ adjustedWhite.get(Y, U, V);
+ }
+ *Y = changeExposure(*Y);
+ Y[1] = *Y;
+ Y += 2; U += mUVStep; V += mUVStep;
+ countx += 2;
+ if(countx >= size) {
+ countx = 0;
+ current = !current;
+ }
+ }
+ if (y & 0x1) {
+ U_pos = U;
+ V_pos = V;
+ } else {
+ U = U_pos;
+ V = V_pos;
+ }
+ if(county++ >= size) {
+ county = 0;
+ black = !black;
+ }
+ }
+ mCheckX += 3;
+ mCheckY++;
+
+ /* Run the square. */
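+    // (mCcounter * k) & 255, folded at 128, traces a triangular bounce path;
+    // (mCcounter & 0x100) toggles the square between red and green every 256
+    // frames.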
+ int sqx = ((mCcounter * 3) & 255);
+ if(sqx > 128) sqx = 255 - sqx;
+ int sqy = ((mCcounter * 5) & 255);
+ if(sqy > 128) sqy = 255 - sqy;
+ const int sqsize = mFrameWidth / 10;
+ drawSquare(sqx * sqsize / 32, sqy * sqsize / 32, (sqsize * 5) >> 1,
+ (mCcounter & 0x100) ? &mRedYUV : &mGreenYUV);
+ mCcounter++;
+}
+
+void EmulatedFakeCameraDevice::drawSquare(int x,
+ int y,
+ int size,
+ const YUVPixel* color)
+{
+ const int square_xstop = min(mFrameWidth, x + size);
+ const int square_ystop = min(mFrameHeight, y + size);
+ uint8_t* Y_pos = mCurrentFrame + y * mFrameWidth + x;
+
+ YUVPixel adjustedColor = *color;
+ changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
+
+ // Draw the square.
+ for (; y < square_ystop; y++) {
+ const int iUV = (y / 2) * mUVInRow + (x / 2) * mUVStep;
+ uint8_t* sqU = mFrameU + iUV;
+ uint8_t* sqV = mFrameV + iUV;
+ uint8_t* sqY = Y_pos;
+ for (int i = x; i < square_xstop; i += 2) {
+ adjustedColor.get(sqY, sqU, sqV);
+ *sqY = changeExposure(*sqY);
+ sqY[1] = *sqY;
+ sqY += 2; sqU += mUVStep; sqV += mUVStep;
+ }
+ Y_pos += mFrameWidth;
+ }
+}
+
+#if EFCD_ROTATE_FRAME
+
+void EmulatedFakeCameraDevice::drawSolid(YUVPixel* color)
+{
+ YUVPixel adjustedColor = *color;
+ changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
+
+ /* All Ys are the same. */
+ memset(mCurrentFrame, changeExposure(adjustedColor.Y), mTotalPixels);
+
+ /* Fill U, and V panes. */
+ uint8_t* U = mFrameU;
+ uint8_t* V = mFrameV;
+ for (int k = 0; k < mUVTotalNum; k++, U += mUVStep, V += mUVStep) {
+        *U = adjustedColor.U;
+        *V = adjustedColor.V;
+ }
+}
+
+void EmulatedFakeCameraDevice::drawStripes()
+{
+ /* Divide frame into 4 stripes. */
+ const int change_color_at = mFrameHeight / 4;
+ const int each_in_row = mUVInRow / mUVStep;
+ uint8_t* pY = mCurrentFrame;
+ for (int y = 0; y < mFrameHeight; y++, pY += mFrameWidth) {
+ /* Select the color. */
+ YUVPixel* color;
+ const int color_index = y / change_color_at;
+ if (color_index == 0) {
+ /* White stripe on top. */
+ color = &mWhiteYUV;
+ } else if (color_index == 1) {
+ /* Then the red stripe. */
+ color = &mRedYUV;
+ } else if (color_index == 2) {
+ /* Then the green stripe. */
+ color = &mGreenYUV;
+ } else {
+ /* And the blue stripe at the bottom. */
+ color = &mBlueYUV;
+ }
+ changeWhiteBalance(color->Y, color->U, color->V);
+
+ /* All Ys at the row are the same. */
+ memset(pY, changeExposure(color->Y), mFrameWidth);
+
+ /* Offset of the current row inside U/V panes. */
+ const int uv_off = (y / 2) * mUVInRow;
+ /* Fill U, and V panes. */
+ uint8_t* U = mFrameU + uv_off;
+ uint8_t* V = mFrameV + uv_off;
+ for (int k = 0; k < each_in_row; k++, U += mUVStep, V += mUVStep) {
+ *U = color->U;
+ *V = color->V;
+ }
+ }
+}
+
+int EmulatedFakeCameraDevice::rotateFrame()
+{
+ if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRotatedAt) >= mRotateFreq) {
+ mLastRotatedAt = systemTime(SYSTEM_TIME_MONOTONIC);
+ mCurrentFrameType++;
+ if (mCurrentFrameType > 2) {
+ mCurrentFrameType = 0;
+ }
+ if (mCurrentFrameType == 2) {
+ ALOGD("********** Rotated to the SOLID COLOR frame **********");
+ /* Solid color: lets rotate color too. */
+ if (mCurrentColor == &mWhiteYUV) {
+ ALOGD("----- Painting a solid RED frame -----");
+ mCurrentColor = &mRedYUV;
+ } else if (mCurrentColor == &mRedYUV) {
+ ALOGD("----- Painting a solid GREEN frame -----");
+ mCurrentColor = &mGreenYUV;
+ } else if (mCurrentColor == &mGreenYUV) {
+ ALOGD("----- Painting a solid BLUE frame -----");
+ mCurrentColor = &mBlueYUV;
+ } else {
+ /* Back to white. */
+ ALOGD("----- Painting a solid WHITE frame -----");
+ mCurrentColor = &mWhiteYUV;
+ }
+ } else if (mCurrentFrameType == 0) {
+ ALOGD("********** Rotated to the CHECKERBOARD frame **********");
+ } else if (mCurrentFrameType == 1) {
+ ALOGD("********** Rotated to the STRIPED frame **********");
+ }
+ }
+
+ return mCurrentFrameType;
+}
+
+#endif // EFCD_ROTATE_FRAME
+
+}; /* namespace android */
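
The checkerboard and square drawing above addresses chroma only through mFrameU, mFrameV, mUVStep, and mUVInRow, so the same loops serve several YUV 4:2:0 layouts. Below is a minimal standalone sketch of how those members could be derived for two common layouts (NV21 with an interleaved VU pane, and YV12 with separate planes); the formulas are illustrative assumptions consistent with drawSquare()'s indexing `(y / 2) * mUVInRow + (x / 2) * mUVStep`, not code from this HAL.

    #include <stdint.h>

    /* Hypothetical helper (not part of this HAL) showing chroma bookkeeping
     * consistent with the drawing code's offset computation. */
    struct UVLayout {
        uint8_t* frameU;     /* first U sample inside the frame buffer    */
        uint8_t* frameV;     /* first V sample inside the frame buffer    */
        int      uvStep;     /* byte distance between adjacent U samples  */
        int      uvInRow;    /* byte stride of one chroma row             */
        int      uvTotalNum; /* number of U (== number of V) samples      */
    };

    static UVLayout layoutFor(uint8_t* frame, int width, int height, bool nv21)
    {
        UVLayout l;
        const int ySize = width * height;
        l.uvTotalNum = (width / 2) * (height / 2);
        if (nv21) {
            /* NV21: Y pane followed by one interleaved V/U pane. */
            l.frameV  = frame + ySize;
            l.frameU  = l.frameV + 1;
            l.uvStep  = 2;      /* step over the interleaved partner sample */
            l.uvInRow = width;  /* a VU row carries width bytes in total    */
        } else {
            /* YV12: Y pane, then a full V pane, then a full U pane. */
            l.frameV  = frame + ySize;
            l.frameU  = l.frameV + ySize / 4;
            l.uvStep  = 1;
            l.uvInRow = width / 2;
        }
        return l;
    }
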
diff --git a/v3/EmulatedFakeCameraDevice.h b/v3/EmulatedFakeCameraDevice.h
new file mode 100755
index 0000000..f66f076
--- a/dev/null
+++ b/v3/EmulatedFakeCameraDevice.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_DEVICE_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_DEVICE_H
+
+/*
+ * Contains declaration of a class EmulatedFakeCameraDevice that encapsulates
+ * a fake camera device.
+ */
+
+#include "Converters.h"
+#include "EmulatedCameraDevice.h"
+
+/* This is used for debugging format / conversion issues. If EFCD_ROTATE_FRAME is
+ * set to 0, the frame content will always be the "checkerboard". Otherwise, if
+ * EFCD_ROTATE_FRAME is set to a non-zero value, the frame content will "rotate"
+ * from a "checkerboard" frame to a "white/red/green/blue stripes" frame, to a
+ * solid "white/red/green/blue" frame. Frame content rotation helps find bugs in
+ * format conversions.
+ */
+#define EFCD_ROTATE_FRAME 0
+
+namespace android {
+
+class EmulatedFakeCamera;
+
+/* Encapsulates a fake camera device.
+ * Fake camera device emulates a camera device by providing frames containing
+ * a black and white checker board, moving diagonally towards the 0,0 corner.
+ * There is also a green or red square that bounces inside the frame, changing
+ * its color when bouncing off the 0,0 corner.
+ */
+class EmulatedFakeCameraDevice : public EmulatedCameraDevice {
+public:
+ /* Constructs EmulatedFakeCameraDevice instance. */
+ explicit EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal);
+
+ /* Destructs EmulatedFakeCameraDevice instance. */
+ ~EmulatedFakeCameraDevice();
+
+ /***************************************************************************
+ * Emulated camera device abstract interface implementation.
+ * See declarations of these methods in EmulatedCameraDevice class for
+ * information on each of these methods.
+ **************************************************************************/
+
+public:
+ /* Connects to the camera device.
+ * Since there is no real device to connect to, this method does nothing,
+ * but changes the state.
+ */
+ status_t connectDevice();
+
+ /* Disconnects from the camera device.
+ * Since there is no real device to disconnect from, this method does
+ * nothing, but changes the state.
+ */
+ status_t disconnectDevice();
+
+ /* Starts the camera device. */
+ status_t startDevice(int width, int height, uint32_t pix_fmt);
+
+ /* Stops the camera device. */
+ status_t stopDevice();
+
+    /* Gets current preview frame into provided buffer. */
+ status_t getPreviewFrame(void* buffer);
+
+ /***************************************************************************
+ * Worker thread management overrides.
+ * See declarations of these methods in EmulatedCameraDevice class for
+ * information on each of these methods.
+ **************************************************************************/
+
+protected:
+ /* Implementation of the worker thread routine.
+ * This method simply sleeps for a period of time defined by the FPS property
+ * of the fake camera (simulating frame frequency), and then calls emulated
+ * camera's onNextFrameAvailable method.
+ */
+ bool inWorkerThread();
+
+ /****************************************************************************
+ * Fake camera device private API
+ ***************************************************************************/
+
+private:
+
+ /* Draws a black and white checker board in the current frame buffer. */
+ void drawCheckerboard();
+
+ /* Draws a square of the given color in the current frame buffer.
+ * Param:
+ * x, y - Coordinates of the top left corner of the square in the buffer.
+ * size - Size of the square's side.
+ * color - Square's color.
+ */
+ void drawSquare(int x, int y, int size, const YUVPixel* color);
+
+#if EFCD_ROTATE_FRAME
+ void drawSolid(YUVPixel* color);
+ void drawStripes();
+ int rotateFrame();
+#endif // EFCD_ROTATE_FRAME
+
+ /****************************************************************************
+ * Fake camera device data members
+ ***************************************************************************/
+
+private:
+ /*
+ * Pixel colors in YUV format used when drawing the checker board.
+ */
+
+ YUVPixel mBlackYUV;
+ YUVPixel mWhiteYUV;
+ YUVPixel mRedYUV;
+ YUVPixel mGreenYUV;
+ YUVPixel mBlueYUV;
+
+ /* Last time the frame has been redrawn. */
+ nsecs_t mLastRedrawn;
+
+ /*
+ * Precalculated values related to U/V panes.
+ */
+
+ /* U pane inside the framebuffer. */
+ uint8_t* mFrameU;
+
+ /* V pane inside the framebuffer. */
+ uint8_t* mFrameV;
+
+ /* Defines byte distance between adjacent U, and V values. */
+ int mUVStep;
+
+    /* Defines number of Us and Vs in a row inside the U/V panes.
+     * Note that if U/V panes are interleaved, this value reflects the total
+     * number of both Us and Vs in a single row of the interleaved UV pane. */
+    int mUVInRow;
+
+    /* Total number of U (and V) elements in the framebuffer. */
+    int mUVTotalNum;
+
+ /*
+ * Checkerboard drawing related stuff
+ */
+
+ int mCheckX;
+ int mCheckY;
+ int mCcounter;
+
+ /* Emulated FPS (frames per second).
+ * We will emulate 50 FPS. */
+ static const int mEmulatedFPS = 50;
+
+ /* Defines time (in nanoseconds) between redrawing the checker board.
+ * We will redraw the checker board every 15 milliseconds. */
+ static const nsecs_t mRedrawAfter = 15000000LL;
+
+#if EFCD_ROTATE_FRAME
+ /* Frame rotation frequency in nanosec (currently - 3 sec) */
+ static const nsecs_t mRotateFreq = 3000000000LL;
+
+ /* Last time the frame has rotated. */
+ nsecs_t mLastRotatedAt;
+
+ /* Type of the frame to display in the current rotation:
+ * 0 - Checkerboard.
+     * 1 - White/Red/Green/Blue horizontal stripes.
+ * 2 - Solid color. */
+ int mCurrentFrameType;
+
+ /* Color to use to paint the solid color frame. Colors will rotate between
+     * white, red, green, and blue each time rotation comes to the solid color
+ * frame. */
+ YUVPixel* mCurrentColor;
+#endif // EFCD_ROTATE_FRAME
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_DEVICE_H */
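
drawCheckerboard() and drawSquare() above treat YUVPixel (declared in Converters.h, which is not part of this hunk) as a plain three-component color with a get() that writes the components through plane pointers. A hypothetical reconstruction of that shape, for reading the drawing code; the actual declaration may differ:

    #include <stdint.h>

    /* Hypothetical reconstruction of the YUVPixel shape the drawing code
     * relies on; the real declaration lives in Converters.h. */
    struct YUVPixel {
        uint8_t Y;
        uint8_t U;
        uint8_t V;

        YUVPixel() : Y(0), U(0), V(0) {}
        YUVPixel(uint8_t y, uint8_t u, uint8_t v) : Y(y), U(u), V(v) {}

        /* Store the color through the supplied Y/U/V plane pointers. */
        void get(uint8_t* pY, uint8_t* pU, uint8_t* pV) const {
            *pY = Y;
            *pU = U;
            *pV = V;
        }
    };

Note the timing constants above: with mEmulatedFPS = 50 a frame period is 20 ms, while mRedrawAfter allows a redraw every 15 ms, so in practice each delivered frame gets a freshly drawn checkerboard.
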
diff --git a/v3/EmulatedQemuCamera.cpp b/v3/EmulatedQemuCamera.cpp
new file mode 100755
index 0000000..af1e324
--- a/dev/null
+++ b/v3/EmulatedQemuCamera.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedQemuCamera that encapsulates
+ * functionality of an emulated camera connected to the host.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_QemuCamera"
+#include <cutils/log.h>
+#include "EmulatedQemuCamera.h"
+#include "EmulatedCameraFactory.h"
+
+namespace android {
+
+EmulatedQemuCamera::EmulatedQemuCamera(int cameraId, struct hw_module_t* module)
+ : EmulatedCamera(cameraId, module),
+ mQemuCameraDevice(this)
+{
+}
+
+EmulatedQemuCamera::~EmulatedQemuCamera()
+{
+}
+
+/****************************************************************************
+ * EmulatedCamera virtual overrides.
+ ***************************************************************************/
+
+status_t EmulatedQemuCamera::Initialize(const char* device_name,
+ const char* frame_dims,
+ const char* facing_dir)
+{
+ ALOGV("%s:\n Name=%s\n Facing '%s'\n Dimensions=%s",
+ __FUNCTION__, device_name, facing_dir, frame_dims);
+ /* Save dimensions. */
+ mFrameDims = frame_dims;
+
+ /* Initialize camera device. */
+ status_t res = mQemuCameraDevice.Initialize(device_name);
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /* Initialize base class. */
+ res = EmulatedCamera::Initialize();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /*
+ * Set customizable parameters.
+ */
+
+ mParameters.set(EmulatedCamera::FACING_KEY, facing_dir);
+ mParameters.set(EmulatedCamera::ORIENTATION_KEY,
+ gEmulatedCameraFactory.getQemuCameraOrientation());
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, frame_dims);
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, frame_dims);
+
+ /*
+ * Use first dimension reported by the device to set current preview and
+ * picture sizes.
+ */
+
+ char first_dim[128];
+ /* Dimensions are separated with ',' */
+ const char* c = strchr(frame_dims, ',');
+ if (c == NULL) {
+ strncpy(first_dim, frame_dims, sizeof(first_dim));
+ first_dim[sizeof(first_dim)-1] = '\0';
+ } else if (static_cast<size_t>(c - frame_dims) < sizeof(first_dim)) {
+ memcpy(first_dim, frame_dims, c - frame_dims);
+ first_dim[c - frame_dims] = '\0';
+ } else {
+ memcpy(first_dim, frame_dims, sizeof(first_dim));
+ first_dim[sizeof(first_dim)-1] = '\0';
+ }
+
+ /* Width and height are separated with 'x' */
+ char* sep = strchr(first_dim, 'x');
+ if (sep == NULL) {
+ ALOGE("%s: Invalid first dimension format in %s",
+ __FUNCTION__, frame_dims);
+ return EINVAL;
+ }
+
+ *sep = '\0';
+ const int x = atoi(first_dim);
+ const int y = atoi(sep + 1);
+ mParameters.setPreviewSize(x, y);
+ mParameters.setPictureSize(x, y);
+
+ ALOGV("%s: Qemu camera %s is initialized. Current frame is %dx%d",
+ __FUNCTION__, device_name, x, y);
+
+ return NO_ERROR;
+}
+
+EmulatedCameraDevice* EmulatedQemuCamera::getCameraDevice()
+{
+ return &mQemuCameraDevice;
+}
+
+} /* namespace android */
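
Initialize() above keys the initial preview and picture sizes off the first entry of the comma-separated frame_dims string. A standalone sketch of the same parse; the sample input "640x480,352x288" is an assumption for illustration, not a value the service is guaranteed to report:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Standalone version of the first-dimension parse in
     * EmulatedQemuCamera::Initialize(). Assumed input: "WxH,WxH,..." */
    int main()
    {
        const char* frame_dims = "640x480,352x288";  /* assumed sample input */

        char first_dim[128];
        const char* c = strchr(frame_dims, ',');
        size_t len = (c != NULL) ? (size_t)(c - frame_dims) : strlen(frame_dims);
        if (len >= sizeof(first_dim)) {
            len = sizeof(first_dim) - 1;             /* truncate, as above */
        }
        memcpy(first_dim, frame_dims, len);
        first_dim[len] = '\0';

        char* sep = strchr(first_dim, 'x');
        if (sep == NULL) {
            return 1;                                /* malformed dimension */
        }
        *sep = '\0';
        printf("%dx%d\n", atoi(first_dim), atoi(sep + 1)); /* prints 640x480 */
        return 0;
    }
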
diff --git a/v3/EmulatedQemuCamera.h b/v3/EmulatedQemuCamera.h
new file mode 100755
index 0000000..1b826c7
--- a/dev/null
+++ b/v3/EmulatedQemuCamera.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_H
+
+/*
+ * Contains declaration of a class EmulatedQemuCamera that encapsulates
+ * functionality of an emulated camera connected to the host.
+ */
+
+#include "EmulatedCamera.h"
+#include "EmulatedQemuCameraDevice.h"
+
+namespace android {
+
+/* Encapsulates functionality of an emulated camera connected to the host.
+ */
+class EmulatedQemuCamera : public EmulatedCamera {
+public:
+ /* Constructs EmulatedQemuCamera instance. */
+ EmulatedQemuCamera(int cameraId, struct hw_module_t* module);
+
+ /* Destructs EmulatedQemuCamera instance. */
+ ~EmulatedQemuCamera();
+
+ /***************************************************************************
+ * EmulatedCamera virtual overrides.
+ **************************************************************************/
+
+public:
+ /* Initializes EmulatedQemuCamera instance. */
+ status_t Initialize(const char* device_name,
+ const char* frame_dims,
+ const char* facing_dir);
+
+ /***************************************************************************
+ * EmulatedCamera abstract API implementation.
+ **************************************************************************/
+
+protected:
+    /* Gets emulated camera device used by this instance of the emulated camera.
+ */
+ EmulatedCameraDevice* getCameraDevice();
+
+ /***************************************************************************
+     * Data members.
+ **************************************************************************/
+
+protected:
+ /* Contained qemu camera device object. */
+ EmulatedQemuCameraDevice mQemuCameraDevice;
+
+ /* Supported frame dimensions reported by the camera device. */
+ String8 mFrameDims;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_H */
diff --git a/v3/EmulatedQemuCamera2.cpp b/v3/EmulatedQemuCamera2.cpp
new file mode 100644
index 0000000..2c94f0e
--- a/dev/null
+++ b/v3/EmulatedQemuCamera2.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedQemuCamera2 that encapsulates
+ * functionality of a host webcam with further processing to simulate the
+ * capabilities of a v2 camera device.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_QemuCamera2"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include "EmulatedQemuCamera2.h"
+#include "EmulatedCameraFactory.h"
+
+namespace android {
+
+EmulatedQemuCamera2::EmulatedQemuCamera2(int cameraId,
+ bool facingBack,
+ struct hw_module_t* module)
+ : EmulatedCamera2(cameraId,module),
+ mFacingBack(facingBack)
+{
+ ALOGD("Constructing emulated qemu camera 2 facing %s",
+ facingBack ? "back" : "front");
+}
+
+EmulatedQemuCamera2::~EmulatedQemuCamera2()
+{
+}
+
+/****************************************************************************
+ * Public API overrides
+ ***************************************************************************/
+
+status_t EmulatedQemuCamera2::Initialize()
+{
+ return NO_ERROR;
+}
+
+}; /* namespace android */
diff --git a/v3/EmulatedQemuCamera2.h b/v3/EmulatedQemuCamera2.h
new file mode 100644
index 0000000..520ccce
--- a/dev/null
+++ b/v3/EmulatedQemuCamera2.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA2_H
+#define HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA2_H
+
+/*
+ * Contains declaration of a class EmulatedQemuCamera2 that encapsulates
+ * functionality of a host webcam with added processing to implement version 2
+ * of the camera device interface.
+ */
+
+#include "EmulatedCamera2.h"
+
+namespace android {
+
+/* Encapsulates functionality of an advanced fake camera based on real host camera data.
+ */
+class EmulatedQemuCamera2 : public EmulatedCamera2 {
+public:
+    /* Constructs EmulatedQemuCamera2 instance. */
+ EmulatedQemuCamera2(int cameraId, bool facingBack, struct hw_module_t* module);
+
+    /* Destructs EmulatedQemuCamera2 instance. */
+ ~EmulatedQemuCamera2();
+
+ /****************************************************************************
+ * EmulatedCamera2 virtual overrides.
+ ***************************************************************************/
+
+public:
+ /* Initializes EmulatedQemuCamera2 instance. */
+ status_t Initialize();
+
+ /****************************************************************************
+ * EmulatedCamera abstract API implementation.
+ ***************************************************************************/
+
+protected:
+
+ /****************************************************************************
+     * Data members.
+ ***************************************************************************/
+
+protected:
+ /* Facing back (true) or front (false) switch. */
+ bool mFacingBack;
+
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA2_H */
diff --git a/v3/EmulatedQemuCameraDevice.cpp b/v3/EmulatedQemuCameraDevice.cpp
new file mode 100755
index 0000000..07837af
--- a/dev/null
+++ b/v3/EmulatedQemuCameraDevice.cpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedQemuCameraDevice that encapsulates
+ * an emulated camera device connected to the host.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_QemuDevice"
+#include <cutils/log.h>
+#include "EmulatedQemuCamera.h"
+#include "EmulatedQemuCameraDevice.h"
+
+namespace android {
+
+EmulatedQemuCameraDevice::EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal)
+ : EmulatedCameraDevice(camera_hal),
+ mQemuClient(),
+ mPreviewFrame(NULL)
+{
+}
+
+EmulatedQemuCameraDevice::~EmulatedQemuCameraDevice()
+{
+ if (mPreviewFrame != NULL) {
+ delete[] mPreviewFrame;
+ }
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedQemuCameraDevice::Initialize(const char* device_name)
+{
+ /* Connect to the service. */
+ char connect_str[256];
+ snprintf(connect_str, sizeof(connect_str), "name=%s", device_name);
+ status_t res = mQemuClient.connectClient(connect_str);
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /* Initialize base class. */
+ res = EmulatedCameraDevice::Initialize();
+ if (res == NO_ERROR) {
+ ALOGV("%s: Connected to the emulated camera service '%s'",
+ __FUNCTION__, device_name);
+ mDeviceName = device_name;
+ } else {
+ mQemuClient.queryDisconnect();
+ }
+
+ return res;
+}
+
+/****************************************************************************
+ * Emulated camera device abstract interface implementation.
+ ***************************************************************************/
+
+status_t EmulatedQemuCameraDevice::connectDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isInitialized()) {
+ ALOGE("%s: Qemu camera device is not initialized.", __FUNCTION__);
+ return EINVAL;
+ }
+ if (isConnected()) {
+ ALOGW("%s: Qemu camera device '%s' is already connected.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return NO_ERROR;
+ }
+
+ /* Connect to the camera device via emulator. */
+ const status_t res = mQemuClient.queryConnect();
+ if (res == NO_ERROR) {
+ ALOGV("%s: Connected to device '%s'",
+ __FUNCTION__, (const char*)mDeviceName);
+ mState = ECDS_CONNECTED;
+ } else {
+ ALOGE("%s: Connection to device '%s' failed",
+ __FUNCTION__, (const char*)mDeviceName);
+ }
+
+ return res;
+}
+
+status_t EmulatedQemuCameraDevice::disconnectDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isConnected()) {
+ ALOGW("%s: Qemu camera device '%s' is already disconnected.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return NO_ERROR;
+ }
+ if (isStarted()) {
+ ALOGE("%s: Cannot disconnect from the started device '%s.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return EINVAL;
+ }
+
+ /* Disconnect from the camera device via emulator. */
+ const status_t res = mQemuClient.queryDisconnect();
+ if (res == NO_ERROR) {
+ ALOGV("%s: Disonnected from device '%s'",
+ __FUNCTION__, (const char*)mDeviceName);
+ mState = ECDS_INITIALIZED;
+ } else {
+ ALOGE("%s: Disconnection from device '%s' failed",
+ __FUNCTION__, (const char*)mDeviceName);
+ }
+
+ return res;
+}
+
+status_t EmulatedQemuCameraDevice::startDevice(int width,
+ int height,
+ uint32_t pix_fmt)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isConnected()) {
+ ALOGE("%s: Qemu camera device '%s' is not connected.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return EINVAL;
+ }
+ if (isStarted()) {
+ ALOGW("%s: Qemu camera device '%s' is already started.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return NO_ERROR;
+ }
+
+ status_t res = EmulatedCameraDevice::commonStartDevice(width, height, pix_fmt);
+ if (res != NO_ERROR) {
+ ALOGE("%s: commonStartDevice failed", __FUNCTION__);
+ return res;
+ }
+
+ /* Allocate preview frame buffer. */
+ /* TODO: Watch out for preview format changes! At this point we implement
+ * RGB32 only.*/
+ mPreviewFrame = new uint32_t[mTotalPixels];
+ if (mPreviewFrame == NULL) {
+ ALOGE("%s: Unable to allocate %d bytes for preview frame",
+              __FUNCTION__, mTotalPixels * 4);
+ return ENOMEM;
+ }
+
+ /* Start the actual camera device. */
+ res = mQemuClient.queryStart(mPixelFormat, mFrameWidth, mFrameHeight);
+ if (res == NO_ERROR) {
+ ALOGV("%s: Qemu camera device '%s' is started for %.4s[%dx%d] frames",
+ __FUNCTION__, (const char*)mDeviceName,
+ reinterpret_cast<const char*>(&mPixelFormat),
+ mFrameWidth, mFrameHeight);
+ mState = ECDS_STARTED;
+ } else {
+ ALOGE("%s: Unable to start device '%s' for %.4s[%dx%d] frames",
+ __FUNCTION__, (const char*)mDeviceName,
+ reinterpret_cast<const char*>(&pix_fmt), width, height);
+ }
+
+ return res;
+}
+
+status_t EmulatedQemuCameraDevice::stopDevice()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ if (!isStarted()) {
+ ALOGW("%s: Qemu camera device '%s' is not started.",
+ __FUNCTION__, (const char*)mDeviceName);
+ return NO_ERROR;
+ }
+
+ /* Stop the actual camera device. */
+ status_t res = mQemuClient.queryStop();
+ if (res == NO_ERROR) {
+        if (mPreviewFrame != NULL) {
+ delete[] mPreviewFrame;
+ mPreviewFrame = NULL;
+ }
+ EmulatedCameraDevice::commonStopDevice();
+ mState = ECDS_CONNECTED;
+ ALOGV("%s: Qemu camera device '%s' is stopped",
+ __FUNCTION__, (const char*)mDeviceName);
+ } else {
+ ALOGE("%s: Unable to stop device '%s'",
+ __FUNCTION__, (const char*)mDeviceName);
+ }
+
+ return res;
+}
+
+/****************************************************************************
+ * EmulatedCameraDevice virtual overrides
+ ***************************************************************************/
+
+status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer)
+{
+ ALOGW_IF(mPreviewFrame == NULL, "%s: No preview frame", __FUNCTION__);
+ if (mPreviewFrame != NULL) {
+ memcpy(buffer, mPreviewFrame, mTotalPixels * 4);
+ return 0;
+ } else {
+ return EmulatedCameraDevice::getCurrentPreviewFrame(buffer);
+ }
+}
+
+/****************************************************************************
+ * Worker thread management overrides.
+ ***************************************************************************/
+
+bool EmulatedQemuCameraDevice::inWorkerThread()
+{
+ /* Wait till FPS timeout expires, or thread exit message is received. */
+ WorkerThread::SelectRes res =
+ getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
+ if (res == WorkerThread::EXIT_THREAD) {
+ ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
+ return false;
+ }
+
+ /* Query frames from the service. */
+ status_t query_res = mQemuClient.queryFrame(mCurrentFrame, mPreviewFrame,
+ mFrameBufferSize,
+ mTotalPixels * 4,
+ mWhiteBalanceScale[0],
+ mWhiteBalanceScale[1],
+ mWhiteBalanceScale[2],
+ mExposureCompensation);
+ if (query_res == NO_ERROR) {
+ /* Timestamp the current frame, and notify the camera HAL. */
+ mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+ mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
+ return true;
+ } else {
+ ALOGE("%s: Unable to get current video frame: %s",
+ __FUNCTION__, strerror(query_res));
+ mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+ return false;
+ }
+}
+
+}; /* namespace android */
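
inWorkerThread() above paces frame delivery by blocking in Select() for one frame period before querying the next frame: with mEmulatedFPS = 50, the timeout is 1000000 / 50 = 20000 microseconds. A simplified sketch of that pacing loop; the Select() stand-in below only sleeps and ignores the control file descriptor the real WorkerThread watches:

    #include <stdio.h>
    #include <unistd.h>

    /* Simplified stand-in for WorkerThread::Select(): the real call also
     * watches a control fd so the loop can be told to exit. */
    enum SelectRes { TIMEOUT, EXIT_THREAD };

    static SelectRes fakeSelect(long timeout_us)
    {
        usleep(timeout_us);
        return TIMEOUT;
    }

    int main()
    {
        const int emulatedFPS = 50;
        const long framePeriodUs = 1000000L / emulatedFPS;  /* 20000 us */
        for (int frame = 0; frame < 3; frame++) {
            if (fakeSelect(framePeriodUs) == EXIT_THREAD) {
                break;                    /* worker thread was terminated */
            }
            printf("deliver frame %d after ~%ld us\n", frame, framePeriodUs);
        }
        return 0;
    }
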
diff --git a/v3/EmulatedQemuCameraDevice.h b/v3/EmulatedQemuCameraDevice.h
new file mode 100755
index 0000000..8ef562b
--- a/dev/null
+++ b/v3/EmulatedQemuCameraDevice.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_DEVICE_H
+#define HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_DEVICE_H
+
+/*
+ * Contains declaration of a class EmulatedQemuCameraDevice that encapsulates
+ * an emulated camera device connected to the host.
+ */
+
+#include "EmulatedCameraDevice.h"
+#include "QemuClient.h"
+
+namespace android {
+
+class EmulatedQemuCamera;
+
+/* Encapsulates an emulated camera device connected to the host.
+ */
+class EmulatedQemuCameraDevice : public EmulatedCameraDevice {
+public:
+ /* Constructs EmulatedQemuCameraDevice instance. */
+ explicit EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal);
+
+ /* Destructs EmulatedQemuCameraDevice instance. */
+ ~EmulatedQemuCameraDevice();
+
+ /***************************************************************************
+ * Public API
+ **************************************************************************/
+
+public:
+ /* Initializes EmulatedQemuCameraDevice instance.
+ * Param:
+ * device_name - Name of the camera device connected to the host. The name
+ * that is used here must have been reported by the 'factory' camera
+ * service when it listed camera devices connected to the host.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ status_t Initialize(const char* device_name);
+
+ /***************************************************************************
+ * Emulated camera device abstract interface implementation.
+ * See declarations of these methods in EmulatedCameraDevice class for
+ * information on each of these methods.
+ **************************************************************************/
+
+public:
+ /* Connects to the camera device. */
+ status_t connectDevice();
+
+ /* Disconnects from the camera device. */
+ status_t disconnectDevice();
+
+ /* Starts capturing frames from the camera device. */
+ status_t startDevice(int width, int height, uint32_t pix_fmt);
+
+ /* Stops capturing frames from the camera device. */
+ status_t stopDevice();
+
+ /***************************************************************************
+ * EmulatedCameraDevice virtual overrides
+ * See declarations of these methods in EmulatedCameraDevice class for
+ * information on each of these methods.
+ **************************************************************************/
+
+public:
+    /* Gets current preview frame into provided buffer.
+ * We override this method in order to provide preview frames cached in this
+ * object.
+ */
+ status_t getCurrentPreviewFrame(void* buffer);
+
+ /***************************************************************************
+ * Worker thread management overrides.
+ * See declarations of these methods in EmulatedCameraDevice class for
+ * information on each of these methods.
+ **************************************************************************/
+
+protected:
+ /* Implementation of the worker thread routine. */
+ bool inWorkerThread();
+
+ /***************************************************************************
+ * Qemu camera device data members
+ **************************************************************************/
+
+private:
+ /* Qemu client that is used to communicate with the 'emulated camera'
+ * service, created for this instance in the emulator. */
+ CameraQemuClient mQemuClient;
+
+ /* Name of the camera device connected to the host. */
+ String8 mDeviceName;
+
+ /* Current preview framebuffer. */
+ uint32_t* mPreviewFrame;
+
+ /* Emulated FPS (frames per second).
+ * We will emulate 50 FPS. */
+ static const int mEmulatedFPS = 50;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_DEVICE_H */
diff --git a/v3/JpegCompressor.cpp b/v3/JpegCompressor.cpp
new file mode 100644
index 0000000..8014ccf
--- a/dev/null
+++ b/v3/JpegCompressor.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class NV21JpegCompressor that encapsulates a
+ * converter between NV21, and JPEG formats.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_JPEG"
+#include <cutils/log.h>
+#include <assert.h>
+#include <dlfcn.h>
+#include "JpegCompressor.h"
+
+namespace android {
+
+void* NV21JpegCompressor::mDl = NULL;
+
+static void* getSymbol(void* dl, const char* signature) {
+ void* res = dlsym(dl, signature);
+ assert (res != NULL);
+
+ return res;
+}
+
+typedef void (*InitFunc)(JpegStub* stub, int* strides);
+typedef void (*CleanupFunc)(JpegStub* stub);
+typedef int (*CompressFunc)(JpegStub* stub, const void* image,
+ int width, int height, int quality);
+typedef void (*GetCompressedImageFunc)(JpegStub* stub, void* buff);
+typedef size_t (*GetCompressedSizeFunc)(JpegStub* stub);
+
+NV21JpegCompressor::NV21JpegCompressor()
+{
+ const char dlName[] = "/system/lib/hw/camera.goldfish.jpeg.so";
+ if (mDl == NULL) {
+ mDl = dlopen(dlName, RTLD_NOW);
+ }
+ assert(mDl != NULL);
+
+ InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init");
+ (*f)(&mStub, mStrides);
+}
+
+NV21JpegCompressor::~NV21JpegCompressor()
+{
+ CleanupFunc f = (CleanupFunc)getSymbol(mDl, "JpegStub_cleanup");
+ (*f)(&mStub);
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t NV21JpegCompressor::compressRawImage(const void* image,
+ int width,
+ int height,
+ int quality)
+{
+ mStrides[0] = width;
+ mStrides[1] = width;
+ CompressFunc f = (CompressFunc)getSymbol(mDl, "JpegStub_compress");
+ return (status_t)(*f)(&mStub, image, width, height, quality);
+}
+
+
+size_t NV21JpegCompressor::getCompressedSize()
+{
+ GetCompressedSizeFunc f = (GetCompressedSizeFunc)getSymbol(mDl,
+ "JpegStub_getCompressedSize");
+ return (*f)(&mStub);
+}
+
+void NV21JpegCompressor::getCompressedImage(void* buff)
+{
+ GetCompressedImageFunc f = (GetCompressedImageFunc)getSymbol(mDl,
+ "JpegStub_getCompressedImage");
+ (*f)(&mStub, buff);
+}
+
+}; /* namespace android */
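
The class is used as a three-call sequence: compressRawImage(), then getCompressedSize() to size the destination, then getCompressedImage() to copy the bytes out. A hedged usage sketch; the caller-owned nv21Frame buffer (width * height * 3 / 2 bytes) and the quality value of 90 are assumptions for illustration:

    #include <stdlib.h>
    #include "JpegCompressor.h"

    /* Usage sketch for NV21JpegCompressor. Returns a malloc()ed JPEG buffer
     * (caller frees), or NULL on failure. */
    static void* compressToJpeg(const void* nv21Frame, int width, int height,
                                size_t* outSize)
    {
        android::NV21JpegCompressor compressor;
        if (compressor.compressRawImage(nv21Frame, width, height, 90) != 0) {
            return NULL;                          /* compression failed */
        }
        *outSize = compressor.getCompressedSize();
        void* jpeg = malloc(*outSize);
        if (jpeg != NULL) {
            compressor.getCompressedImage(jpeg);  /* copy the JPEG bytes out */
        }
        return jpeg;
    }
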
diff --git a/v3/JpegCompressor.h b/v3/JpegCompressor.h
new file mode 100644
index 0000000..a6454d2
--- a/dev/null
+++ b/v3/JpegCompressor.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_JPEG_COMPRESSOR_H
+#define HW_EMULATOR_CAMERA_JPEG_COMPRESSOR_H
+
+/*
+ * Contains declaration of a class NV21JpegCompressor that encapsulates a
+ * converter between NV21, and JPEG formats.
+ */
+
+#include "JpegStub.h"
+#include <utils/threads.h>
+
+namespace android {
+
+/* Encapsulates a converter between NV21, and JPEG formats.
+ */
+class NV21JpegCompressor
+{
+public:
+ /* Constructs JpegCompressor instance. */
+ NV21JpegCompressor();
+ /* Destructs JpegCompressor instance. */
+ ~NV21JpegCompressor();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /* Compresses raw NV21 image into a JPEG.
+ * The compressed image will be saved in mStream member of this class. Use
+ * getCompressedSize method to obtain buffer size of the compressed image,
+ * and getCompressedImage to copy out the compressed image.
+ * Param:
+ * image - Raw NV21 image.
+ * width, height - Image dimensions.
+ * quality - JPEG quality.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ *
+ */
+ status_t compressRawImage(const void* image,
+ int width,
+ int height,
+ int quality);
+
+ /* Get size of the compressed JPEG buffer.
+ * This method must be called only after a successful completion of
+ * compressRawImage call.
+ * Return:
+ * Size of the compressed JPEG buffer.
+ */
+ size_t getCompressedSize();
+
+ /* Copies out compressed JPEG buffer.
+ * This method must be called only after a successful completion of
+ * compressRawImage call.
+ * Param:
+ * buff - Buffer where to copy the JPEG. Must be large enough to contain the
+ * entire image.
+ */
+ void getCompressedImage(void* buff);
+
+ /****************************************************************************
+ * Class data
+ ***************************************************************************/
+
+protected:
+ /* Strides for Y (the first element), and UV (the second one) panes. */
+ int mStrides[2];
+
+private:
+ // library handle to dlopen
+ static void* mDl;
+ JpegStub mStub;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_JPEG_COMPRESSOR_H */
diff --git a/v3/JpegStub.cpp b/v3/JpegStub.cpp
new file mode 100644
index 0000000..084f5fc
--- a/dev/null
+++ b/v3/JpegStub.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_JPEGStub"
+#include <errno.h>
+#include <cutils/log.h>
+#include <YuvToJpegEncoder.h>
+
+#include "JpegStub.h"
+
+
+extern "C" void JpegStub_init(JpegStub* stub, int* strides) {
+ stub->mInternalEncoder = (void*) new Yuv420SpToJpegEncoder(strides);
+ stub->mInternalStream = (void*)new SkDynamicMemoryWStream();
+}
+
+extern "C" void JpegStub_cleanup(JpegStub* stub) {
+ delete((Yuv420SpToJpegEncoder*)stub->mInternalEncoder);
+ delete((SkDynamicMemoryWStream*)stub->mInternalStream);
+}
+
+extern "C" int JpegStub_compress(JpegStub* stub, const void* image,
+ int width, int height, int quality)
+{
+ void* pY = const_cast<void*>(image);
+ int offsets[2];
+ offsets[0] = 0;
+ offsets[1] = width * height;
+
+ Yuv420SpToJpegEncoder* encoder =
+ (Yuv420SpToJpegEncoder*)stub->mInternalEncoder;
+ SkDynamicMemoryWStream* stream =
+ (SkDynamicMemoryWStream*)stub->mInternalStream;
+ if (encoder->encode(stream, pY, width, height, offsets, quality)) {
+ ALOGV("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes",
+ __FUNCTION__, (width * height * 12) / 8,
+ width, height, stream->getOffset());
+ return 0;
+ } else {
+ ALOGE("%s: JPEG compression failed", __FUNCTION__);
+ return errno ? errno: EINVAL;
+ }
+}
+
+extern "C" void JpegStub_getCompressedImage(JpegStub* stub, void* buff) {
+ SkDynamicMemoryWStream* stream =
+ (SkDynamicMemoryWStream*)stub->mInternalStream;
+ stream->copyTo(buff);
+}
+
+extern "C" size_t JpegStub_getCompressedSize(JpegStub* stub) {
+ SkDynamicMemoryWStream* stream =
+ (SkDynamicMemoryWStream*)stub->mInternalStream;
+ return stream->getOffset();
+}
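
JpegStub_compress() derives the chroma offset purely from geometry: the Y pane occupies width * height bytes and the interleaved VU data follows it, so a full NV21 frame is width * height * 3 / 2 bytes, which is the (width * height * 12) / 8 figure in the log line above. A worked check for an assumed 640x480 frame:

    #include <stdio.h>

    /* Worked example of the NV21 sizing used by JpegStub_compress(),
     * for an assumed 640x480 frame. */
    int main()
    {
        const int width = 640, height = 480;
        const int yBytes  = width * height;   /* 307200: offsets[1]     */
        const int uvBytes = yBytes / 2;       /* 153600: interleaved VU */
        printf("offsets[1] = %d, total = %d bytes\n", yBytes, yBytes + uvBytes);
        printf("log formula: %d bytes\n", (width * height * 12) / 8);
        /* Both totals print 460800 bytes for the full frame. */
        return 0;
    }
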
diff --git a/v3/JpegStub.h b/v3/JpegStub.h
new file mode 100644
index 0000000..ad00a54
--- a/dev/null
+++ b/v3/JpegStub.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JPEGSTUB_H_
+#define JPEGSTUB_H_
+
+extern "C" {
+
+struct JpegStub {
+ void* mInternalEncoder;
+ void* mInternalStream;
+};
+
+void JpegStub_init(JpegStub* stub, int* strides);
+void JpegStub_cleanup(JpegStub* stub);
+int JpegStub_compress(JpegStub* stub, const void* image,
+ int width, int height, int quality);
+void JpegStub_getCompressedImage(JpegStub* stub, void* buff);
+size_t JpegStub_getCompressedSize(JpegStub* stub);
+
+};
+#endif // JPEGSTUB_H_
diff --git a/v3/MCameraParameters.cpp b/v3/MCameraParameters.cpp
new file mode 100644
index 0000000..b8ab05d
--- a/dev/null
+++ b/v3/MCameraParameters.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MCAMERAPARAMETERS__
+#define __MCAMERAPARAMETERS__
+
+#define LOG_TAG "MCameraParameters"
+
+#include <cutils/properties.h>
+#include <math.h>
+#include <utils/Errors.h>
+#include <string.h>
+#include <stdlib.h>
+#include "MCameraParameters.h"
+
+/*===========================================================================
+ * * FUNCTION : MCameraParameters
+ * *
+ * * DESCRIPTION: constructor of MCameraParameters
+ * *
+ * * PARAMETERS : none
+ * *
+ * * RETURN : None
+ * *==========================================================================*/
+MCameraParameters::MCameraParameters()
+    : CameraParameters(), mFd(-1)
+{
+}
+
+
+/*===========================================================================
+ * * FUNCTION   : MCameraParameters
+ * *
+ * * DESCRIPTION: constructor of MCameraParameters from a parameter string
+ * *
+ * * PARAMETERS : params - parameter string (String8)
+ * *
+ * * RETURN : None
+ * *==========================================================================*/
+MCameraParameters::MCameraParameters(const String8 &params)
+ : CameraParameters(params), mFd(-1)
+{
+}
+
+/*===========================================================================
+ * * FUNCTION   : MCameraParameters
+ * *
+ * * DESCRIPTION: constructor of MCameraParameters from a parameter string
+ * *              and a file descriptor
+ * *
+ * * PARAMETERS : params - parameter string (String8); fd - file descriptor
+ * *
+ * * RETURN : None
+ * *==========================================================================*/
+MCameraParameters::MCameraParameters(const String8 &params, int fd)
+ : CameraParameters(params), mFd(fd)
+{
+}
+
+/*===========================================================================
+ * * FUNCTION : ~MCameraParameters
+ * *
+ * * DESCRIPTION: destructor of MCameraParameters
+ * *
+ * * PARAMETERS : none
+ * *
+ * * RETURN : None
+ * *==========================================================================*/
+MCameraParameters::~MCameraParameters()
+{
+
+}
+#endif
diff --git a/v3/PreviewWindow.cpp b/v3/PreviewWindow.cpp
new file mode 100755
index 0000000..4101ed9
--- a/dev/null
+++ b/v3/PreviewWindow.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class PreviewWindow that encapsulates
+ * functionality of a preview window set via set_preview_window camera HAL API.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Preview"
+#include <cutils/log.h>
+#include <ui/Rect.h>
+#include <ui/GraphicBufferMapper.h>
+#include "EmulatedCameraDevice.h"
+#include "PreviewWindow.h"
+
+namespace android {
+
+PreviewWindow::PreviewWindow()
+ : mPreviewWindow(NULL),
+ mLastPreviewed(0),
+ mPreviewFrameWidth(0),
+ mPreviewFrameHeight(0),
+ mPreviewEnabled(false)
+{
+}
+
+PreviewWindow::~PreviewWindow()
+{
+}
+
+/****************************************************************************
+ * Camera API
+ ***************************************************************************/
+
+status_t PreviewWindow::setPreviewWindow(struct preview_stream_ops* window,
+ int preview_fps)
+{
+ ALOGV("%s: current: %p -> new: %p", __FUNCTION__, mPreviewWindow, window);
+
+ status_t res = NO_ERROR;
+ Mutex::Autolock locker(&mObjectLock);
+
+ /* Reset preview info. */
+ mPreviewFrameWidth = mPreviewFrameHeight = 0;
+ mPreviewAfter = 0;
+ mLastPreviewed = 0;
+
+ if (window != NULL) {
+ /* The CPU will write each frame to the preview window buffer.
+ * Note that we delay setting preview window buffer geometry until
+ * frames start to come in. */
+ res = window->set_usage(window, GRALLOC_USAGE_SW_WRITE_OFTEN);
+ if (res == NO_ERROR) {
+ /* Set preview frequency. */
+ mPreviewAfter = 1000000 / preview_fps;
+ } else {
+ window = NULL;
+ res = -res; // set_usage returns a negative errno.
+ ALOGE("%s: Error setting preview window usage %d -> %s",
+ __FUNCTION__, res, strerror(res));
+ }
+ }
+ mPreviewWindow = window;
+
+ return res;
+}
+
+status_t PreviewWindow::startPreview()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mPreviewEnabled = true;
+
+ return NO_ERROR;
+}
+
+void PreviewWindow::stopPreview()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ Mutex::Autolock locker(&mObjectLock);
+ mPreviewEnabled = false;
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+void PreviewWindow::onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev)
+{
+ int res;
+ Mutex::Autolock locker(&mObjectLock);
+
+ if (!isPreviewEnabled() || mPreviewWindow == NULL || !isPreviewTime()) {
+ return;
+ }
+
+ /* Make sure that preview window dimensions are OK with the camera device */
+ if (adjustPreviewDimensions(camera_dev)) {
+ /* Need to set / adjust buffer geometry for the preview window.
+ * Note that in the emulator preview window uses only RGB for pixel
+ * formats. */
+ ALOGV("%s: Adjusting preview windows %p geometry to %dx%d",
+ __FUNCTION__, mPreviewWindow, mPreviewFrameWidth,
+ mPreviewFrameHeight);
+ res = mPreviewWindow->set_buffers_geometry(mPreviewWindow,
+ mPreviewFrameWidth,
+ mPreviewFrameHeight,
+ HAL_PIXEL_FORMAT_RGBA_8888);
+ if (res != NO_ERROR) {
+ ALOGE("%s: Error in set_buffers_geometry %d -> %s",
+ __FUNCTION__, -res, strerror(-res));
+ return;
+ }
+ }
+
+ /*
+ * Push new frame to the preview window.
+ */
+
+ /* Dequeue preview window buffer for the frame. */
+ buffer_handle_t* buffer = NULL;
+ int stride = 0;
+ res = mPreviewWindow->dequeue_buffer(mPreviewWindow, &buffer, &stride);
+ if (res != NO_ERROR || buffer == NULL) {
+ ALOGE("%s: Unable to dequeue preview window buffer: %d -> %s",
+ __FUNCTION__, -res, strerror(-res));
+ return;
+ }
+
+    /* Let the preview window lock the buffer. */
+ res = mPreviewWindow->lock_buffer(mPreviewWindow, buffer);
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to lock preview window buffer: %d -> %s",
+ __FUNCTION__, -res, strerror(-res));
+ mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+ return;
+ }
+
+    /* Now let the graphics framework lock the buffer, and provide
+     * us with the framebuffer data address. */
+ void* img = NULL;
+ const Rect rect(mPreviewFrameWidth, mPreviewFrameHeight);
+ GraphicBufferMapper& grbuffer_mapper(GraphicBufferMapper::get());
+ res = grbuffer_mapper.lock(*buffer, GRALLOC_USAGE_SW_WRITE_OFTEN, rect, &img);
+ if (res != NO_ERROR) {
+ ALOGE("%s: grbuffer_mapper.lock failure: %d -> %s",
+ __FUNCTION__, res, strerror(res));
+ mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+ return;
+ }
+
+    /* Frames come in YV12/NV12/NV21 format. Since the preview window doesn't
+     * support those formats, we need to obtain the frame in RGB32. */
+ res = camera_dev->getCurrentPreviewFrame(img);
+ if (res == NO_ERROR) {
+ /* Show it. */
+ mPreviewWindow->set_timestamp(mPreviewWindow, timestamp);
+ mPreviewWindow->enqueue_buffer(mPreviewWindow, buffer);
+ } else {
+ ALOGE("%s: Unable to obtain preview frame: %d", __FUNCTION__, res);
+ mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+ }
+ grbuffer_mapper.unlock(*buffer);
+}
+
+/***************************************************************************
+ * Private API
+ **************************************************************************/
+
+bool PreviewWindow::adjustPreviewDimensions(EmulatedCameraDevice* camera_dev)
+{
+ /* Match the cached frame dimensions against the actual ones. */
+ if (mPreviewFrameWidth == camera_dev->getFrameWidth() &&
+ mPreviewFrameHeight == camera_dev->getFrameHeight()) {
+ /* They match. */
+ return false;
+ }
+
+ /* They don't match: adjust the cache. */
+ mPreviewFrameWidth = camera_dev->getFrameWidth();
+ mPreviewFrameHeight = camera_dev->getFrameHeight();
+
+ return true;
+}
+
+bool PreviewWindow::isPreviewTime()
+{
+ timeval cur_time;
+ gettimeofday(&cur_time, NULL);
+ const uint64_t cur_mks = cur_time.tv_sec * 1000000LL + cur_time.tv_usec;
+ if ((cur_mks - mLastPreviewed) >= mPreviewAfter) {
+ mLastPreviewed = cur_mks;
+ return true;
+ }
+ return false;
+}
+
+}; /* namespace android */
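
onNextFrameAvailable() above follows the standard preview_stream_ops buffer lifecycle: dequeue, lock, fill, then enqueue on success or cancel on any failure. A condensed sketch of that sequence; the already-configured window is an assumption, and the GraphicBufferMapper mapping step is reduced to a comment:

    #include <hardware/camera.h>

    /* Condensed sketch of the buffer lifecycle in onNextFrameAvailable().
     * 'win' is an assumed, already-configured preview window. */
    static void pushPreviewFrame(preview_stream_ops* win)
    {
        buffer_handle_t* buffer = NULL;
        int stride = 0;
        if (win->dequeue_buffer(win, &buffer, &stride) != 0 || buffer == NULL) {
            return;                           /* nothing to render into */
        }
        if (win->lock_buffer(win, buffer) != 0) {
            win->cancel_buffer(win, buffer);  /* hand the buffer back */
            return;
        }
        /* ...map *buffer via GraphicBufferMapper::lock(), write the RGBA
         * frame into it, then unlock, exactly as the code above does... */
        win->enqueue_buffer(win, buffer);     /* display the frame */
    }
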
diff --git a/v3/PreviewWindow.h b/v3/PreviewWindow.h
new file mode 100755
index 0000000..d037c95
--- a/dev/null
+++ b/v3/PreviewWindow.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_PREVIEW_WINDOW_H
+#define HW_EMULATOR_CAMERA_PREVIEW_WINDOW_H
+
+/*
+ * Contains declaration of a class PreviewWindow that encapsulates functionality
+ * of a preview window set via set_preview_window camera HAL API.
+ */
+
+namespace android {
+
+class EmulatedCameraDevice;
+
+/* Encapsulates functionality of a preview window set via set_preview_window
+ * camera HAL API.
+ *
+ * Objects of this class are contained in EmulatedCamera objects, and handle
+ * relevant camera API callbacks.
+ */
+class PreviewWindow {
+public:
+ /* Constructs PreviewWindow instance. */
+ PreviewWindow();
+
+ /* Destructs PreviewWindow instance. */
+ ~PreviewWindow();
+
+ /***************************************************************************
+ * Camera API
+ **************************************************************************/
+
+public:
+ /* Actual handler for camera_device_ops_t::set_preview_window callback.
+ * This method is called by the containing emulated camera object when it is
+     * handling the camera_device_ops_t::set_preview_window callback.
+ * Param:
+ * window - Preview window to set. This parameter might be NULL, which
+ * indicates preview window reset.
+     * preview_fps - Preview's frame frequency. This parameter determines when
+ * a frame received via onNextFrameAvailable call will be pushed to
+ * the preview window. If 'window' parameter passed to this method is
+ * NULL, this parameter is ignored.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ status_t setPreviewWindow(struct preview_stream_ops* window,
+ int preview_fps);
+
+ /* Starts the preview.
+ * This method is called by the containing emulated camera object when it is
+     * handling the camera_device_ops_t::start_preview callback.
+ */
+ status_t startPreview();
+
+ /* Stops the preview.
+ * This method is called by the containing emulated camera object when it is
+     * handling the camera_device_ops_t::stop_preview callback.
+ */
+ void stopPreview();
+
+ /* Checks if preview is enabled. */
+ inline bool isPreviewEnabled()
+ {
+ return mPreviewEnabled;
+ }
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /* Next frame is available in the camera device.
+ * This is a notification callback that is invoked by the camera device when
+ * a new frame is available.
+ * Note that most likely this method is called in context of a worker thread
+ * that camera device has created for frame capturing.
+ * Param:
+ * frame - Captured frame, or NULL if camera device didn't pull the frame
+ * yet. If NULL is passed in this parameter use GetCurrentFrame method
+ * of the camera device class to obtain the next frame. Also note that
+ * the size of the frame that is passed here (as well as the frame
+ * returned from the GetCurrentFrame method) is defined by the current
+ * frame settings (width + height + pixel format) for the camera device.
+ * timestamp - Frame's timestamp.
+ * camera_dev - Camera device instance that delivered the frame.
+ */
+ void onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev);
+
+ /***************************************************************************
+ * Private API
+ **************************************************************************/
+
+protected:
+ /* Adjusts cached dimensions of the preview window frame according to the
+ * frame dimensions used by the camera device.
+ *
+     * When preview is started, it's not known in advance what the dimensions
+     * of the displayed frames are going to be. Moreover, those dimensions can
+     * change on the fly. So, in order to always stay in sync with the frame
+     * dimensions, this method is called for each frame passed to the
+     * onNextFrameAvailable method, and properly adjusts the frame dimensions
+     * used by the preview window.
+ * Note that this method must be called while object is locked.
+ * Param:
+     *  camera_dev - Camera device providing the frames displayed in the preview
+ * window.
+ * Return:
+ * true if cached dimensions have been adjusted, or false if cached
+ * dimensions match device's frame dimensions.
+ */
+ bool adjustPreviewDimensions(EmulatedCameraDevice* camera_dev);
+
+ /* Checks if it's the time to push new frame to the preview window.
+ * Note that this method must be called while object is locked. */
+ bool isPreviewTime();
+
+ /***************************************************************************
+ * Data members
+ **************************************************************************/
+
+protected:
+ /* Locks this instance for data changes. */
+ Mutex mObjectLock;
+
+ /* Preview window instance. */
+ preview_stream_ops* mPreviewWindow;
+
+ /* Timestamp (abs. microseconds) when last frame has been pushed to the
+ * preview window. */
+ uint64_t mLastPreviewed;
+
+ /* Preview frequency in microseconds. */
+ uint32_t mPreviewAfter;
+
+ /*
+ * Cached preview window frame dimensions.
+ */
+
+ int mPreviewFrameWidth;
+ int mPreviewFrameHeight;
+
+ /* Preview status. */
+ bool mPreviewEnabled;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_PREVIEW_WINDOW_H */
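
isPreviewTime() throttles frame pushes to at most one per mPreviewAfter microseconds; with preview_fps = 30 that is 1000000 / 30 = 33333 microseconds between displayed frames. A standalone sketch of the same throttle, assuming only the formula above:

    #include <stdint.h>
    #include <sys/time.h>

    /* Standalone version of the PreviewWindow frame throttle. */
    class FrameThrottle {
    public:
        explicit FrameThrottle(int previewFps)
            : mPreviewAfter(1000000 / previewFps), mLastPreviewed(0) {}

        /* Returns true if enough time has passed to display another frame. */
        bool isPreviewTime() {
            timeval now;
            gettimeofday(&now, NULL);
            const uint64_t nowUs = now.tv_sec * 1000000LL + now.tv_usec;
            if (nowUs - mLastPreviewed >= mPreviewAfter) {
                mLastPreviewed = nowUs;       /* accept this frame */
                return true;
            }
            return false;                     /* too soon: drop the frame */
        }

    private:
        uint32_t mPreviewAfter;   /* minimum frame gap, microseconds */
        uint64_t mLastPreviewed;  /* time of the last accepted frame */
    };
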
diff --git a/v3/QemuClient.cpp b/v3/QemuClient.cpp
new file mode 100755
index 0000000..111cbb8
--- a/dev/null
+++ b/v3/QemuClient.cpp
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of classes that encapsulate connection to camera
+ * services in the emulator via qemu pipe.
+ */
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "EmulatedCamera_QemuClient"
+#include <cutils/log.h>
+#include "EmulatedCamera.h"
+#include "QemuClient.h"
+
+#define LOG_QUERIES 0
+#if LOG_QUERIES
+#define LOGQ(...) ALOGD(__VA_ARGS__)
+#else
+#define LOGQ(...) (void(0))
+#endif // LOG_QUERIES
+
+namespace android {
+
+/****************************************************************************
+ * Qemu query
+ ***************************************************************************/
+
+QemuQuery::QemuQuery()
+ : mQuery(mQueryPrealloc),
+ mQueryDeliveryStatus(NO_ERROR),
+ mReplyBuffer(NULL),
+ mReplyData(NULL),
+ mReplySize(0),
+ mReplyDataSize(0),
+ mReplyStatus(0)
+{
+ *mQuery = '\0';
+}
+
+QemuQuery::QemuQuery(const char* query_string)
+ : mQuery(mQueryPrealloc),
+ mQueryDeliveryStatus(NO_ERROR),
+ mReplyBuffer(NULL),
+ mReplyData(NULL),
+ mReplySize(0),
+ mReplyDataSize(0),
+ mReplyStatus(0)
+{
+ mQueryDeliveryStatus = QemuQuery::createQuery(query_string, NULL);
+}
+
+QemuQuery::QemuQuery(const char* query_name, const char* query_param)
+ : mQuery(mQueryPrealloc),
+ mQueryDeliveryStatus(NO_ERROR),
+ mReplyBuffer(NULL),
+ mReplyData(NULL),
+ mReplySize(0),
+ mReplyDataSize(0),
+ mReplyStatus(0)
+{
+ mQueryDeliveryStatus = QemuQuery::createQuery(query_name, query_param);
+}
+
+QemuQuery::~QemuQuery()
+{
+ QemuQuery::resetQuery();
+}
+
+status_t QemuQuery::createQuery(const char* name, const char* param)
+{
+ /* Reset from the previous use. */
+ resetQuery();
+
+ /* Query name cannot be NULL or an empty string. */
+ if (name == NULL || *name == '\0') {
+ ALOGE("%s: NULL or an empty string is passed as query name.",
+ __FUNCTION__);
+ mQueryDeliveryStatus = EINVAL;
+ return EINVAL;
+ }
+
+    const size_t name_len = strlen(name);
+    const size_t param_len = (param != NULL) ? strlen(param) : 0;
+    /* Space separator and terminator with a parameter; terminator only
+     * without one. */
+    const size_t required = name_len + (param_len ? (param_len + 2) : 1);
+
+ if (required > sizeof(mQueryPrealloc)) {
+ /* Preallocated buffer was too small. Allocate a bigger query buffer. */
+ mQuery = new char[required];
+ if (mQuery == NULL) {
+ ALOGE("%s: Unable to allocate %zu bytes for query buffer",
+ __FUNCTION__, required);
+ mQueryDeliveryStatus = ENOMEM;
+ return ENOMEM;
+ }
+ }
+
+ /* At this point mQuery buffer is big enough for the query. */
+ if (param_len) {
+ sprintf(mQuery, "%s %s", name, param);
+ } else {
+ memcpy(mQuery, name, name_len + 1);
+ }
+
+ return NO_ERROR;
+}
+
+status_t QemuQuery::completeQuery(status_t status)
+{
+ /* Save query completion status. */
+ mQueryDeliveryStatus = status;
+ if (mQueryDeliveryStatus != NO_ERROR) {
+ return mQueryDeliveryStatus;
+ }
+
+    /* Make sure the reply buffer contains at least 'ok' or 'ko'.
+     * Note that the 'ok' / 'ko' prefix is always 3 characters long: if there
+     * is more data in the reply, that data is separated from 'ok'/'ko' with
+     * a ':'. If there is no more data in the reply, the prefix is
+     * zero-terminated, and the terminator is included in the reply. */
+ if (mReplyBuffer == NULL || mReplySize < 3) {
+ ALOGE("%s: Invalid reply to the query", __FUNCTION__);
+ mQueryDeliveryStatus = EINVAL;
+ return EINVAL;
+ }
+
+    /* Check the reply status. */
+ if (!memcmp(mReplyBuffer, "ok", 2)) {
+ mReplyStatus = 1;
+ } else if (!memcmp(mReplyBuffer, "ko", 2)) {
+ mReplyStatus = 0;
+ } else {
+ ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+ mQueryDeliveryStatus = EINVAL;
+ return EINVAL;
+ }
+
+    /* Check whether reply data follows the status. */
+ if (mReplySize > 3) {
+        /* There is extra data. Make sure it is separated from the status
+         * with a ':'. */
+ if (mReplyBuffer[2] != ':') {
+ ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+ mQueryDeliveryStatus = EINVAL;
+ return EINVAL;
+ }
+ mReplyData = mReplyBuffer + 3;
+ mReplyDataSize = mReplySize - 3;
+ } else {
+        /* Make sure a reply buffer containing just 'ok'/'ko' ends with a
+         * zero-terminator. */
+ if (mReplyBuffer[2] != '\0') {
+ ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+ mQueryDeliveryStatus = EINVAL;
+ return EINVAL;
+ }
+ }
+
+ return NO_ERROR;
+}
+
+void QemuQuery::resetQuery()
+{
+ if (mQuery != NULL && mQuery != mQueryPrealloc) {
+ delete[] mQuery;
+ }
+ mQuery = mQueryPrealloc;
+ mQueryDeliveryStatus = NO_ERROR;
+ if (mReplyBuffer != NULL) {
+ free(mReplyBuffer);
+ mReplyBuffer = NULL;
+ }
+ mReplyData = NULL;
+ mReplySize = mReplyDataSize = 0;
+ mReplyStatus = 0;
+}
+
+/****************************************************************************
+ * Qemu client base
+ ***************************************************************************/
+
+/* Camera service name. */
+const char QemuClient::mCameraServiceName[] = "camera";
+
+QemuClient::QemuClient()
+ : mPipeFD(-1)
+{
+}
+
+QemuClient::~QemuClient()
+{
+ if (mPipeFD >= 0) {
+ close(mPipeFD);
+ }
+}
+
+/****************************************************************************
+ * Qemu client API
+ ***************************************************************************/
+
+status_t QemuClient::connectClient(const char* param)
+{
+ ALOGV("%s: '%s'", __FUNCTION__, param ? param : "");
+
+ /* Make sure that client is not connected already. */
+ if (mPipeFD >= 0) {
+ ALOGE("%s: Qemu client is already connected", __FUNCTION__);
+ return EINVAL;
+ }
+
+    /* Select one of the two services: 'factory', or 'emulated camera'. */
+ if (param == NULL || *param == '\0') {
+ /* No parameters: connect to the factory service. */
+ char pipe_name[512];
+ snprintf(pipe_name, sizeof(pipe_name), "qemud:%s", mCameraServiceName);
+ mPipeFD = qemu_pipe_open(pipe_name);
+ } else {
+        /* Six characters for 'qemud:', one for the ':' separating the
+         * service name from the parameters (as required by the qemu pipe
+         * protocol), plus the zero-terminator. */
+ char* connection_str = new char[strlen(mCameraServiceName) +
+ strlen(param) + 8];
+ sprintf(connection_str, "qemud:%s:%s", mCameraServiceName, param);
+
+ mPipeFD = qemu_pipe_open(connection_str);
+ delete[] connection_str;
+ }
+ if (mPipeFD < 0) {
+ ALOGE("%s: Unable to connect to the camera service '%s': %s",
+ __FUNCTION__, param ? param : "Factory", strerror(errno));
+ return errno ? errno : EINVAL;
+ }
+
+ return NO_ERROR;
+}
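+
+/* For illustration: with the default service name, the calls above open
+ * either the pipe "qemud:camera" (factory service, no parameters), or a pipe
+ * such as "qemud:camera:name=/dev/video0" (emulated camera service; the
+ * device name here is hypothetical and must come from the factory 'list'
+ * query). */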
+
+void QemuClient::disconnectClient()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (mPipeFD >= 0) {
+ close(mPipeFD);
+ mPipeFD = -1;
+ }
+}
+
+status_t QemuClient::sendMessage(const void* data, size_t data_size)
+{
+ if (mPipeFD < 0) {
+ ALOGE("%s: Qemu client is not connected", __FUNCTION__);
+ return EINVAL;
+ }
+
+    /* Note that we don't use qemud_client_send here: with qemu pipes we
+     * don't need to send the payload size before the payload when writing to
+     * the pipe. So we can use a simple write, and the qemu pipe will take
+     * care of the rest, calling the receiving end with the number of bytes
+     * transferred. */
+ const size_t written = qemud_fd_write(mPipeFD, data, data_size);
+ if (written == data_size) {
+ return NO_ERROR;
+ } else {
+ ALOGE("%s: Error sending data via qemu pipe: '%s'",
+ __FUNCTION__, strerror(errno));
+ return errno ? errno : EIO;
+ }
+}
+
+status_t QemuClient::receiveMessage(void** data, size_t* data_size)
+{
+ *data = NULL;
+ *data_size = 0;
+
+ if (mPipeFD < 0) {
+ ALOGE("%s: Qemu client is not connected", __FUNCTION__);
+ return EINVAL;
+ }
+
+    /* The service replies to a query by sending the payload size first, and
+     * then the payload itself. Note that the payload size is sent as a
+     * string of 8 characters holding the hexadecimal payload size value.
+     * Note also that the string is not zero-terminated. */
+ size_t payload_size;
+ char payload_size_str[9];
+ int rd_res = qemud_fd_read(mPipeFD, payload_size_str, 8);
+ if (rd_res != 8) {
+ ALOGE("%s: Unable to obtain payload size: %s",
+ __FUNCTION__, strerror(errno));
+ return errno ? errno : EIO;
+ }
+
+ /* Convert payload size. */
+ errno = 0;
+ payload_size_str[8] = '\0';
+ payload_size = strtol(payload_size_str, NULL, 16);
+ if (errno) {
+ ALOGE("%s: Invalid payload size '%s'", __FUNCTION__, payload_size_str);
+ return EIO;
+ }
+
+ /* Allocate payload data buffer, and read the payload there. */
+ *data = malloc(payload_size);
+ if (*data == NULL) {
+ ALOGE("%s: Unable to allocate %zu bytes payload buffer",
+ __FUNCTION__, payload_size);
+ return ENOMEM;
+ }
+ rd_res = qemud_fd_read(mPipeFD, *data, payload_size);
+ if (static_cast<size_t>(rd_res) == payload_size) {
+ *data_size = payload_size;
+ return NO_ERROR;
+ } else {
+ ALOGE("%s: Read size %d doesnt match expected payload size %zu: %s",
+ __FUNCTION__, rd_res, payload_size, strerror(errno));
+ free(*data);
+ *data = NULL;
+ return errno ? errno : EIO;
+ }
+}
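+
+/* For illustration: a 6-byte reply "ok:foo" arrives on the pipe as the eight
+ * hex characters "00000006", followed by the six payload bytes. */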
+
+status_t QemuClient::doQuery(QemuQuery* query)
+{
+    /* Make sure that query has been successfully constructed. */
+ if (query->mQueryDeliveryStatus != NO_ERROR) {
+ ALOGE("%s: Query is invalid", __FUNCTION__);
+ return query->mQueryDeliveryStatus;
+ }
+
+ LOGQ("Send query '%s'", query->mQuery);
+
+ /* Send the query. */
+ status_t res = sendMessage(query->mQuery, strlen(query->mQuery) + 1);
+ if (res == NO_ERROR) {
+ /* Read the response. */
+ res = receiveMessage(reinterpret_cast<void**>(&query->mReplyBuffer),
+ &query->mReplySize);
+ if (res == NO_ERROR) {
+ LOGQ("Response to query '%s': Status = '%.2s', %d bytes in response",
+ query->mQuery, query->mReplyBuffer, query->mReplySize);
+ } else {
+ ALOGE("%s Response to query '%s' has failed: %s",
+ __FUNCTION__, query->mQuery, strerror(res));
+ }
+ } else {
+ ALOGE("%s: Send query '%s' failed: %s",
+ __FUNCTION__, query->mQuery, strerror(res));
+ }
+
+ /* Complete the query, and return its completion handling status. */
+ const status_t res1 = query->completeQuery(res);
+ ALOGE_IF(res1 != NO_ERROR && res1 != res,
+ "%s: Error %d in query '%s' completion",
+ __FUNCTION__, res1, query->mQuery);
+ return res1;
+}
+
+/****************************************************************************
+ * Qemu client for the 'factory' service.
+ ***************************************************************************/
+
+/*
+ * Factory service queries.
+ */
+
+/* Queries list of cameras connected to the host. */
+const char FactoryQemuClient::mQueryList[] = "list";
+
+FactoryQemuClient::FactoryQemuClient()
+ : QemuClient()
+{
+}
+
+FactoryQemuClient::~FactoryQemuClient()
+{
+}
+
+status_t FactoryQemuClient::listCameras(char** list)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ QemuQuery query(mQueryList);
+ if (doQuery(&query) || !query.isQuerySucceeded()) {
+ ALOGE("%s: List cameras query failed: %s", __FUNCTION__,
+ query.mReplyData ? query.mReplyData : "No error message");
+ return query.getCompletionStatus();
+ }
+
+ /* Make sure there is a list returned. */
+ if (query.mReplyDataSize == 0) {
+ ALOGE("%s: No camera list is returned.", __FUNCTION__);
+ return EINVAL;
+ }
+
+ /* Copy the list over. */
+ *list = (char*)malloc(query.mReplyDataSize);
+ if (*list != NULL) {
+ memcpy(*list, query.mReplyData, query.mReplyDataSize);
+ ALOGD("Emulated camera list: %s", *list);
+ return NO_ERROR;
+ } else {
+ ALOGE("%s: Unable to allocate %zu bytes",
+ __FUNCTION__, query.mReplyDataSize);
+ return ENOMEM;
+ }
+}
+
+/****************************************************************************
+ * Qemu client for an 'emulated camera' service.
+ ***************************************************************************/
+
+/*
+ * Emulated camera queries
+ */
+
+/* Connect to the camera device. */
+const char CameraQemuClient::mQueryConnect[] = "connect";
+/* Disconnect from the camera device. */
+const char CameraQemuClient::mQueryDisconnect[] = "disconnect";
+/* Start capturing video from the camera device. */
+const char CameraQemuClient::mQueryStart[] = "start";
+/* Stop capturing video from the camera device. */
+const char CameraQemuClient::mQueryStop[] = "stop";
+/* Get next video frame from the camera device. */
+const char CameraQemuClient::mQueryFrame[] = "frame";
+
+CameraQemuClient::CameraQemuClient()
+ : QemuClient()
+{
+}
+
+CameraQemuClient::~CameraQemuClient()
+{
+}
+
+status_t CameraQemuClient::queryConnect()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ QemuQuery query(mQueryConnect);
+ doQuery(&query);
+ const status_t res = query.getCompletionStatus();
+ ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
+ __FUNCTION__, query.mReplyData ? query.mReplyData :
+ "No error message");
+ return res;
+}
+
+status_t CameraQemuClient::queryDisconnect()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ QemuQuery query(mQueryDisconnect);
+ doQuery(&query);
+ const status_t res = query.getCompletionStatus();
+ ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
+ __FUNCTION__, query.mReplyData ? query.mReplyData :
+ "No error message");
+ return res;
+}
+
+status_t CameraQemuClient::queryStart(uint32_t pixel_format,
+ int width,
+ int height)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ char query_str[256];
+ snprintf(query_str, sizeof(query_str), "%s dim=%dx%d pix=%d",
+ mQueryStart, width, height, pixel_format);
+ QemuQuery query(query_str);
+ doQuery(&query);
+ const status_t res = query.getCompletionStatus();
+ ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
+ __FUNCTION__, query.mReplyData ? query.mReplyData :
+ "No error message");
+ return res;
+}
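+
+/* For illustration: assuming the NV21 fourcc (825382478), a call to
+ * queryStart(825382478, 640, 480) sends the query string
+ * "start dim=640x480 pix=825382478" to the service. */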
+
+status_t CameraQemuClient::queryStop()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ QemuQuery query(mQueryStop);
+ doQuery(&query);
+ const status_t res = query.getCompletionStatus();
+ ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
+ __FUNCTION__, query.mReplyData ? query.mReplyData :
+ "No error message");
+ return res;
+}
+
+status_t CameraQemuClient::queryFrame(void* vframe,
+ void* pframe,
+ size_t vframe_size,
+ size_t pframe_size,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exposure_comp)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ char query_str[256];
+ snprintf(query_str, sizeof(query_str), "%s video=%zu preview=%zu whiteb=%g,%g,%g expcomp=%g",
+ mQueryFrame, (vframe && vframe_size) ? vframe_size : 0,
+ (pframe && pframe_size) ? pframe_size : 0, r_scale, g_scale, b_scale,
+ exposure_comp);
+ QemuQuery query(query_str);
+ doQuery(&query);
+ const status_t res = query.getCompletionStatus();
+    if (res != NO_ERROR) {
+ ALOGE("%s: Query failed: %s",
+ __FUNCTION__, query.mReplyData ? query.mReplyData :
+ "No error message");
+ return res;
+ }
+
+ /* Copy requested frames. */
+ size_t cur_offset = 0;
+ const uint8_t* frame = reinterpret_cast<const uint8_t*>(query.mReplyData);
+ /* Video frame is always first. */
+ if (vframe != NULL && vframe_size != 0) {
+ /* Make sure that video frame is in. */
+ if ((query.mReplyDataSize - cur_offset) >= vframe_size) {
+ memcpy(vframe, frame, vframe_size);
+ cur_offset += vframe_size;
+ } else {
+ ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes video frame",
+ __FUNCTION__, query.mReplyDataSize - cur_offset, vframe_size);
+ return EINVAL;
+ }
+ }
+ if (pframe != NULL && pframe_size != 0) {
+ /* Make sure that preview frame is in. */
+ if ((query.mReplyDataSize - cur_offset) >= pframe_size) {
+ memcpy(pframe, frame + cur_offset, pframe_size);
+ cur_offset += pframe_size;
+ } else {
+ ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes preview frame",
+ __FUNCTION__, query.mReplyDataSize - cur_offset, pframe_size);
+ return EINVAL;
+ }
+ }
+
+ return NO_ERROR;
+}
+
+}; /* namespace android */
diff --git a/v3/QemuClient.h b/v3/QemuClient.h
new file mode 100755
index 0000000..1644321
--- a/dev/null
+++ b/v3/QemuClient.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_QEMU_CLIENT_H
+#define HW_EMULATOR_CAMERA_QEMU_CLIENT_H
+
+/*
+ * Contains declaration of classes that encapsulate connection to camera services
+ * in the emulator via qemu pipe.
+ */
+
+#include <hardware/qemud.h>
+
+namespace android {
+
+/****************************************************************************
+ * Qemu query
+ ***************************************************************************/
+
+/* Encapsulates a query to the emulator.
+ * Guest exchanges data with the emulator via queries sent over the qemu pipe.
+ * The queries, as well as the replies to them, are all strings (except for
+ * the 'frame' query, where the reply is a frame buffer).
+ * Each query is formatted as such:
+ *
+ * "<query name>[ <parameters>]",
+ *
+ * where <query name> is a string representing query name, and <parameters> are
+ * optional parameters for the query. If parameters are present, they must be
+ * separated from the query name with a single space, and they must be formatted
+ * as such:
+ *
+ * "<name1>=<value1> <name2>=<value2> ... <nameN>=<valueN>"
+ *
+ * I.e.:
+ * - Every parameter must have a name, and a value.
+ * - Name and value must be separated with '='.
+ * - No spaces are allowed around '=' separating name and value.
+ * - Parameters must be separated with a single space character.
+ * - No '=' character is allowed in name and in value.
+ *
+ * There are certain restrictions on strings used in the query:
+ * - Spaces are allowed only as separators.
+ * - '=' is allowed only to divide parameter names from parameter values.
+ *
+ * Emulator replies to each query in two chunks:
+ * - 8 bytes encoding the payload size as a string containing hexadecimal
+ * representation of the payload size value. This is done in order to simplify
+ * dealing with different endianness on the host, and on the guest.
+ * - Payload, whose size is defined by the first chunk.
+ *
+ * Every payload always begins with two characters, encoding the result of the
+ * query:
+ * - 'ok' Encoding the success
+ * - 'ko' Encoding a failure.
+ * After that, the payload may carry optional data. If the payload has more
+ * data following the query result, a ':' character separates it from the
+ * result. If the payload carries only the result, it always ends with a
+ * zero-terminator. So, the payload's 'ok'/'ko' prefix is always 3 bytes long:
+ * it includes either a zero-terminator, if there is no data, or a ':'
+ * separator.
+ */
+class QemuQuery {
+public:
+ /* Constructs an uninitialized QemuQuery instance. */
+ QemuQuery();
+
+ /* Constructs and initializes QemuQuery instance for a query.
+ * Param:
+ * query_string - Query string. This constructor can also be used to
+ * construct a query that doesn't have parameters. In this case query
+ * name can be passed as a parameter here.
+ */
+ explicit QemuQuery(const char* query_string);
+
+ /* Constructs and initializes QemuQuery instance for a query with parameters.
+ * Param:
+ * query_name - Query name.
+ * query_param - Query parameters. Can be NULL.
+ */
+ QemuQuery(const char* query_name, const char* query_param);
+
+ /* Destructs QemuQuery instance. */
+ ~QemuQuery();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+ /* Creates new query.
+ * Note: this method will reset this instance prior to creating a new query
+ * in order to discard possible "leftovers" from the previous query.
+ * Param:
+ * query_name - Query name.
+ * query_param - Query parameters. Can be NULL.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ status_t createQuery(const char* name, const char* param);
+
+ /* Completes the query after a reply from the emulator.
+ * This method will parse the reply buffer, and calculate the final query
+ * status, which depends not only on the transport success / failure, but
+ * also on 'ok' / 'ko' in the reply buffer.
+ * Param:
+     *  status - Query delivery status. This status doesn't necessarily
+     *      reflect the final query status (which is defined by the 'ok'/'ko'
+     *      prefix in the reply buffer). It simply states whether or not the
+     *      query has been sent and a reply has been received successfully.
+     *      However, if this status indicates a failure, it means that the
+     *      entire query has failed.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure. Note that
+ * status returned here just signals whether or not the method has succeeded.
+ * Use isQuerySucceeded() / getCompletionStatus() methods of this class to
+ * check the final query status.
+ */
+ status_t completeQuery(status_t status);
+
+ /* Resets the query from a previous use. */
+ void resetQuery();
+
+ /* Checks if query has succeeded.
+ * Note that this method must be called after completeQuery() method of this
+ * class has been executed.
+ */
+ inline bool isQuerySucceeded() const {
+ return mQueryDeliveryStatus == NO_ERROR && mReplyStatus != 0;
+ }
+
+ /* Gets final completion status of the query.
+ * Note that this method must be called after completeQuery() method of this
+ * class has been executed.
+ * Return:
+ * NO_ERROR if query has succeeded, or an appropriate error status on query
+ * failure.
+ */
+ inline status_t getCompletionStatus() const {
+ if (mQueryDeliveryStatus == NO_ERROR) {
+ if (mReplyStatus) {
+ return NO_ERROR;
+ } else {
+ return EINVAL;
+ }
+ } else {
+ return mQueryDeliveryStatus;
+ }
+ }
+
+ /****************************************************************************
+     * Public data members
+ ***************************************************************************/
+
+public:
+ /* Query string. */
+ char* mQuery;
+ /* Query delivery status. */
+ status_t mQueryDeliveryStatus;
+ /* Reply buffer */
+ char* mReplyBuffer;
+ /* Reply data (past 'ok'/'ko'). If NULL, there were no data in reply. */
+ char* mReplyData;
+ /* Reply buffer size. */
+ size_t mReplySize;
+ /* Reply data size. */
+ size_t mReplyDataSize;
+ /* Reply status: 1 - ok, 0 - ko. */
+ int mReplyStatus;
+
+ /****************************************************************************
+     * Protected data members
+ ***************************************************************************/
+
+protected:
+ /* Preallocated buffer for small queries. */
+ char mQueryPrealloc[256];
+};
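+
+/* Illustrative usage sketch (not part of this header): a query with
+ * parameters is built, delivered through a QemuClient, and its reply fields
+ * are examined once completeQuery() has run inside doQuery(). The fourcc
+ * value is an assumption (NV21); 'client' is an already-connected
+ * CameraQemuClient instance.
+ */
+#if 0
+QemuQuery query("start", "dim=640x480 pix=825382478");
+if (client->doQuery(&query) == NO_ERROR && query.isQuerySucceeded()) {
+    /* query.mReplyData / query.mReplyDataSize hold any data past 'ok:'. */
+}
+#endif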
+
+/****************************************************************************
+ * Qemu client base
+ ***************************************************************************/
+
+/* Encapsulates a connection to the 'camera' service in the emulator via qemu
+ * pipe.
+ */
+class QemuClient {
+public:
+ /* Constructs QemuClient instance. */
+ QemuClient();
+
+ /* Destructs QemuClient instance. */
+ virtual ~QemuClient();
+
+ /****************************************************************************
+ * Qemu client API
+ ***************************************************************************/
+
+public:
+ /* Connects to the 'camera' service in the emulator via qemu pipe.
+ * Param:
+ * param - Parameters to pass to the camera service. There are two types of
+ * camera services implemented by the emulator. The first one is a
+ * 'camera factory' type of service that provides list of cameras
+ * connected to the host. Another one is an 'emulated camera' type of
+ * service that provides interface to a camera connected to the host. At
+ * the connection time emulator makes distinction between the two by
+ * looking at connection parameters: no parameters means connection to
+ * the 'factory' service, while connection with parameters means
+ * connection to an 'emulated camera' service, where camera is identified
+ * by one of the connection parameters. So, passing NULL, or an empty
+ * string to this method will establish a connection with the 'factory'
+ * service, while not empty string passed here will establish connection
+ * with an 'emulated camera' service. Parameters defining the emulated
+ * camera must be formatted as such:
+ *
+ * "name=<device name> [inp_channel=<input channel #>]",
+ *
+ * where 'device name' is a required parameter defining name of the
+ * camera device, and 'input channel' is an optional parameter (positive
+ * integer), defining the input channel to use on the camera device.
+ * Note that device name passed here must have been previously obtained
+ * from the factory service using 'list' query.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t connectClient(const char* param);
+
+ /* Disconnects from the service. */
+ virtual void disconnectClient();
+
+ /* Sends data to the service.
+ * Param:
+ * data, data_size - Data to send.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t sendMessage(const void* data, size_t data_size);
+
+ /* Receives data from the service.
+ * This method assumes that data to receive will come in two chunks: 8
+ * characters encoding the payload size in hexadecimal string, followed by
+     * the payload (if any).
+ * This method will allocate data buffer where to receive the response.
+ * Param:
+ * data - Upon success contains address of the allocated data buffer with
+ * the data received from the service. The caller is responsible for
+ * freeing allocated data buffer.
+ * data_size - Upon success contains size of the data received from the
+ * service.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t receiveMessage(void** data, size_t* data_size);
+
+ /* Sends a query, and receives a response from the service.
+ * Param:
+ * query - Query to send to the service. When this method returns, the query
+ * is completed, and all its relevant data members are properly initialized.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure. Note that
+ * status returned here is not the final query status. Use isQuerySucceeded(),
+ * or getCompletionStatus() method on the query object to see if it has
+ * succeeded. However, if this method returns a failure, it means that the
+ * query has failed, and there is no guarantee that its data members are
+ * properly initialized (except for the 'mQueryDeliveryStatus', which is
+ * always in the proper state).
+ */
+ virtual status_t doQuery(QemuQuery* query);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Qemu pipe handle. */
+ int mPipeFD;
+
+private:
+ /* Camera service name. */
+ static const char mCameraServiceName[];
+};
+
+/****************************************************************************
+ * Qemu client for the 'factory' service.
+ ***************************************************************************/
+
+/* Encapsulates QemuClient for the 'factory' service. */
+class FactoryQemuClient : public QemuClient {
+public:
+ /* Constructs FactoryQemuClient instance. */
+ FactoryQemuClient();
+
+ /* Destructs FactoryQemuClient instance. */
+ ~FactoryQemuClient();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /* Lists camera devices connected to the host.
+ * Param:
+ * list - Upon success contains a list of cameras connected to the host. The
+ * list returned here is represented as a string, containing multiple
+ * lines separated with '\n', where each line represents a camera. Each
+ * camera line is formatted as such:
+ *
+ * "name=<device name> channel=<num> pix=<num> framedims=<dimensions>\n"
+ *
+ * Where:
+ * - 'name' is the name of the camera device attached to the host. This
+ * name must be used for subsequent connection to the 'emulated camera'
+ * service for that camera.
+ * - 'channel' - input channel number (positive int) to use to communicate
+ * with the camera.
+ * - 'pix' - pixel format (a "fourcc" uint), chosen for the video frames
+ * by the camera service.
+ * - 'framedims' contains a list of frame dimensions supported by the
+     *       camera for the chosen pixel format. Each entry in the list is of the form
+ * '<width>x<height>', where 'width' and 'height' are numeric values
+ * for width and height of a supported frame dimension. Entries in
+ * this list are separated with ',' with no spaces between the entries.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t listCameras(char** list);
+
+ /****************************************************************************
+ * Names of the queries available for the emulated camera factory.
+ ***************************************************************************/
+
+private:
+ /* List cameras connected to the host. */
+ static const char mQueryList[];
+};
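+
+/* Illustrative sketch (hypothetical helper, not part of this class): pulling
+ * the device names out of the list string that listCameras() returns. Each
+ * line has the form "name=<dev> channel=<n> pix=<n> framedims=...". ALOGD
+ * from <cutils/log.h> is assumed to be available.
+ */
+#if 0
+static void logCameraNames(char* list)
+{
+    char* saveptr = NULL;
+    for (char* line = strtok_r(list, "\n", &saveptr); line != NULL;
+         line = strtok_r(NULL, "\n", &saveptr)) {
+        const char* name = strstr(line, "name=");
+        if (name != NULL) {
+            char dev[64];
+            /* The device name extends up to the first space. */
+            if (sscanf(name + 5, "%63s", dev) == 1) {
+                ALOGD("Camera device: %s", dev);
+            }
+        }
+    }
+}
+#endif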
+
+/****************************************************************************
+ * Qemu client for an 'emulated camera' service.
+ ***************************************************************************/
+
+/* Encapsulates QemuClient for an 'emulated camera' service.
+ */
+class CameraQemuClient : public QemuClient {
+public:
+ /* Constructs CameraQemuClient instance. */
+ CameraQemuClient();
+
+ /* Destructs CameraQemuClient instance. */
+ ~CameraQemuClient();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /* Queries camera connection.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t queryConnect();
+
+ /* Queries camera disconnection.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t queryDisconnect();
+
+ /* Queries camera to start capturing video.
+ * Param:
+ * pixel_format - Pixel format that is used by the client to push video
+ * frames to the camera framework.
+ * width, height - Frame dimensions, requested by the framework.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t queryStart(uint32_t pixel_format, int width, int height);
+
+ /* Queries camera to stop capturing video.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t queryStop();
+
+ /* Queries camera for the next video frame.
+ * Param:
+ * vframe, vframe_size - Define buffer, allocated to receive a video frame.
+ * Any of these parameters can be 0, indicating that the caller is
+ * interested only in preview frame.
+ * pframe, pframe_size - Define buffer, allocated to receive a preview frame.
+ * Any of these parameters can be 0, indicating that the caller is
+ * interested only in video frame.
+ * r_scale, g_scale, b_scale - White balance scale.
+     *  exposure_comp - Exposure compensation.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ status_t queryFrame(void* vframe,
+ void* pframe,
+ size_t vframe_size,
+ size_t pframe_size,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exposure_comp);
+
+ /****************************************************************************
+ * Names of the queries available for the emulated camera.
+ ***************************************************************************/
+
+private:
+ /* Connect to the camera. */
+ static const char mQueryConnect[];
+ /* Disconnect from the camera. */
+ static const char mQueryDisconnect[];
+ /* Start video capturing. */
+ static const char mQueryStart[];
+ /* Stop video capturing. */
+ static const char mQueryStop[];
+ /* Query frame(s). */
+ static const char mQueryFrame[];
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_QEMU_CLIENT_H */
diff --git a/v3/fake-pipeline2/Android.mk b/v3/fake-pipeline2/Android.mk
new file mode 100644
index 0000000..2e43120
--- a/dev/null
+++ b/v3/fake-pipeline2/Android.mk
@@ -0,0 +1,4 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/v3/fake-pipeline2/Base.h b/v3/fake-pipeline2/Base.h
new file mode 100755
index 0000000..ff636e8
--- a/dev/null
+++ b/v3/fake-pipeline2/Base.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This file includes various basic structures that are needed by multiple parts
+ * of the fake camera 2 implementation.
+ */
+
+#ifndef HW_EMULATOR_CAMERA2_BASE_H
+#define HW_EMULATOR_CAMERA2_BASE_H
+
+#include <system/window.h>
+#include <hardware/camera2.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+
+/* Internal structure for passing buffers across threads */
+struct StreamBuffer {
+ // Positive numbers are output streams
+ // Negative numbers are input reprocess streams
+    // Zero is an auxiliary buffer
+ int streamId;
+ uint32_t width, height;
+ uint32_t format;
+ uint32_t stride;
+ buffer_handle_t *buffer;
+ uint8_t *img;
+};
+typedef Vector<StreamBuffer> Buffers;
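+
+/* Illustrative sketch: filling in an auxiliary (streamId == 0) RGB888 buffer
+ * of the kind the JPEG compressor consumes; all values here are assumptions
+ * for the example only.
+ */
+#if 0
+StreamBuffer aux;
+aux.streamId = 0;                  /* auxiliary buffer, owned by the pipeline */
+aux.width  = 640;
+aux.height = 480;
+aux.format = HAL_PIXEL_FORMAT_RGB_888;
+aux.stride = 640;
+aux.buffer = NULL;                 /* not backed by a gralloc handle */
+aux.img    = new uint8_t[640 * 480 * 3];
+#endif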
+
+struct Stream {
+ const camera2_stream_ops_t *ops;
+ uint32_t width, height;
+ int32_t format;
+ uint32_t stride;
+};
+
+struct ReprocessStream {
+ const camera2_stream_in_ops_t *ops;
+ uint32_t width, height;
+ int32_t format;
+ uint32_t stride;
+ // -1 if the reprocessing stream is independent
+ int32_t sourceStreamId;
+};
+
+struct ExifInfo {
+ int mainwidth;
+ int mainheight;
+ int thumbwidth;
+ int thumbheight;
+ int64_t gpsTimestamp;
+ double longitude;
+ double latitude;
+ uint8_t *gpsProcessingMethod;
+ float focallen;
+ int orientation;
+};
+} // namespace android
+
+#endif
diff --git a/v3/fake-pipeline2/JpegCompressor.cpp b/v3/fake-pipeline2/JpegCompressor.cpp
new file mode 100755
index 0000000..36fb972
--- a/dev/null
+++ b/v3/fake-pipeline2/JpegCompressor.cpp
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera2_JpegCompressor"
+
+#include <utils/Log.h>
+#include <ui/GraphicBufferMapper.h>
+
+#include "JpegCompressor.h"
+#include "../EmulatedFakeCamera2.h"
+#include "../EmulatedFakeCamera3.h"
+#include <stdlib.h>
+#include <math.h>
+
+#define ARRAY_SIZE(array) (sizeof((array)) / sizeof((array)[0]))
+const uint8_t MARK = 0xFF;
+const uint8_t EOI = 0xD9;
+bool checkJpegEnd(uint8_t *buf) {
+ return buf[0] == MARK && buf[1] == EOI;
+}
+/* Nearest-neighbor downscale of a packed RGB888 image; used to produce the
+ * EXIF thumbnail from the full-size frame. */
+int extraSmallImg(unsigned char* SrcImg, int SrcW, int SrcH,
+                  unsigned char* DstImg, int DstW, int DstH)
+{
+    int skipW = SrcW / DstW;
+    int skipH = SrcH / DstH;
+    unsigned char* dst = DstImg;
+    unsigned char* srcrow = SrcImg;
+    unsigned char* srcrowidx = srcrow;
+    int i, j;
+    for (i = 0; i < DstH; i++) {
+        for (j = 0; j < DstW; j++) {
+            /* Copy one RGB pixel, then skip ahead in the source row. */
+            dst[0] = srcrowidx[0];
+            dst[1] = srcrowidx[1];
+            dst[2] = srcrowidx[2];
+            dst += 3;
+            srcrowidx += 3 * skipW;
+        }
+        srcrow += skipH * SrcW * 3;
+        srcrowidx = srcrow;
+    }
+    return 1;
+}
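+
+/* For illustration: shrinking a 640x480 RGB888 frame into a 160x120 EXIF
+ * thumbnail; srcRgb is a hypothetical caller-owned source buffer of
+ * 640*480*3 bytes.
+ */
+#if 0
+unsigned char thumb[160 * 120 * 3];
+extraSmallImg(srcRgb, 640, 480, thumb, 160, 120);
+#endif
+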
+namespace android {
+
+struct string_pair {
+ const char* string1;
+ const char* string2;
+};
+static string_pair degrees_to_exif_lut[] = {
+ {"0", "1"},
+ {"90", "6"},
+ {"180", "3"},
+ {"270", "8"},
+};
+JpegCompressor::JpegCompressor():
+ Thread(false),
+ mIsBusy(false),
+    mSynchronous(false),
+    mFoundJpeg(false),
+    mFoundAux(false),
+    mNeedexif(false),
+ mMainJpegSize(0),
+ mThumbJpegSize(0),
+ mSrcThumbBuffer(NULL),
+ mDstThumbBuffer(NULL),
+ mBuffers(NULL),
+ mListener(NULL) {
+ memset(&mInfo,0,sizeof(struct ExifInfo));
+}
+
+JpegCompressor::~JpegCompressor() {
+ Mutex::Autolock lock(mMutex);
+}
+
+status_t JpegCompressor::start(Buffers *buffers, JpegListener *listener) {
+ if (listener == NULL) {
+ ALOGE("%s: NULL listener not allowed!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ Mutex::Autolock lock(mMutex);
+ {
+ Mutex::Autolock busyLock(mBusyMutex);
+
+ if (mIsBusy) {
+ ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mIsBusy = true;
+ mSynchronous = false;
+ mBuffers = buffers;
+ mListener = listener;
+ }
+
+ status_t res;
+ res = run("EmulatedFakeCamera2::JpegCompressor");
+ if (res != OK) {
+ ALOGE("%s: Unable to start up compression thread: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ delete mBuffers;
+ }
+ return res;
+}
+
+status_t JpegCompressor::compressSynchronous(Buffers *buffers) {
+ status_t res;
+
+ Mutex::Autolock lock(mMutex);
+ {
+ Mutex::Autolock busyLock(mBusyMutex);
+
+ if (mIsBusy) {
+ ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mIsBusy = true;
+ mSynchronous = true;
+ mBuffers = buffers;
+ }
+
+ res = compress();
+
+ cleanUp();
+
+ return res;
+}
+
+status_t JpegCompressor::cancel() {
+ requestExitAndWait();
+ return OK;
+}
+
+status_t JpegCompressor::readyToRun() {
+ return OK;
+}
+
+bool JpegCompressor::threadLoop() {
+ status_t res;
+ ExifElementsTable* exiftable = NULL;
+ struct camera2_jpeg_blob blob;
+ int offset;
+ ALOGV("%s: Starting compression thread", __FUNCTION__);
+
+ res = compress();
+ if (mNeedexif) {
+ memset(&blob,0,sizeof(struct camera2_jpeg_blob));
+ exiftable = new ExifElementsTable();
+ GenExif(exiftable);
+ res = thumbcompress();
+ }
+ if (exiftable) {
+        Section_t* exif_section = NULL;
+        ExifElementsTable* exif = exiftable;
+        exif->insertExifToJpeg((unsigned char*)mJpegBuffer.img, mMainJpegSize);
+        exif->insertExifThumbnailImage((const char*)mDstThumbBuffer, mThumbJpegSize);
+        exif_section = FindSection(M_EXIF);
+        /* Only append the transport blob when the EXIF section was actually
+         * created; dereferencing a NULL exif_section would crash here. */
+        if (exif_section) {
+            exif->saveJpeg((unsigned char*) mJpegBuffer.img,
+                           mMainJpegSize + exif_section->Size);
+            offset = kMaxJpegSize - sizeof(struct camera2_jpeg_blob);
+            blob.jpeg_blob_id = 0x00FF;
+            blob.jpeg_size = mMainJpegSize + exif_section->Size;
+            memcpy(mJpegBuffer.img + offset, &blob, sizeof(struct camera2_jpeg_blob));
+        }
+    }
+ mListener->onJpegDone(mJpegBuffer, res == OK);
+
+ if (mNeedexif) {
+ if (exiftable != NULL) {
+ delete exiftable;
+ exiftable = NULL;
+ }
+ }
+ cleanUp();
+
+ return false;
+}
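+
+/* Illustrative sketch (not part of this file): how a consumer of the BLOB
+ * stream recovers the real JPEG size from the camera2_jpeg_blob trailer that
+ * threadLoop() writes at the tail of the fixed-size buffer.
+ */
+#if 0
+static size_t getActualJpegSize(const uint8_t* blobBuffer, size_t bufferSize)
+{
+    struct camera2_jpeg_blob blob;
+    memcpy(&blob, blobBuffer + bufferSize - sizeof(blob), sizeof(blob));
+    return (blob.jpeg_blob_id == 0x00FF) ? blob.jpeg_size : 0;
+}
+#endif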
+
+status_t JpegCompressor::compress() {
+ // Find source and target buffers. Assumes only one buffer matches
+ // each condition!
+
+ Mutex::Autolock lock(mMutex);
+    /* Reset the member flags so that results from a previous compression run
+     * don't leak into this one (cleanUp() also reads mFoundAux). */
+    mFoundJpeg = false;
+    mFoundAux = false;
+ for (size_t i = 0; i < mBuffers->size(); i++) {
+ const StreamBuffer &b = (*mBuffers)[i];
+ if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+ mJpegBuffer = b;
+ mFoundJpeg = true;
+ } else if (b.streamId <= 0) {
+ mAuxBuffer = b;
+ mFoundAux = true;
+ }
+ if (mFoundJpeg && mFoundAux) break;
+ }
+ if (!mFoundJpeg || !mFoundAux) {
+ ALOGE("%s: Unable to find buffers for JPEG source/destination",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+    if (mNeedexif) {
+ mSrcThumbBuffer = (uint8_t*)malloc(mInfo.thumbwidth*mInfo.thumbheight*3);
+ mDstThumbBuffer = (uint8_t*)malloc(mInfo.thumbwidth*mInfo.thumbheight*3);
+ if (mSrcThumbBuffer) {
+ if (mAuxBuffer.format == HAL_PIXEL_FORMAT_BLOB)
+ extraSmallImg(mAuxBuffer.img,mAuxBuffer.width,mAuxBuffer.height,
+ mSrcThumbBuffer,mInfo.thumbwidth,mInfo.thumbheight);
+ }
+ }
+ // Set up error management
+
+ mJpegErrorInfo = NULL;
+ JpegError error;
+ error.parent = this;
+
+ mCInfo.err = jpeg_std_error(&error);
+ mCInfo.err->error_exit = MainJpegErrorHandler;
+
+ jpeg_create_compress(&mCInfo);
+ if (checkError("Error initializing compression")) return NO_INIT;
+
+ // Route compressed data straight to output stream buffer
+
+ JpegDestination jpegDestMgr;
+ jpegDestMgr.parent = this;
+ jpegDestMgr.init_destination = MainJpegInitDestination;
+ jpegDestMgr.empty_output_buffer = MainJpegEmptyOutputBuffer;
+ jpegDestMgr.term_destination = MainJpegTermDestination;
+
+ mCInfo.dest = &jpegDestMgr;
+
+ // Set up compression parameters
+
+ mCInfo.image_width = mAuxBuffer.width;
+ mCInfo.image_height = mAuxBuffer.height;
+ mCInfo.input_components = 3;
+ mCInfo.in_color_space = JCS_RGB;
+
+ jpeg_set_defaults(&mCInfo);
+ if (checkError("Error configuring defaults")) return NO_INIT;
+
+ // Do compression
+
+ jpeg_start_compress(&mCInfo, TRUE);
+ if (checkError("Error starting compression")) return NO_INIT;
+
+ size_t rowStride = mAuxBuffer.stride * 3;
+ const size_t kChunkSize = 32;
+ while (mCInfo.next_scanline < mCInfo.image_height) {
+ JSAMPROW chunk[kChunkSize];
+ for (size_t i = 0 ; i < kChunkSize; i++) {
+ chunk[i] = (JSAMPROW)
+ (mAuxBuffer.img + (i + mCInfo.next_scanline) * rowStride);
+ }
+ jpeg_write_scanlines(&mCInfo, chunk, kChunkSize);
+ if (checkError("Error while compressing")) return NO_INIT;
+ if (exitPending()) {
+ ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
+ return TIMED_OUT;
+ }
+ }
+
+ jpeg_finish_compress(&mCInfo);
+ if (checkError("Error while finishing compression")) return NO_INIT;
+
+ // All done
+ mMainJpegSize = kMaxJpegSize - mCInfo.dest->free_in_buffer;
+ ALOGD("mMainJpegSize = %d",mMainJpegSize);
+
+
+ return OK;
+}
+
+status_t JpegCompressor::thumbcompress() {
+ Mutex::Autolock lock(mMutex);
+ mJpegErrorInfo = NULL;
+ JpegError error;
+ error.parent = this;
+ mCInfo.err = jpeg_std_error(&error);
+ mCInfo.err->error_exit = ThumbJpegErrorHandler;
+
+ jpeg_create_compress(&mCInfo);
+ if (checkError("Error initializing compression")) return NO_INIT;
+ JpegDestination jpegDestMgr;
+ jpegDestMgr.parent = this;
+ jpegDestMgr.init_destination = ThumbJpegInitDestination;
+ jpegDestMgr.empty_output_buffer = ThumbJpegEmptyOutputBuffer;
+ jpegDestMgr.term_destination = ThumbJpegTermDestination;
+ mCInfo.dest = &jpegDestMgr;
+
+ // Set up compression parameters
+
+ mCInfo.image_width = mInfo.thumbwidth;
+ mCInfo.image_height = mInfo.thumbheight;
+ mCInfo.input_components = 3;
+ mCInfo.in_color_space = JCS_RGB;
+ jpeg_set_defaults(&mCInfo);
+ if (checkError("Error configuring defaults")) return NO_INIT;
+ jpeg_start_compress(&mCInfo, TRUE);
+ if (checkError("Error starting compression")) return NO_INIT;
+    size_t rowStride = mInfo.thumbwidth * 3;
+ const size_t kChunkSize = 32;
+ while (mCInfo.next_scanline < mCInfo.image_height) {
+ JSAMPROW chunk[kChunkSize];
+ for (size_t i = 0 ; i < kChunkSize; i++) {
+ chunk[i] = (JSAMPROW)
+ (mSrcThumbBuffer + (i + mCInfo.next_scanline) * rowStride);
+ }
+ jpeg_write_scanlines(&mCInfo, chunk, kChunkSize);
+ if (checkError("Error while compressing")) return NO_INIT;
+ if (exitPending()) {
+ ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
+ return TIMED_OUT;
+ }
+ }
+ jpeg_finish_compress(&mCInfo);
+ if (checkError("Error while finishing compression")) return NO_INIT;
+ mThumbJpegSize = kMaxJpegSize - mCInfo.dest->free_in_buffer;
+
+ return OK;
+}
+bool JpegCompressor::isBusy() {
+ Mutex::Autolock busyLock(mBusyMutex);
+ return mIsBusy;
+}
+
+bool JpegCompressor::isStreamInUse(uint32_t id) {
+ Mutex::Autolock lock(mBusyMutex);
+
+ if (mBuffers && mIsBusy) {
+ for (size_t i = 0; i < mBuffers->size(); i++) {
+ if ( (*mBuffers)[i].streamId == (int)id ) return true;
+ }
+ }
+ return false;
+}
+
+bool JpegCompressor::waitForDone(nsecs_t timeout) {
+ Mutex::Autolock lock(mBusyMutex);
+ status_t res = OK;
+ if (mIsBusy) {
+ res = mDone.waitRelative(mBusyMutex, timeout);
+ }
+ return (res == OK);
+}
+
+bool JpegCompressor::checkError(const char *msg) {
+ if (mJpegErrorInfo) {
+ char errBuffer[JMSG_LENGTH_MAX];
+ mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer);
+ ALOGE("%s: %s: %s",
+ __FUNCTION__, msg, errBuffer);
+ mJpegErrorInfo = NULL;
+ return true;
+ }
+ return false;
+}
+
+void JpegCompressor::cleanUp() {
+ status_t res;
+ jpeg_destroy_compress(&mCInfo);
+ if (mNeedexif) {
+ mNeedexif = false;
+ free(mSrcThumbBuffer);
+ free(mDstThumbBuffer);
+ }
+ Mutex::Autolock lock(mBusyMutex);
+
+ if (mFoundAux) {
+ if (mAuxBuffer.streamId == 0) {
+ delete[] mAuxBuffer.img;
+ } else if (!mSynchronous) {
+ mListener->onJpegInputDone(mAuxBuffer);
+ }
+ }
+ if (!mSynchronous) {
+ delete mBuffers;
+ }
+
+ mBuffers = NULL;
+
+ mIsBusy = false;
+ mDone.signal();
+}
+
+void JpegCompressor::MainJpegErrorHandler(j_common_ptr cinfo) {
+ JpegError *error = static_cast<JpegError*>(cinfo->err);
+ error->parent->mJpegErrorInfo = cinfo;
+}
+
+void JpegCompressor::MainJpegInitDestination(j_compress_ptr cinfo) {
+ JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
+ ALOGV("%s: Setting destination to %p, size %zu",
+ __FUNCTION__, dest->parent->mJpegBuffer.img, kMaxJpegSize);
+ dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer.img);
+ dest->free_in_buffer = kMaxJpegSize;
+}
+
+boolean JpegCompressor::MainJpegEmptyOutputBuffer(j_compress_ptr cinfo) {
+ ALOGE("%s: JPEG destination buffer overflow!",
+ __FUNCTION__);
+ return true;
+}
+
+void JpegCompressor::MainJpegTermDestination(j_compress_ptr cinfo) {
+ ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer",
+ __FUNCTION__, cinfo->dest->free_in_buffer);
+}
+
+void JpegCompressor::ThumbJpegErrorHandler(j_common_ptr cinfo) {
+ JpegError *error = static_cast<JpegError*>(cinfo->err);
+ error->parent->mJpegErrorInfo = cinfo;
+}
+void JpegCompressor::ThumbJpegInitDestination(j_compress_ptr cinfo) {
+ JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
+ ALOGV("%s: Setting destination to %p, size %zu",
+ __FUNCTION__, dest->parent->mDstThumbBuffer, kMaxJpegSize);
+ dest->next_output_byte = (JOCTET*)(dest->parent->mDstThumbBuffer);
+ dest->free_in_buffer = kMaxJpegSize;
+}
+boolean JpegCompressor::ThumbJpegEmptyOutputBuffer(j_compress_ptr cinfo) {
+ ALOGE("%s: Thumb JPEG destination buffer overflow!",
+ __FUNCTION__);
+ return true;
+}
+void JpegCompressor::ThumbJpegTermDestination(j_compress_ptr cinfo) {
+ ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer",
+ __FUNCTION__, cinfo->dest->free_in_buffer);
+}
+JpegCompressor::JpegListener::~JpegListener() {
+}
+
+void JpegCompressor::SetExifInfo(struct ExifInfo info)
+{
+ mInfo.mainwidth = info.mainwidth;
+ mInfo.mainheight = info.mainheight;
+ mInfo.thumbwidth = info.thumbwidth;
+ mInfo.thumbheight = info.thumbheight;
+// mInfo.gpsTimestamp = info.gpsTimestamp;
+// mInfo.latitude = info.latitude;
+// mInfo.longitude = info.longitude;
+// mInfo.gpsProcessingMethod = info.gpsProcessingMethod;
+// mInfo.focallen = info.focallen;
+ mInfo.orientation = info.orientation;
+ if ((mInfo.thumbwidth>0)&&(mInfo.thumbheight>0)) {
+ mNeedexif = true;
+ }
+}
+int JpegCompressor::GenExif(ExifElementsTable* exiftable)
+{
+ char exifcontent[256];
+ int width,height;
+ exiftable->insertElement("Make","exif-maker");
+ exiftable->insertElement("Model","exif-model");
+// int orientation = mInfo.orientation;
+
+
+ width = mInfo.mainwidth;
+ height = mInfo.mainheight;
+#if 0
+ if(orientation == 0)
+ orientation = 1;
+ else if(orientation == 90)
+ orientation = 6;
+ else if(orientation == 180)
+ orientation = 3;
+ else if(orientation == 270)
+ orientation = 8;
+#endif
+// sprintf(exifcontent,"%d",orientation);
+// exiftable->insertElement("Orientation",(const char*)exifcontent);
+ sprintf(exifcontent,"%d",width);
+ exiftable->insertElement("ImageWidth",(const char*)exifcontent);
+ sprintf(exifcontent,"%d",height);
+ exiftable->insertElement("ImageLength",(const char*)exifcontent);
+ #if 0
+ float focallen = mParams.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
+ if(focallen >= 0){
+ int focalNum = focallen*1000;
+ int focalDen = 1000;
+ sprintf(exifcontent,"%d/%d",focalNum,focalDen);
+ exiftable->insertElement("FocalLength",(const char*)exifcontent);
+ }
+ time_t times;
+ {
+ time(&times);
+ struct tm tmstruct;
+ tmstruct = *(localtime(&times)); //convert to local time
+ strftime(exifcontent, 30, "%Y:%m:%d %H:%M:%S", &tmstruct);
+ exiftable->insertElement("DateTime",(const char*)exifcontent);
+ }
+ times = mParams.getInt(CameraParameters::KEY_GPS_TIMESTAMP);
+ if(times != -1){
+ struct tm tmstruct;
+ tmstruct = *(gmtime(&times));//convert to standard time
+ strftime(exifcontent, 20, "%Y:%m:%d", &tmstruct);
+ exiftable->insertElement("GPSDateStamp",(const char*)exifcontent);
+ sprintf(exifcontent,"%d/%d,%d/%d,%d/%d",tmstruct.tm_hour,1,tmstruct.tm_min,1,tmstruct.tm_sec,1);
+ exiftable->insertElement("GPSTimeStamp",(const char*)exifcontent);
+ }
+ char* latitudestr = (char*)mParams.get(CameraParameters::KEY_GPS_LATITUDE);
+ if(latitudestr!=NULL){
+ int offset = 0;
+ float latitude = mParams.getFloat(CameraParameters::KEY_GPS_LATITUDE);
+ if(latitude < 0.0){
+ offset = 1;
+ latitude*= (float)(-1);
+ }
+ int latitudedegree = latitude;
+ float latitudeminuts = (latitude-(float)latitudedegree)*60;
+ int latitudeminuts_int = latitudeminuts;
+ float latituseconds = (latitudeminuts-(float)latitudeminuts_int)*60+0.5;
+ int latituseconds_int = latituseconds;
+ sprintf(exifcontent,"%d/%d,%d/%d,%d/%d",latitudedegree,1,latitudeminuts_int,1,latituseconds_int,1);
+ exiftable->insertElement("GPSLatitude",(const char*)exifcontent);
+ exiftable->insertElement("GPSLatitudeRef",(offset==1)?"S":"N");
+ }
+ char* longitudestr = (char*)mParams.get(CameraParameters::KEY_GPS_LONGITUDE);
+ if(longitudestr!=NULL){
+ int offset = 0;
+ float longitude = mParams.getFloat(CameraParameters::KEY_GPS_LONGITUDE);
+ if(longitude < 0.0){
+ offset = 1;
+ longitude*= (float)(-1);
+ }
+ int longitudedegree = longitude;
+ float longitudeminuts = (longitude-(float)longitudedegree)*60;
+ int longitudeminuts_int = longitudeminuts;
+ float longitudeseconds = (longitudeminuts-(float)longitudeminuts_int)*60+0.5;
+ int longitudeseconds_int = longitudeseconds;
+ sprintf(exifcontent,"%d/%d,%d/%d,%d/%d",longitudedegree,1,longitudeminuts_int,1,longitudeseconds_int,1);
+ exiftable->insertElement("GPSLongitude",(const char*)exifcontent);
+ exiftable->insertElement("GPSLongitudeRef",(offset==1)?"S":"N");
+ }
+ char* altitudestr = (char*)mParams.get(CameraParameters::KEY_GPS_ALTITUDE);
+ if(altitudestr!=NULL){
+ int offset = 0;
+ float altitude = mParams.getFloat(CameraParameters::KEY_GPS_ALTITUDE);
+ if(altitude < 0.0){
+ offset = 1;
+ altitude*= (float)(-1);
+ }
+ int altitudenum = altitude*1000;
+ int altitudedec= 1000;
+ sprintf(exifcontent,"%d/%d",altitudenum,altitudedec);
+ exiftable->insertElement("GPSAltitude",(const char*)exifcontent);
+ sprintf(exifcontent,"%d",offset);
+ exiftable->insertElement("GPSAltitudeRef",(const char*)exifcontent);
+ }
+ char* processmethod = (char*)mParams.get(CameraParameters::KEY_GPS_PROCESSING_METHOD);
+ if(processmethod!=NULL){
+ memset(exifcontent,0,sizeof(exifcontent));
+ char ExifAsciiPrefix[] = { 0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0 };//asicii
+ memcpy(exifcontent,ExifAsciiPrefix,8);
+ memcpy(exifcontent+8,processmethod,strlen(processmethod));
+ exiftable->insertElement("GPSProcessingMethod",(const char*)exifcontent);
+ }
+ #endif
+ return 1;
+}
+const char* ExifElementsTable::degreesToExifOrientation(const char* degrees) {
+    for (unsigned int i = 0; i < ARRAY_SIZE(degrees_to_exif_lut); i++) {
+        if (!strcmp(degrees, degrees_to_exif_lut[i].string1)) {
+            return degrees_to_exif_lut[i].string2;
+ }
+ }
+ return NULL;
+}
+void ExifElementsTable::stringToRational(const char* str, unsigned int* num, unsigned int* den) {
+ int len;
+ char * tempVal = NULL;
+ if (str != NULL) {
+ len = strlen(str);
+ tempVal = (char*) malloc( sizeof(char) * (len + 1));
+ }
+ if (tempVal != NULL) {
+ size_t den_len;
+ char *ctx;
+ unsigned int numerator = 0;
+ unsigned int denominator = 0;
+ char* temp = NULL;
+ memset(tempVal, '\0', len + 1);
+ strncpy(tempVal, str, len);
+ temp = strtok_r(tempVal, ".", &ctx);
+ if (temp != NULL)
+ numerator = atoi(temp);
+ if (!numerator)
+ numerator = 1;
+ temp = strtok_r(NULL, ".", &ctx);
+ if (temp != NULL) {
+ den_len = strlen(temp);
+ if(HUGE_VAL == den_len ) {
+ den_len = 0;
+ }
+ denominator = static_cast<unsigned int>(pow(10, den_len));
+ numerator = numerator * denominator + atoi(temp);
+ } else {
+ denominator = 1;
+ }
+ free(tempVal);
+ *num = numerator;
+ *den = denominator;
+ }
+}
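+/* Worked example (illustrative): stringToRational("3.5", &num, &den) splits
+ * the string into "3" and "5", so den = 10^1 = 10 and num = 3 * 10 + 5 = 35,
+ * i.e. the rational 35/10. An integer string such as "28" yields 28/1. */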
+bool ExifElementsTable::isAsciiTag(const char* tag) {
+ return (strcmp(tag, TAG_GPS_PROCESSING_METHOD) == 0);
+}
+status_t ExifElementsTable::insertElement(const char* tag, const char* value) {
+ int value_length = 0;
+ status_t ret = NO_ERROR;
+ if (!value || !tag) {
+ return -EINVAL;
+ }
+ if (position >= MAX_EXIF_TAGS_SUPPORTED) {
+ CAMHAL_LOGEA("Max number of EXIF elements already inserted");
+ return NO_MEMORY;
+ }
+ if (isAsciiTag(tag)) {
+ value_length = sizeof(ExifAsciiPrefix) + strlen(value + sizeof(ExifAsciiPrefix));
+ } else {
+ value_length = strlen(value);
+ }
+ if (IsGpsTag(tag)) {
+ table[position].GpsTag = TRUE;
+ table[position].Tag = GpsTagNameToValue(tag);
+ gps_tag_count++;
+ } else {
+ table[position].GpsTag = FALSE;
+ table[position].Tag = TagNameToValue(tag);
+ exif_tag_count++;
+ }
+ table[position].DataLength = 0;
+ table[position].Value = (char*) malloc(sizeof(char) * (value_length + 1));
+ if (table[position].Value) {
+ memcpy(table[position].Value, value, value_length + 1);
+ table[position].DataLength = value_length + 1;
+ }
+ position++;
+ return ret;
+}
+void ExifElementsTable::saveJpeg(unsigned char* jpeg, size_t jpeg_size) {
+ int ret;
+ if (jpeg_opened) {
+ ret = WriteJpegToBuffer(jpeg, jpeg_size);
+ ALOGD("saveJpeg :: ret =%d",ret);
+ DiscardData();
+ jpeg_opened = false;
+ }
+}
+void ExifElementsTable::insertExifToJpeg(unsigned char* jpeg, size_t jpeg_size) {
+ ReadMode_t read_mode = (ReadMode_t)(READ_METADATA | READ_IMAGE);
+ ResetJpgfile();
+ if (ReadJpegSectionsFromBuffer(jpeg, jpeg_size, read_mode)) {
+ jpeg_opened = true;
+ create_EXIF(table, exif_tag_count, gps_tag_count,true);
+ }
+}
+status_t ExifElementsTable::insertExifThumbnailImage(const char* thumb, int len) {
+ status_t ret = NO_ERROR;
+ if ((len > 0) && jpeg_opened) {
+ ret = ReplaceThumbnailFromBuffer(thumb, len);
+ CAMHAL_LOGDB("insertExifThumbnailImage. ReplaceThumbnail(). ret=%d", ret);
+ }
+ return ret;
+}
+ExifElementsTable::~ExifElementsTable() {
+ int num_elements = gps_tag_count + exif_tag_count;
+ for (int i = 0; i < num_elements; i++) {
+ if (table[i].Value) {
+ free(table[i].Value);
+ }
+ }
+ if (jpeg_opened) {
+ DiscardData();
+ }
+}
+} // namespace android
diff --git a/v3/fake-pipeline2/JpegCompressor.h b/v3/fake-pipeline2/JpegCompressor.h
new file mode 100755
index 0000000..fd35660
--- a/dev/null
+++ b/v3/fake-pipeline2/JpegCompressor.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * This class simulates a hardware JPEG compressor. It receives image buffers
+ * in RGBA_8888 format, processes them in a worker thread, and then pushes them
+ * out to their destination stream.
+ */
+
+#ifndef HW_EMULATOR_CAMERA2_JPEG_H
+#define HW_EMULATOR_CAMERA2_JPEG_H
+
+#include "utils/Thread.h"
+#include "utils/Mutex.h"
+#include "utils/Timers.h"
+
+#include "Base.h"
+
+#include <stdio.h>
+
+extern "C" {
+#include <jpeglib.h>
+#include <jhead.h>
+}
+
+namespace android {
+#define MAX_EXIF_TAGS_SUPPORTED 30
+static const char TAG_MODEL[] = "Model";
+static const char TAG_MAKE[] = "Make";
+static const char TAG_FOCALLENGTH[] = "FocalLength";
+static const char TAG_DATETIME[] = "DateTime";
+static const char TAG_IMAGE_WIDTH[] = "ImageWidth";
+static const char TAG_IMAGE_LENGTH[] = "ImageLength";
+static const char TAG_GPS_LAT[] = "GPSLatitude";
+static const char TAG_GPS_LAT_REF[] = "GPSLatitudeRef";
+static const char TAG_GPS_LONG[] = "GPSLongitude";
+static const char TAG_GPS_LONG_REF[] = "GPSLongitudeRef";
+static const char TAG_GPS_ALT[] = "GPSAltitude";
+static const char TAG_GPS_ALT_REF[] = "GPSAltitudeRef";
+static const char TAG_GPS_MAP_DATUM[] = "GPSMapDatum";
+static const char TAG_GPS_PROCESSING_METHOD[] = "GPSProcessingMethod";
+static const char TAG_GPS_VERSION_ID[] = "GPSVersionID";
+static const char TAG_GPS_TIMESTAMP[] = "GPSTimeStamp";
+static const char TAG_GPS_DATESTAMP[] = "GPSDateStamp";
+static const char TAG_ORIENTATION[] = "Orientation";
+class ExifElementsTable {
+ public:
+ ExifElementsTable() :
+ gps_tag_count(0), exif_tag_count(0), position(0),
+ jpeg_opened(false) { }
+ ~ExifElementsTable();
+ status_t insertElement(const char* tag, const char* value);
+ void insertExifToJpeg(unsigned char* jpeg, size_t jpeg_size);
+ status_t insertExifThumbnailImage(const char*, int);
+ void saveJpeg(unsigned char* picture, size_t jpeg_size);
+ static const char* degreesToExifOrientation(const char*);
+ static void stringToRational(const char*, unsigned int*, unsigned int*);
+ static bool isAsciiTag(const char* tag);
+ private:
+ ExifElement_t table[MAX_EXIF_TAGS_SUPPORTED];
+ unsigned int gps_tag_count;
+ unsigned int exif_tag_count;
+ unsigned int position;
+ bool jpeg_opened;
+};
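+
+// Typical call order, as a minimal sketch (the buffer names and tag value
+// below are illustrative, not part of this API): populate tags, parse the
+// source JPEG, optionally attach a thumbnail, then write the result out.
+//
+//   ExifElementsTable exif;
+//   exif.insertElement(TAG_MAKE, "SomeVendor");
+//   exif.insertExifToJpeg(srcJpeg, srcJpegSize);         // sets jpeg_opened
+//   exif.insertExifThumbnailImage(thumbJpeg, thumbSize); // optional
+//   exif.saveJpeg(dstJpeg, dstJpegSize);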
+
+class JpegCompressor: private Thread, public virtual RefBase {
+ public:
+
+ JpegCompressor();
+ ~JpegCompressor();
+
+ struct JpegListener {
+ // Called when JPEG compression has finished, or encountered an error
+ virtual void onJpegDone(const StreamBuffer &jpegBuffer,
+ bool success) = 0;
+ // Called when the input buffer for JPEG is not needed any more,
+ // if the buffer came from the framework.
+ virtual void onJpegInputDone(const StreamBuffer &inputBuffer) = 0;
+ virtual ~JpegListener();
+ };
+
+ // Start compressing COMPRESSED format buffers; JpegCompressor takes
+ // ownership of the Buffers vector.
+ status_t start(Buffers *buffers, JpegListener *listener);
+
+ // Compress and block until buffer is complete.
+ status_t compressSynchronous(Buffers *buffers);
+
+ status_t cancel();
+
+ bool isBusy();
+ bool isStreamInUse(uint32_t id);
+
+ bool waitForDone(nsecs_t timeout);
+
+ void SetExifInfo(struct ExifInfo info);
+ int GenExif(ExifElementsTable* exiftable);
+
+ // TODO: Measure this
+ static const size_t kMaxJpegSize = 300000;
+
+ private:
+ Mutex mBusyMutex;
+ bool mIsBusy;
+ Condition mDone;
+ bool mSynchronous;
+
+ Mutex mMutex;
+
+ Buffers *mBuffers;
+ JpegListener *mListener;
+
+ StreamBuffer mJpegBuffer, mAuxBuffer;
+ bool mFoundJpeg, mFoundAux;
+
+ bool mNeedexif;
+
+ int mMainJpegSize, mThumbJpegSize;
+ struct ExifInfo mInfo;
+ uint8_t *mSrcThumbBuffer;
+ uint8_t *mDstThumbBuffer;
+
+ jpeg_compress_struct mCInfo;
+
+ struct JpegError : public jpeg_error_mgr {
+ JpegCompressor *parent;
+ };
+ j_common_ptr mJpegErrorInfo;
+
+ struct JpegDestination : public jpeg_destination_mgr {
+ JpegCompressor *parent;
+ };
+
+ static void MainJpegErrorHandler(j_common_ptr cinfo);
+ static void MainJpegInitDestination(j_compress_ptr cinfo);
+ static boolean MainJpegEmptyOutputBuffer(j_compress_ptr cinfo);
+ static void MainJpegTermDestination(j_compress_ptr cinfo);
+
+ static void ThumbJpegErrorHandler(j_common_ptr cinfo);
+ static void ThumbJpegInitDestination(j_compress_ptr cinfo);
+ static boolean ThumbJpegEmptyOutputBuffer(j_compress_ptr cinfo);
+ static void ThumbJpegTermDestination(j_compress_ptr cinfo);
+
+ bool checkError(const char *msg);
+ status_t compress();
+
+ status_t thumbcompress();
+ void cleanUp();
+
+ /**
+ * Inherited Thread virtual overrides
+ */
+ private:
+ virtual status_t readyToRun();
+ virtual bool threadLoop();
+};
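+
+// Minimal usage sketch (the listener subclass and timeout below are
+// assumptions for illustration): hand a Buffers vector to start(), which
+// takes ownership of it, and consume the result in onJpegDone().
+//
+//   struct MyListener : public JpegCompressor::JpegListener {
+//       void onJpegDone(const StreamBuffer &jpeg, bool ok) { /* use jpeg */ }
+//       void onJpegInputDone(const StreamBuffer &in) { /* release input */ }
+//   };
+//
+//   MyListener listener;
+//   compressor->start(buffers, &listener);   // asynchronous
+//   compressor->waitForDone(kWaitTimeoutNs); // or poll isBusy()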
+
+} // namespace android
+
+#endif // HW_EMULATOR_CAMERA2_JPEG_H
diff --git a/v3/fake-pipeline2/Scene.cpp b/v3/fake-pipeline2/Scene.cpp
new file mode 100644
index 0000000..48296d2
--- a/dev/null
+++ b/v3/fake-pipeline2/Scene.cpp
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Scene"
+#include <utils/Log.h>
+#include <stdlib.h>
+#include <cmath>
+#include "Scene.h"
+
+// TODO: This should probably be done host-side in OpenGL for speed and better
+// quality
+
+namespace android {
+
+// Define single-letter shortcuts for scene definition, for directly indexing
+// mCurrentColors
+#define G (Scene::GRASS * Scene::NUM_CHANNELS)
+#define S (Scene::GRASS_SHADOW * Scene::NUM_CHANNELS)
+#define H (Scene::HILL * Scene::NUM_CHANNELS)
+#define W (Scene::WALL * Scene::NUM_CHANNELS)
+#define R (Scene::ROOF * Scene::NUM_CHANNELS)
+#define D (Scene::DOOR * Scene::NUM_CHANNELS)
+#define C (Scene::CHIMNEY * Scene::NUM_CHANNELS)
+#define I (Scene::WINDOW * Scene::NUM_CHANNELS)
+#define U (Scene::SUN * Scene::NUM_CHANNELS)
+#define K (Scene::SKY * Scene::NUM_CHANNELS)
+#define M (Scene::MOON * Scene::NUM_CHANNELS)
+
+const int Scene::kSceneWidth = 20;
+const int Scene::kSceneHeight = 20;
+
+const uint8_t Scene::kScene[Scene::kSceneWidth * Scene::kSceneHeight] = {
+ // 5 10 15 20
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K, // 5
+ K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
+ K,K,K,K,K,K,K,K,H,H,H,H,H,H,H,H,H,H,H,H,
+ K,K,K,K,K,K,K,K,H,H,H,H,H,H,H,C,C,H,H,H,
+ K,K,K,K,K,K,H,H,H,H,H,H,H,H,H,C,C,H,H,H,
+ H,K,K,K,K,K,H,R,R,R,R,R,R,R,R,R,R,R,R,H, // 10
+ H,K,K,K,K,H,H,R,R,R,R,R,R,R,R,R,R,R,R,H,
+ H,H,H,K,K,H,H,R,R,R,R,R,R,R,R,R,R,R,R,H,
+ H,H,H,K,K,H,H,H,W,W,W,W,W,W,W,W,W,W,H,H,
+ S,S,S,G,G,S,S,S,W,W,W,W,W,W,W,W,W,W,S,S,
+ S,G,G,G,G,S,S,S,W,I,I,W,D,D,W,I,I,W,S,S, // 15
+ G,G,G,G,G,G,S,S,W,I,I,W,D,D,W,I,I,W,S,S,
+ G,G,G,G,G,G,G,G,W,W,W,W,D,D,W,W,W,W,G,G,
+ G,G,G,G,G,G,G,G,W,W,W,W,D,D,W,W,W,W,G,G,
+ G,G,G,G,G,G,G,G,S,S,S,S,S,S,S,S,S,S,G,G,
+ G,G,G,G,G,G,G,G,S,S,S,S,S,S,S,S,S,S,G,G, // 20
+ // 5 10 15 20
+};
+
+#undef G
+#undef S
+#undef H
+#undef W
+#undef R
+#undef D
+#undef C
+#undef I
+#undef U
+#undef K
+#undef M
+
+Scene::Scene(
+ int sensorWidthPx,
+ int sensorHeightPx,
+ float sensorSensitivity):
+ mSensorWidth(sensorWidthPx),
+ mSensorHeight(sensorHeightPx),
+ mHour(12),
+ mExposureDuration(0.033f),
+ mSensorSensitivity(sensorSensitivity)
+{
+ // Map scene to sensor pixels
+ if (mSensorWidth > mSensorHeight) {
+ mMapDiv = (mSensorWidth / (kSceneWidth + 1) ) + 1;
+ } else {
+ mMapDiv = (mSensorHeight / (kSceneHeight + 1) ) + 1;
+ }
+ mOffsetX = (kSceneWidth * mMapDiv - mSensorWidth) / 2;
+ mOffsetY = (kSceneHeight * mMapDiv - mSensorHeight) / 2;
+
+ // Assume that sensor filters are sRGB primaries to start
+ mFilterR[0] = 3.2406f; mFilterR[1] = -1.5372f; mFilterR[2] = -0.4986f;
+ mFilterGr[0] = -0.9689f; mFilterGr[1] = 1.8758f; mFilterGr[2] = 0.0415f;
+ mFilterGb[0] = -0.9689f; mFilterGb[1] = 1.8758f; mFilterGb[2] = 0.0415f;
+ mFilterB[0] = 0.0557f; mFilterB[1] = -0.2040f; mFilterB[2] = 1.0570f;
+}
+
+Scene::~Scene() {
+}
+
+void Scene::setColorFilterXYZ(
+ float rX, float rY, float rZ,
+ float grX, float grY, float grZ,
+ float gbX, float gbY, float gbZ,
+ float bX, float bY, float bZ) {
+ mFilterR[0] = rX; mFilterR[1] = rY; mFilterR[2] = rZ;
+ mFilterGr[0] = grX; mFilterGr[1] = grY; mFilterGr[2] = grZ;
+ mFilterGb[0] = gbX; mFilterGb[1] = gbY; mFilterGb[2] = gbZ;
+ mFilterB[0] = bX; mFilterB[1] = bY; mFilterB[2] = bZ;
+}
+
+void Scene::setHour(int hour) {
+ ALOGV("Hour set to: %d", hour);
+ mHour = hour % 24;
+}
+
+int Scene::getHour() {
+ return mHour;
+}
+
+void Scene::setExposureDuration(float seconds) {
+ mExposureDuration = seconds;
+}
+
+void Scene::calculateScene(nsecs_t time) {
+ // Calculate time fractions for interpolation
+ int timeIdx = mHour / kTimeStep;
+ int nextTimeIdx = (timeIdx + 1) % (24 / kTimeStep);
+ const nsecs_t kOneHourInNsec = 1e9 * 60 * 60;
+ nsecs_t timeSinceIdx = (mHour - timeIdx * kTimeStep) * kOneHourInNsec + time;
+ float timeFrac = timeSinceIdx / (float)(kOneHourInNsec * kTimeStep);
+
+ // Determine overall sunlight levels
+ float sunLux =
+ kSunlight[timeIdx] * (1 - timeFrac) +
+ kSunlight[nextTimeIdx] * timeFrac;
+ ALOGV("Sun lux: %f", sunLux);
+
+ float sunShadeLux = sunLux * (kDaylightShadeIllum / kDirectSunIllum);
+
+ // Determine sun/shade illumination chromaticity
+ float currentSunXY[2];
+ float currentShadeXY[2];
+
+ const float *prevSunXY, *nextSunXY;
+ const float *prevShadeXY, *nextShadeXY;
+ if (kSunlight[timeIdx] == kSunsetIllum ||
+ kSunlight[timeIdx] == kTwilightIllum) {
+ prevSunXY = kSunsetXY;
+ prevShadeXY = kSunsetXY;
+ } else {
+ prevSunXY = kDirectSunlightXY;
+ prevShadeXY = kDaylightXY;
+ }
+ if (kSunlight[nextTimeIdx] == kSunsetIllum ||
+ kSunlight[nextTimeIdx] == kTwilightIllum) {
+ nextSunXY = kSunsetXY;
+ nextShadeXY = kSunsetXY;
+ } else {
+ nextSunXY = kDirectSunlightXY;
+ nextShadeXY = kDaylightXY;
+ }
+ currentSunXY[0] = prevSunXY[0] * (1 - timeFrac) +
+ nextSunXY[0] * timeFrac;
+ currentSunXY[1] = prevSunXY[1] * (1 - timeFrac) +
+ nextSunXY[1] * timeFrac;
+
+ currentShadeXY[0] = prevShadeXY[0] * (1 - timeFrac) +
+ nextShadeXY[0] * timeFrac;
+ currentShadeXY[1] = prevShadeXY[1] * (1 - timeFrac) +
+ nextShadeXY[1] * timeFrac;
+
+ ALOGV("Sun XY: %f, %f, Shade XY: %f, %f",
+ currentSunXY[0], currentSunXY[1],
+ currentShadeXY[0], currentShadeXY[1]);
+
+ // Converting for xyY to XYZ:
+ // X = Y / y * x
+ // Y = Y
+ // Z = Y / y * (1 - x - y);
+ float sunXYZ[3] = {
+ sunLux / currentSunXY[1] * currentSunXY[0],
+ sunLux,
+ sunLux / currentSunXY[1] *
+ (1 - currentSunXY[0] - currentSunXY[1])
+ };
+ float sunShadeXYZ[3] = {
+ sunShadeLux / currentShadeXY[1] * currentShadeXY[0],
+ sunShadeLux,
+ sunShadeLux / currentShadeXY[1] *
+ (1 - currentShadeXY[0] - currentShadeXY[1])
+ };
+ ALOGV("Sun XYZ: %f, %f, %f",
+ sunXYZ[0], sunXYZ[1], sunXYZ[2]);
+ ALOGV("Sun shade XYZ: %f, %f, %f",
+ sunShadeXYZ[0], sunShadeXYZ[1], sunShadeXYZ[2]);
+
+ // Determine moonlight levels
+ float moonLux =
+ kMoonlight[timeIdx] * (1 - timeFrac) +
+ kMoonlight[nextTimeIdx] * timeFrac;
+ float moonShadeLux = moonLux * (kDaylightShadeIllum / kDirectSunIllum);
+
+ float moonXYZ[3] = {
+ moonLux / kMoonlightXY[1] * kMoonlightXY[0],
+ moonLux,
+ moonLux / kMoonlightXY[1] *
+ (1 - kMoonlightXY[0] - kMoonlightXY[1])
+ };
+ float moonShadeXYZ[3] = {
+ moonShadeLux / kMoonlightXY[1] * kMoonlightXY[0],
+ moonShadeLux,
+ moonShadeLux / kMoonlightXY[1] *
+ (1 - kMoonlightXY[0] - kMoonlightXY[1])
+ };
+
+ // Determine starlight level
+ const float kClearNightXYZ[3] = {
+ kClearNightIllum / kMoonlightXY[1] * kMoonlightXY[0],
+ kClearNightIllum,
+ kClearNightIllum / kMoonlightXY[1] *
+ (1 - kMoonlightXY[0] - kMoonlightXY[1])
+ };
+
+ // Calculate direct and shaded light
+ float directIllumXYZ[3] = {
+ sunXYZ[0] + moonXYZ[0] + kClearNightXYZ[0],
+ sunXYZ[1] + moonXYZ[1] + kClearNightXYZ[1],
+ sunXYZ[2] + moonXYZ[2] + kClearNightXYZ[2],
+ };
+
+ float shadeIllumXYZ[3] = {
+ kClearNightXYZ[0],
+ kClearNightXYZ[1],
+ kClearNightXYZ[2]
+ };
+
+ shadeIllumXYZ[0] += (mHour < kSunOverhead) ? sunXYZ[0] : sunShadeXYZ[0];
+ shadeIllumXYZ[1] += (mHour < kSunOverhead) ? sunXYZ[1] : sunShadeXYZ[1];
+ shadeIllumXYZ[2] += (mHour < kSunOverhead) ? sunXYZ[2] : sunShadeXYZ[2];
+
+ // Moon up period covers 23->0 transition, shift for simplicity
+ int adjHour = (mHour + 12) % 24;
+ int adjMoonOverhead = (kMoonOverhead + 12 ) % 24;
+ shadeIllumXYZ[0] += (adjHour < adjMoonOverhead) ?
+ moonXYZ[0] : moonShadeXYZ[0];
+ shadeIllumXYZ[1] += (adjHour < adjMoonOverhead) ?
+ moonXYZ[1] : moonShadeXYZ[1];
+ shadeIllumXYZ[2] += (adjHour < adjMoonOverhead) ?
+ moonXYZ[2] : moonShadeXYZ[2];
+
+ ALOGV("Direct XYZ: %f, %f, %f",
+ directIllumXYZ[0],directIllumXYZ[1],directIllumXYZ[2]);
+ ALOGV("Shade XYZ: %f, %f, %f",
+ shadeIllumXYZ[0], shadeIllumXYZ[1], shadeIllumXYZ[2]);
+
+ for (int i = 0; i < NUM_MATERIALS; i++) {
+ // Converting for xyY to XYZ:
+ // X = Y / y * x
+ // Y = Y
+ // Z = Y / y * (1 - x - y);
+ float matXYZ[3] = {
+ kMaterials_xyY[i][2] / kMaterials_xyY[i][1] *
+ kMaterials_xyY[i][0],
+ kMaterials_xyY[i][2],
+ kMaterials_xyY[i][2] / kMaterials_xyY[i][1] *
+ (1 - kMaterials_xyY[i][0] - kMaterials_xyY[i][1])
+ };
+
+ if (kMaterialsFlags[i] == 0 || kMaterialsFlags[i] & kSky) {
+ matXYZ[0] *= directIllumXYZ[0];
+ matXYZ[1] *= directIllumXYZ[1];
+ matXYZ[2] *= directIllumXYZ[2];
+ } else if (kMaterialsFlags[i] & kShadowed) {
+ matXYZ[0] *= shadeIllumXYZ[0];
+ matXYZ[1] *= shadeIllumXYZ[1];
+ matXYZ[2] *= shadeIllumXYZ[2];
+        } // else if (kMaterialsFlags[i] & kSelfLit), do nothing
+
+ ALOGV("Mat %d XYZ: %f, %f, %f", i, matXYZ[0], matXYZ[1], matXYZ[2]);
+ float luxToElectrons = mSensorSensitivity * mExposureDuration /
+ (kAperture * kAperture);
+ mCurrentColors[i*NUM_CHANNELS + 0] =
+ (mFilterR[0] * matXYZ[0] +
+ mFilterR[1] * matXYZ[1] +
+ mFilterR[2] * matXYZ[2])
+ * luxToElectrons;
+ mCurrentColors[i*NUM_CHANNELS + 1] =
+ (mFilterGr[0] * matXYZ[0] +
+ mFilterGr[1] * matXYZ[1] +
+ mFilterGr[2] * matXYZ[2])
+ * luxToElectrons;
+ mCurrentColors[i*NUM_CHANNELS + 2] =
+ (mFilterGb[0] * matXYZ[0] +
+ mFilterGb[1] * matXYZ[1] +
+ mFilterGb[2] * matXYZ[2])
+ * luxToElectrons;
+ mCurrentColors[i*NUM_CHANNELS + 3] =
+ (mFilterB[0] * matXYZ[0] +
+ mFilterB[1] * matXYZ[1] +
+ mFilterB[2] * matXYZ[2])
+ * luxToElectrons;
+
+ ALOGV("Color %d RGGB: %d, %d, %d, %d", i,
+ mCurrentColors[i*NUM_CHANNELS + 0],
+ mCurrentColors[i*NUM_CHANNELS + 1],
+ mCurrentColors[i*NUM_CHANNELS + 2],
+ mCurrentColors[i*NUM_CHANNELS + 3]);
+ }
+ // Shake viewpoint; horizontal and vertical sinusoids at roughly
+ // human handshake frequencies
+ mHandshakeX =
+ ( kFreq1Magnitude * std::sin(kHorizShakeFreq1 * timeSinceIdx) +
+ kFreq2Magnitude * std::sin(kHorizShakeFreq2 * timeSinceIdx) ) *
+ mMapDiv * kShakeFraction;
+
+ mHandshakeY =
+ ( kFreq1Magnitude * std::sin(kVertShakeFreq1 * timeSinceIdx) +
+ kFreq2Magnitude * std::sin(kVertShakeFreq2 * timeSinceIdx) ) *
+ mMapDiv * kShakeFraction;
+
+ // Set starting pixel
+ setReadoutPixel(0,0);
+}
+
+void Scene::setReadoutPixel(int x, int y) {
+ mCurrentX = x;
+ mCurrentY = y;
+ mSubX = (x + mOffsetX + mHandshakeX) % mMapDiv;
+ mSubY = (y + mOffsetY + mHandshakeY) % mMapDiv;
+ mSceneX = (x + mOffsetX + mHandshakeX) / mMapDiv;
+ mSceneY = (y + mOffsetY + mHandshakeY) / mMapDiv;
+ mSceneIdx = mSceneY * kSceneWidth + mSceneX;
+ mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
+}
+
+const uint32_t* Scene::getPixelElectrons() {
+ const uint32_t *pixel = mCurrentSceneMaterial;
+ mCurrentX++;
+ mSubX++;
+ if (mCurrentX >= mSensorWidth) {
+ mCurrentX = 0;
+ mCurrentY++;
+ if (mCurrentY >= mSensorHeight) mCurrentY = 0;
+ setReadoutPixel(mCurrentX, mCurrentY);
+ } else if (mSubX > mMapDiv) {
+ mSceneIdx++;
+ mSceneX++;
+ mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
+ mSubX = 0;
+ }
+ return pixel;
+}
+
+// Handshake model constants.
+// Frequencies measured in a nanosecond timebase
+const float Scene::kHorizShakeFreq1 = 2 * M_PI * 2 / 1e9; // 2 Hz
+const float Scene::kHorizShakeFreq2 = 2 * M_PI * 13 / 1e9; // 13 Hz
+const float Scene::kVertShakeFreq1 = 2 * M_PI * 3 / 1e9; // 3 Hz
+const float Scene::kVertShakeFreq2 = 2 * M_PI * 11 / 1e9; // 11 Hz
+const float Scene::kFreq1Magnitude = 5;
+const float Scene::kFreq2Magnitude = 1;
+const float Scene::kShakeFraction = 0.03; // As a fraction of a scene tile
+
+// RGB->YUV, Jpeg standard
+const float Scene::kRgb2Yuv[12] = {
+ 0.299f, 0.587f, 0.114f, 0.f,
+ -0.16874f, -0.33126f, 0.5f, -128.f,
+ 0.5f, -0.41869f, -0.08131f, -128.f,
+};
+
+// Aperture of imaging lens
+const float Scene::kAperture = 2.8;
+
+// Sun illumination levels through the day
+const float Scene::kSunlight[24/kTimeStep] =
+{
+ 0, // 00:00
+ 0,
+ 0,
+ kTwilightIllum, // 06:00
+ kDirectSunIllum,
+ kDirectSunIllum,
+ kDirectSunIllum, // 12:00
+ kDirectSunIllum,
+ kDirectSunIllum,
+ kSunsetIllum, // 18:00
+ kTwilightIllum,
+ 0
+};
+
+// Moon illumination levels through the day
+const float Scene::kMoonlight[24/kTimeStep] =
+{
+ kFullMoonIllum, // 00:00
+ kFullMoonIllum,
+ 0,
+ 0, // 06:00
+ 0,
+ 0,
+ 0, // 12:00
+ 0,
+ 0,
+ 0, // 18:00
+ 0,
+ kFullMoonIllum
+};
+
+const int Scene::kSunOverhead = 12;
+const int Scene::kMoonOverhead = 0;
+
+// Used for sun illumination levels
+const float Scene::kDirectSunIllum = 100000;
+const float Scene::kSunsetIllum = 400;
+const float Scene::kTwilightIllum = 4;
+// Used for moon illumination levels
+const float Scene::kFullMoonIllum = 1;
+// Other illumination levels
+const float Scene::kDaylightShadeIllum = 20000;
+const float Scene::kClearNightIllum = 2e-3;
+const float Scene::kStarIllum = 2e-6;
+const float Scene::kLivingRoomIllum = 50;
+
+const float Scene::kIncandescentXY[2] = { 0.44757f, 0.40745f};
+const float Scene::kDirectSunlightXY[2] = { 0.34842f, 0.35161f};
+const float Scene::kDaylightXY[2] = { 0.31271f, 0.32902f};
+const float Scene::kNoonSkyXY[2] = { 0.346f, 0.359f};
+const float Scene::kMoonlightXY[2] = { 0.34842f, 0.35161f};
+const float Scene::kSunsetXY[2] = { 0.527f, 0.413f};
+
+const uint8_t Scene::kSelfLit = 0x01;
+const uint8_t Scene::kShadowed = 0x02;
+const uint8_t Scene::kSky = 0x04;
+
+// For non-self-lit materials, the Y component is normalized with 1=full
+// reflectance; for self-lit materials, it's the constant illuminance in lux.
+const float Scene::kMaterials_xyY[Scene::NUM_MATERIALS][3] = {
+ { 0.3688f, 0.4501f, .1329f }, // GRASS
+ { 0.3688f, 0.4501f, .1329f }, // GRASS_SHADOW
+ { 0.3986f, 0.5002f, .4440f }, // HILL
+ { 0.3262f, 0.5040f, .2297f }, // WALL
+ { 0.4336f, 0.3787f, .1029f }, // ROOF
+ { 0.3316f, 0.2544f, .0639f }, // DOOR
+ { 0.3425f, 0.3577f, .0887f }, // CHIMNEY
+ { kIncandescentXY[0], kIncandescentXY[1], kLivingRoomIllum }, // WINDOW
+ { kDirectSunlightXY[0], kDirectSunlightXY[1], kDirectSunIllum }, // SUN
+ { kNoonSkyXY[0], kNoonSkyXY[1], kDaylightShadeIllum / kDirectSunIllum }, // SKY
+ { kMoonlightXY[0], kMoonlightXY[1], kFullMoonIllum } // MOON
+};
+
+const uint8_t Scene::kMaterialsFlags[Scene::NUM_MATERIALS] = {
+ 0,
+ kShadowed,
+ kShadowed,
+ kShadowed,
+ kShadowed,
+ kShadowed,
+ kShadowed,
+ kSelfLit,
+ kSelfLit,
+ kSky,
+ kSelfLit,
+};
+
+} // namespace android
diff --git a/v3/fake-pipeline2/Scene.h b/v3/fake-pipeline2/Scene.h
new file mode 100644
index 0000000..66d1a69
--- a/dev/null
+++ b/v3/fake-pipeline2/Scene.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The Scene class implements a simple physical simulation of a scene, using the
+ * CIE 1931 colorspace to represent light in physical units (lux).
+ *
+ * It's fairly approximate, but does provide a scene with realistic, widely
+ * variable illumination levels and colors over time.
+ *
+ */
+
+#ifndef HW_EMULATOR_CAMERA2_SCENE_H
+#define HW_EMULATOR_CAMERA2_SCENE_H
+
+#include "utils/Timers.h"
+
+namespace android {
+
+class Scene {
+ public:
+ Scene(int sensorWidthPx,
+ int sensorHeightPx,
+ float sensorSensitivity);
+ ~Scene();
+
+ // Set the filter coefficients for the red, green, and blue filters on the
+ // sensor. Used as an optimization to pre-calculate various illuminance
+ // values. Two different green filters can be provided, to account for
+ // possible cross-talk on a Bayer sensor. Must be called before
+ // calculateScene.
+ void setColorFilterXYZ(
+ float rX, float rY, float rZ,
+ float grX, float grY, float grZ,
+ float gbX, float gbY, float gbZ,
+ float bX, float bY, float bZ);
+
+ // Set time of day (24-hour clock). This controls the general light levels
+ // in the scene. Must be called before calculateScene
+ void setHour(int hour);
+ // Get current hour
+ int getHour();
+
+ // Set the duration of exposure for determining luminous exposure.
+ // Must be called before calculateScene
+ void setExposureDuration(float seconds);
+
+ // Calculate scene information for current hour and the time offset since
+ // the hour. Must be called at least once before calling getLuminousExposure.
+ // Resets pixel readout location to 0,0
+ void calculateScene(nsecs_t time);
+
+ // Set sensor pixel readout location.
+ void setReadoutPixel(int x, int y);
+
+ // Get sensor response in physical units (electrons) for light hitting the
+ // current readout pixel, after passing through color filters. The readout
+ // pixel will be auto-incremented. The returned array can be indexed with
+ // ColorChannels.
+ const uint32_t* getPixelElectrons();
+
+ enum ColorChannels {
+ R = 0,
+ Gr,
+ Gb,
+ B,
+ Y,
+ Cb,
+ Cr,
+ NUM_CHANNELS
+ };
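+
+    // Minimal readout sketch (variable names and loop bounds here are
+    // illustrative, not part of this API): after calculateScene(), walk the
+    // sensor array and index the returned electron counts by ColorChannels.
+    //
+    //   scene.calculateScene(t);
+    //   scene.setReadoutPixel(0, 0);
+    //   for (int y = 0; y < h; y++) {
+    //       for (int x = 0; x < w; x++) {
+    //           const uint32_t *e = scene.getPixelElectrons(); // auto-increments
+    //           uint32_t red = e[Scene::R];
+    //       }
+    //   }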
+
+ private:
+ // Sensor color filtering coefficients in XYZ
+ float mFilterR[3];
+ float mFilterGr[3];
+ float mFilterGb[3];
+ float mFilterB[3];
+
+ int mOffsetX, mOffsetY;
+ int mMapDiv;
+
+ int mHandshakeX, mHandshakeY;
+
+ int mSensorWidth;
+ int mSensorHeight;
+ int mCurrentX;
+ int mCurrentY;
+ int mSubX;
+ int mSubY;
+ int mSceneX;
+ int mSceneY;
+ int mSceneIdx;
+ uint32_t *mCurrentSceneMaterial;
+
+ int mHour;
+ float mExposureDuration;
+ float mSensorSensitivity;
+
+ enum Materials {
+ GRASS = 0,
+ GRASS_SHADOW,
+ HILL,
+ WALL,
+ ROOF,
+ DOOR,
+ CHIMNEY,
+ WINDOW,
+ SUN,
+ SKY,
+ MOON,
+ NUM_MATERIALS
+ };
+
+ uint32_t mCurrentColors[NUM_MATERIALS*NUM_CHANNELS];
+
+ /**
+     * Constants for scene definition. These are approximate to various degrees.
+ */
+
+ // Fake handshake parameters. Two shake frequencies per axis, plus magnitude
+ // as a fraction of a scene tile, and relative magnitudes for the frequencies
+ static const float kHorizShakeFreq1;
+ static const float kHorizShakeFreq2;
+ static const float kVertShakeFreq1;
+ static const float kVertShakeFreq2;
+ static const float kFreq1Magnitude;
+ static const float kFreq2Magnitude;
+
+ static const float kShakeFraction;
+
+ // RGB->YUV conversion
+ static const float kRgb2Yuv[12];
+
+ // Aperture of imaging lens
+ static const float kAperture;
+
+ // Sun, moon illuminance levels in 2-hour increments. These don't match any
+ // real day anywhere.
+ static const uint32_t kTimeStep = 2;
+ static const float kSunlight[];
+ static const float kMoonlight[];
+ static const int kSunOverhead;
+ static const int kMoonOverhead;
+
+ // Illumination levels for various conditions, in lux
+ static const float kDirectSunIllum;
+ static const float kDaylightShadeIllum;
+ static const float kSunsetIllum;
+ static const float kTwilightIllum;
+ static const float kFullMoonIllum;
+ static const float kClearNightIllum;
+ static const float kStarIllum;
+ static const float kLivingRoomIllum;
+
+ // Chromaticity of various illumination sources
+ static const float kIncandescentXY[2];
+ static const float kDirectSunlightXY[2];
+ static const float kDaylightXY[2];
+ static const float kNoonSkyXY[2];
+ static const float kMoonlightXY[2];
+ static const float kSunsetXY[2];
+
+ static const uint8_t kSelfLit;
+ static const uint8_t kShadowed;
+ static const uint8_t kSky;
+
+ static const float kMaterials_xyY[NUM_MATERIALS][3];
+ static const uint8_t kMaterialsFlags[NUM_MATERIALS];
+
+ static const int kSceneWidth;
+ static const int kSceneHeight;
+ static const uint8_t kScene[];
+};
+
+} // namespace android
+
+#endif // HW_EMULATOR_CAMERA2_SCENE_H
diff --git a/v3/fake-pipeline2/Sensor.cpp b/v3/fake-pipeline2/Sensor.cpp
new file mode 100755
index 0000000..6989289
--- a/dev/null
+++ b/v3/fake-pipeline2/Sensor.cpp
@@ -0,0 +1,1207 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+#define LOG_TAG "EmulatedCamera2_Sensor"
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <cutils/properties.h>
+
+#include "../EmulatedFakeCamera2.h"
+#include "Sensor.h"
+#include <cmath>
+#include <cstdlib>
+#include "system/camera_metadata.h"
+#include "libyuv.h"
+
+namespace android {
+
+const unsigned int Sensor::kResolution[2] = {1600, 1200};
+
+const nsecs_t Sensor::kExposureTimeRange[2] =
+ {1000L, 30000000000L} ; // 1 us - 30 sec
+const nsecs_t Sensor::kFrameDurationRange[2] =
+ {33331760L, 30000000000L}; // ~1/30 s - 30 sec
+const nsecs_t Sensor::kMinVerticalBlank = 10000L;
+
+const uint8_t Sensor::kColorFilterArrangement =
+ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;
+
+// Output image data characteristics
+const uint32_t Sensor::kMaxRawValue = 4000;
+const uint32_t Sensor::kBlackLevel = 1000;
+
+// Sensor sensitivity
+const float Sensor::kSaturationVoltage = 0.520f;
+const uint32_t Sensor::kSaturationElectrons = 2000;
+const float Sensor::kVoltsPerLuxSecond = 0.100f;
+
+const float Sensor::kElectronsPerLuxSecond =
+ Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
+ * Sensor::kVoltsPerLuxSecond;
+
+const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
+ Sensor::kSaturationElectrons;
+
+const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
+const float Sensor::kReadNoiseStddevAfterGain = 2.100; // in digital counts
+const float Sensor::kReadNoiseVarBeforeGain =
+ Sensor::kReadNoiseStddevBeforeGain *
+ Sensor::kReadNoiseStddevBeforeGain;
+const float Sensor::kReadNoiseVarAfterGain =
+ Sensor::kReadNoiseStddevAfterGain *
+ Sensor::kReadNoiseStddevAfterGain;
+
+// While each row has to read out, reset, and then expose, the (reset +
+// expose) sequence can be overlapped by other row readouts, so the final
+// minimum frame duration is purely a function of row readout time, at least
+// if there's a reasonable number of rows.
+const nsecs_t Sensor::kRowReadoutTime =
+ Sensor::kFrameDurationRange[0] / Sensor::kResolution[1];
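+// For example: with kFrameDurationRange[0] = 33331760 ns and 1200 rows,
+// each row reads out in roughly 33331760 / 1200 = ~27.8 us.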
+
+const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
+const uint32_t Sensor::kDefaultSensitivity = 100;
+
+/** A few utility functions for math, normal distributions */
+
+// Take advantage of IEEE floating-point format to calculate an approximate
+// square root. Accurate to within +-3.6%
+float sqrtf_approx(float r) {
+ // Modifier is based on IEEE floating-point representation; the
+ // manipulations boil down to finding approximate log2, dividing by two, and
+ // then inverting the log2. A bias is added to make the relative error
+ // symmetric about the real answer.
+ const int32_t modifier = 0x1FBB4000;
+
+ int32_t r_i = *(int32_t*)(&r);
+ r_i = (r_i >> 1) + modifier;
+
+ return *(float*)(&r_i);
+}
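+// e.g. sqrtf_approx(4.0f) returns a value near 2.0f, within the +-3.6% bound.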
+
+// Copy a width x height RGB24 image from a source whose rows are padded out
+// to a 32-pixel-aligned stride.
+void rgb24_memcpy(unsigned char *dst, unsigned char *src, int width, int height)
+{
+    int stride = (width + 31) & (~31); // source row stride, in pixels
+    int h;
+    for (h = 0; h < height; h++) {
+        memcpy(dst, src, width * 3);
+        dst += width * 3;
+        src += stride * 3;
+    }
+}
+
+Sensor::Sensor():
+ Thread(false),
+ mGotVSync(false),
+ mExposureTime(kFrameDurationRange[0]-kMinVerticalBlank),
+ mFrameDuration(kFrameDurationRange[0]),
+ mGainFactor(kDefaultSensitivity),
+ mNextBuffers(NULL),
+ mFrameNumber(0),
+ mCapturedBuffers(NULL),
+ mListener(NULL),
+ mScene(kResolution[0], kResolution[1], kElectronsPerLuxSecond)
+{
+
+}
+
+Sensor::~Sensor() {
+ shutDown();
+}
+
+status_t Sensor::startUp(int idx) {
+ ALOGV("%s: E", __FUNCTION__);
+ DBG_LOGA("ddd");
+
+ int res;
+ mCapturedBuffers = NULL;
+ res = run("EmulatedFakeCamera2::Sensor",
+ ANDROID_PRIORITY_URGENT_DISPLAY);
+
+ if (res != OK) {
+ ALOGE("Unable to start up sensor capture thread: %d", res);
+ }
+
+    vinfo = (struct VideoInfo *) calloc(1, sizeof(*vinfo));
+    if (vinfo == NULL) {
+        ALOGE("%s: Unable to allocate VideoInfo", __FUNCTION__);
+        return NO_MEMORY;
+    }
+    vinfo->idx = idx;
+
+ res = camera_open(vinfo);
+ if (res < 0) {
+ ALOGE("Unable to open sensor %d, errno=%d\n", vinfo->idx, res);
+ }
+
+ return res;
+}
+
+status_t Sensor::setOutputFormat(int width, int height, int pixelformat)
+{
+ int res;
+
+ if (pixelformat == V4L2_PIX_FMT_RGB24) {
+ vinfo->picture.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->picture.format.fmt.pix.width = width;
+ vinfo->picture.format.fmt.pix.height = height;
+ vinfo->picture.format.fmt.pix.pixelformat = pixelformat;
+ } else {
+ vinfo->preview.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->preview.format.fmt.pix.width = width;
+ vinfo->preview.format.fmt.pix.height = height;
+ vinfo->preview.format.fmt.pix.pixelformat = pixelformat;
+
+ res = setBuffersFormat(vinfo);
+ if (res < 0) {
+ ALOGE("set buffer failed\n");
+ return res;
+ }
+ }
+
+ return OK;
+
+}
+
+status_t Sensor::streamOn() {
+
+ return start_capturing(vinfo);
+}
+
+bool Sensor::isStreaming() {
+
+ return vinfo->isStreaming;
+}
+
+bool Sensor::isNeedRestart(int width, int height, int pixelformat)
+{
+ if ((vinfo->preview.format.fmt.pix.width != width)
+ ||(vinfo->preview.format.fmt.pix.height != height)
+ //||(vinfo->format.fmt.pix.pixelformat != pixelformat)
+        ) {
+        return true;
+    }
+    return false;
+}
+
+status_t Sensor::streamOff() {
+
+ return stop_capturing(vinfo);
+}
+
+int Sensor::getOutputFormat()
+{
+ struct v4l2_fmtdesc fmt;
+ int ret;
+ memset(&fmt,0,sizeof(fmt));
+ fmt.index = 0;
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ while ((ret = ioctl(vinfo->fd, VIDIOC_ENUM_FMT, &fmt)) == 0){
+ if (fmt.pixelformat == V4L2_PIX_FMT_NV21)
+ return V4L2_PIX_FMT_NV21;
+ else if (fmt.pixelformat == V4L2_PIX_FMT_MJPEG)
+ return V4L2_PIX_FMT_MJPEG;
+ else if (fmt.pixelformat == V4L2_PIX_FMT_YUYV)
+ return V4L2_PIX_FMT_YUYV;
+
+ fmt.index++;
+ }
+ return BAD_VALUE;
+}
+
+void Sensor::setPictureRotate(int rotate)
+{
+ mRotateValue = rotate;
+}
+
+int Sensor::getPictureRotate()
+{
+ return mRotateValue;
+}
+
+status_t Sensor::shutDown() {
+ ALOGV("%s: E", __FUNCTION__);
+
+ int res;
+ res = requestExitAndWait();
+ if (res != OK) {
+ ALOGE("Unable to shut down sensor capture thread: %d", res);
+ }
+
+    if (vinfo != NULL) {
+        stop_capturing(vinfo);
+        camera_close(vinfo);
+        free(vinfo);
+        vinfo = NULL;
+    }
+ ALOGD("%s: Exit", __FUNCTION__);
+ return res;
+}
+
+Scene &Sensor::getScene() {
+ return mScene;
+}
+
+status_t Sensor::setEffect(uint8_t effect)
+{
+ int ret = 0;
+ struct v4l2_control ctl;
+ ctl.id = V4L2_CID_COLORFX;
+
+ switch (effect) {
+ case ANDROID_CONTROL_EFFECT_MODE_OFF:
+ ctl.value= CAM_EFFECT_ENC_NORMAL;
+ break;
+ case ANDROID_CONTROL_EFFECT_MODE_NEGATIVE:
+ ctl.value= CAM_EFFECT_ENC_COLORINV;
+ break;
+ case ANDROID_CONTROL_EFFECT_MODE_SEPIA:
+ ctl.value= CAM_EFFECT_ENC_SEPIA;
+ break;
+ default:
+ ALOGE("%s: Doesn't support effect mode %d",
+ __FUNCTION__, effect);
+ return BAD_VALUE;
+ }
+
+ DBG_LOGB("set effect mode:%d", effect);
+ ret = ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl);
+ if (ret < 0) {
+ CAMHAL_LOGDB("Set effect fail: %s. ret=%d", strerror(errno),ret);
+ }
+    return ret;
+}
+
+#define MAX_LEVEL_FOR_EXPOSURE 16
+#define MIN_LEVEL_FOR_EXPOSURE 3
+
+int Sensor::getExposure(int *maxExp, int *minExp, int *def, camera_metadata_rational *step)
+{
+ struct v4l2_queryctrl qc;
+ int ret=0;
+ int level = 0;
+ int middle = 0;
+
+ memset( &qc, 0, sizeof(qc));
+
+ DBG_LOGA("getExposure\n");
+ qc.id = V4L2_CID_EXPOSURE;
+ ret = ioctl(vinfo->fd, VIDIOC_QUERYCTRL, &qc);
+ if(ret < 0) {
+ CAMHAL_LOGDB("QUERYCTRL failed, errno=%d\n", errno);
+ *minExp = -4;
+ *maxExp = 4;
+ *def = 0;
+ step->numerator = 1;
+ step->denominator = 1;
+ return ret;
+ }
+
+ if(0 < qc.step)
+ level = ( qc.maximum - qc.minimum + 1 )/qc.step;
+
+ if((level > MAX_LEVEL_FOR_EXPOSURE)
+ || (level < MIN_LEVEL_FOR_EXPOSURE)){
+ *minExp = -4;
+ *maxExp = 4;
+ *def = 0;
+ step->numerator = 1;
+ step->denominator = 1;
+ DBG_LOGB("not in[min,max], min=%d, max=%d, def=%d\n",
+ *minExp, *maxExp, *def);
+ return true;
+ }
+
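+    // Center the driver's absolute exposure range around zero so the HAL
+    // reports a symmetric EV-compensation range, with a step of 1/qc.step.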
+ middle = (qc.minimum+qc.maximum)/2;
+ *minExp = qc.minimum - middle;
+ *maxExp = qc.maximum - middle;
+ *def = qc.default_value - middle;
+ step->numerator = 1;
+ step->denominator = qc.step;
+ DBG_LOGB("min=%d, max=%d, step=%d\n", qc.minimum, qc.maximum, qc.step);
+ return ret;
+}
+
+status_t Sensor::setExposure(int expCmp)
+{
+ int ret = 0;
+ struct v4l2_control ctl;
+ struct v4l2_queryctrl qc;
+
+ if(mEV == expCmp){
+ return 0;
+ }else{
+ mEV = expCmp;
+ }
+ memset(&ctl, 0, sizeof(ctl));
+ memset(&qc, 0, sizeof(qc));
+
+ qc.id = V4L2_CID_EXPOSURE;
+
+ ret = ioctl(vinfo->fd, VIDIOC_QUERYCTRL, &qc);
+ if (ret < 0) {
+ CAMHAL_LOGDB("AMLOGIC CAMERA get Exposure fail: %s. ret=%d", strerror(errno),ret);
+ }
+
+ ctl.id = V4L2_CID_EXPOSURE;
+ ctl.value = expCmp + (qc.maximum - qc.minimum) / 2;
+
+ ret = ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl);
+ if (ret < 0) {
+ CAMHAL_LOGDB("AMLOGIC CAMERA Set Exposure fail: %s. ret=%d", strerror(errno),ret);
+ }
+ DBG_LOGB("setExposure value%d mEVmin%d mEVmax%d\n",ctl.value, qc.minimum, qc.maximum);
+    return ret;
+}
+
+int Sensor::getAntiBanding(uint8_t *antiBanding, uint8_t maxCont)
+{
+ struct v4l2_queryctrl qc;
+ struct v4l2_querymenu qm;
+ int ret;
+ int mode_count = -1;
+
+ memset(&qc, 0, sizeof(struct v4l2_queryctrl));
+ qc.id = V4L2_CID_POWER_LINE_FREQUENCY;
+ ret = ioctl (vinfo->fd, VIDIOC_QUERYCTRL, &qc);
+    if ((ret < 0) || (qc.flags & V4L2_CTRL_FLAG_DISABLED)) {
+        DBG_LOGB("camera handle %d can't support this ctrl", vinfo->fd);
+    } else if (qc.type != V4L2_CTRL_TYPE_INTEGER) {
+        DBG_LOGB("this ctrl of camera handle %d is not an integer type", vinfo->fd);
+ } else {
+ memset(&qm, 0, sizeof(qm));
+
+ int index = 0;
+ mode_count = 1;
+ antiBanding[0] = ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF;
+
+ for (index = qc.minimum; index <= qc.maximum; index+= qc.step) {
+ if (mode_count >= maxCont)
+ break;
+
+ memset(&qm, 0, sizeof(struct v4l2_querymenu));
+ qm.id = V4L2_CID_POWER_LINE_FREQUENCY;
+ qm.index = index;
+ if(ioctl (vinfo->fd, VIDIOC_QUERYMENU, &qm) < 0){
+ continue;
+ } else {
+ if (strcmp((char*)qm.name,"50hz") == 0) {
+ antiBanding[mode_count] = ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"60hz") == 0) {
+ antiBanding[mode_count] = ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"auto") == 0) {
+ antiBanding[mode_count] = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ mode_count++;
+ }
+
+ }
+ }
+ }
+
+ return mode_count;
+}
+
+status_t Sensor::setAntiBanding(uint8_t antiBanding)
+{
+ int ret = 0;
+ struct v4l2_control ctl;
+ ctl.id = V4L2_CID_POWER_LINE_FREQUENCY;
+
+ switch (antiBanding) {
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF:
+ ctl.value= CAM_ANTIBANDING_OFF;
+ break;
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ:
+ ctl.value= CAM_ANTIBANDING_50HZ;
+ break;
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ:
+ ctl.value= CAM_ANTIBANDING_60HZ;
+ break;
+ case ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO:
+ ctl.value= CAM_ANTIBANDING_AUTO;
+ break;
+ default:
+ ALOGE("%s: Doesn't support ANTIBANDING mode %d",
+ __FUNCTION__, antiBanding);
+ return BAD_VALUE;
+ }
+
+ DBG_LOGB("anti banding mode:%d", antiBanding);
+ ret = ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl);
+ if ( ret < 0) {
+ CAMHAL_LOGDA("failed to set anti banding mode!\n");
+ return BAD_VALUE;
+ }
+ return ret;
+}
+
+status_t Sensor::setFocuasArea(int32_t x0, int32_t y0, int32_t x1, int32_t y1)
+{
+ int ret = 0;
+ struct v4l2_control ctl;
+ ctl.id = V4L2_CID_FOCUS_ABSOLUTE;
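+    // Pack the focus area's center point into one control value: x-center in
+    // the high 16 bits, y-center in the low 16 bits, each offset by 1000
+    // (presumably to shift [-1000, 1000] area coordinates non-negative).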
+ ctl.value = ((x0 + x1) / 2 + 1000) << 16;
+ ctl.value |= ((y0 + y1) / 2 + 1000) & 0xffff;
+
+ ret = ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl);
+ return ret;
+}
+
+int Sensor::getAutoFocus(uint8_t *afMode, uint8_t maxCount)
+{
+ struct v4l2_queryctrl qc;
+ struct v4l2_querymenu qm;
+ int ret;
+ int mode_count = -1;
+
+ memset(&qc, 0, sizeof(struct v4l2_queryctrl));
+ qc.id = V4L2_CID_FOCUS_AUTO;
+ ret = ioctl (vinfo->fd, VIDIOC_QUERYCTRL, &qc);
+    if ((ret < 0) || (qc.flags & V4L2_CTRL_FLAG_DISABLED)) {
+ DBG_LOGB("camera handle %d can't support this ctrl",vinfo->fd);
+ }else if( qc.type != V4L2_CTRL_TYPE_MENU) {
+ DBG_LOGB("this ctrl of camera handle %d can't support menu type",vinfo->fd);
+ }else{
+ memset(&qm, 0, sizeof(qm));
+
+ int index = 0;
+ mode_count = 1;
+ afMode[0] = ANDROID_CONTROL_AF_MODE_OFF;
+
+ for (index = qc.minimum; index <= qc.maximum; index+= qc.step) {
+ if (mode_count >= maxCount)
+ break;
+
+ memset(&qm, 0, sizeof(struct v4l2_querymenu));
+ qm.id = V4L2_CID_FOCUS_AUTO;
+ qm.index = index;
+ if(ioctl (vinfo->fd, VIDIOC_QUERYMENU, &qm) < 0){
+ continue;
+ } else {
+ if (strcmp((char*)qm.name,"auto") == 0) {
+ afMode[mode_count] = ANDROID_CONTROL_AF_MODE_AUTO;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"continuous-video") == 0) {
+ afMode[mode_count] = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"continuous-picture") == 0) {
+ afMode[mode_count] = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ mode_count++;
+ }
+
+ }
+ }
+ }
+
+ return mode_count;
+}
+
+status_t Sensor::setAutoFocuas(uint8_t afMode)
+{
+ struct v4l2_control ctl;
+ ctl.id = V4L2_CID_FOCUS_AUTO;
+
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ ctl.value = CAM_FOCUS_MODE_AUTO;
+ break;
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ ctl.value = CAM_FOCUS_MODE_MACRO;
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ ctl.value = CAM_FOCUS_MODE_CONTI_VID;
+ break;
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ ctl.value = CAM_FOCUS_MODE_CONTI_PIC;
+ break;
+ default:
+ ALOGE("%s: Emulator doesn't support AF mode %d",
+ __FUNCTION__, afMode);
+ return BAD_VALUE;
+ }
+
+ if (ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl) < 0) {
+        CAMHAL_LOGDA("failed to set camera focus mode!\n");
+ return BAD_VALUE;
+ }
+
+ return OK;
+}
+
+int Sensor::getAWB(uint8_t *awbMode, uint8_t maxCount)
+{
+ struct v4l2_queryctrl qc;
+ struct v4l2_querymenu qm;
+ int ret;
+ int mode_count = -1;
+
+ memset(&qc, 0, sizeof(struct v4l2_queryctrl));
+ qc.id = V4L2_CID_DO_WHITE_BALANCE;
+ ret = ioctl (vinfo->fd, VIDIOC_QUERYCTRL, &qc);
+    if ((ret < 0) || (qc.flags & V4L2_CTRL_FLAG_DISABLED)) {
+ DBG_LOGB("camera handle %d can't support this ctrl",vinfo->fd);
+ }else if( qc.type != V4L2_CTRL_TYPE_MENU) {
+ DBG_LOGB("this ctrl of camera handle %d can't support menu type",vinfo->fd);
+ }else{
+ memset(&qm, 0, sizeof(qm));
+
+ int index = 0;
+ mode_count = 1;
+ awbMode[0] = ANDROID_CONTROL_AWB_MODE_OFF;
+
+ for (index = qc.minimum; index <= qc.maximum; index+= qc.step) {
+ if (mode_count >= maxCount)
+ break;
+
+ memset(&qm, 0, sizeof(struct v4l2_querymenu));
+ qm.id = V4L2_CID_DO_WHITE_BALANCE;
+ qm.index = index;
+ if(ioctl (vinfo->fd, VIDIOC_QUERYMENU, &qm) < 0){
+ continue;
+ } else {
+ if (strcmp((char*)qm.name,"auto") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_AUTO;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"daylight") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_DAYLIGHT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"incandescent") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_INCANDESCENT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"fluorescent") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_FLUORESCENT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"warm-fluorescent") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"cloudy-daylight") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"twilight") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_TWILIGHT;
+ mode_count++;
+ } else if (strcmp((char*)qm.name,"shade") == 0) {
+ awbMode[mode_count] = ANDROID_CONTROL_AWB_MODE_SHADE;
+ mode_count++;
+ }
+
+ }
+ }
+ }
+
+ return mode_count;
+}
+
+status_t Sensor::setAWB(uint8_t awbMode)
+{
+ int ret = 0;
+ struct v4l2_control ctl;
+ ctl.id = V4L2_CID_DO_WHITE_BALANCE;
+
+ switch (awbMode) {
+ case ANDROID_CONTROL_AWB_MODE_AUTO:
+ ctl.value = CAM_WB_AUTO;
+ break;
+ case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
+ ctl.value = CAM_WB_INCANDESCENCE;
+ break;
+ case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
+ ctl.value = CAM_WB_FLUORESCENT;
+ break;
+ case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
+ ctl.value = CAM_WB_DAYLIGHT;
+ break;
+ case ANDROID_CONTROL_AWB_MODE_SHADE:
+ ctl.value = CAM_WB_SHADE;
+ break;
+ default:
+ ALOGE("%s: Emulator doesn't support AWB mode %d",
+ __FUNCTION__, awbMode);
+ return BAD_VALUE;
+ }
+ ret = ioctl(vinfo->fd, VIDIOC_S_CTRL, &ctl);
+ return ret;
+}
+
+void Sensor::setExposureTime(uint64_t ns) {
+ Mutex::Autolock lock(mControlMutex);
+ ALOGVV("Exposure set to %f", ns/1000000.f);
+ mExposureTime = ns;
+}
+
+void Sensor::setFrameDuration(uint64_t ns) {
+ Mutex::Autolock lock(mControlMutex);
+ ALOGVV("Frame duration set to %f", ns/1000000.f);
+ mFrameDuration = ns;
+}
+
+void Sensor::setSensitivity(uint32_t gain) {
+ Mutex::Autolock lock(mControlMutex);
+ ALOGVV("Gain set to %d", gain);
+ mGainFactor = gain;
+}
+
+void Sensor::setDestinationBuffers(Buffers *buffers) {
+ Mutex::Autolock lock(mControlMutex);
+ mNextBuffers = buffers;
+}
+
+void Sensor::setFrameNumber(uint32_t frameNumber) {
+ Mutex::Autolock lock(mControlMutex);
+ mFrameNumber = frameNumber;
+}
+
+bool Sensor::waitForVSync(nsecs_t reltime) {
+ int res;
+ Mutex::Autolock lock(mControlMutex);
+
+ mGotVSync = false;
+ res = mVSync.waitRelative(mControlMutex, reltime);
+ if (res != OK && res != TIMED_OUT) {
+ ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
+ return false;
+ }
+ return mGotVSync;
+}
+
+bool Sensor::waitForNewFrame(nsecs_t reltime,
+ nsecs_t *captureTime) {
+ Mutex::Autolock lock(mReadoutMutex);
+ if (mCapturedBuffers == NULL) {
+ int res;
+ res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
+ if (res == TIMED_OUT) {
+ return false;
+ } else if (res != OK || mCapturedBuffers == NULL) {
+ ALOGE("Error waiting for sensor readout signal: %d", res);
+ return false;
+ }
+ } else {
+ mReadoutComplete.signal();
+ }
+
+ *captureTime = mCaptureTime;
+ mCapturedBuffers = NULL;
+ return true;
+}
+
+Sensor::SensorListener::~SensorListener() {
+}
+
+void Sensor::setSensorListener(SensorListener *listener) {
+ Mutex::Autolock lock(mControlMutex);
+ mListener = listener;
+}
+
+status_t Sensor::readyToRun() {
+ int res;
+ ALOGV("Starting up sensor thread");
+ mStartupTime = systemTime();
+ mNextCaptureTime = 0;
+ mNextCapturedBuffers = NULL;
+
+ DBG_LOGA("");
+
+ return OK;
+}
+
+bool Sensor::threadLoop() {
+ /**
+ * Sensor capture operation main loop.
+ *
+ * Stages are out-of-order relative to a single frame's processing, but
+ * in-order in time.
+ */
+
+ /**
+ * Stage 1: Read in latest control parameters
+ */
+ uint64_t exposureDuration;
+ uint64_t frameDuration;
+ uint32_t gain;
+ Buffers *nextBuffers;
+ uint32_t frameNumber;
+ SensorListener *listener = NULL;
+ {
+ Mutex::Autolock lock(mControlMutex);
+ exposureDuration = mExposureTime;
+ frameDuration = mFrameDuration;
+ gain = mGainFactor;
+ nextBuffers = mNextBuffers;
+ frameNumber = mFrameNumber;
+ listener = mListener;
+ // Don't reuse a buffer set
+ mNextBuffers = NULL;
+
+ // Signal VSync for start of readout
+ ALOGVV("Sensor VSync");
+ mGotVSync = true;
+ mVSync.signal();
+ }
+
+ /**
+ * Stage 3: Read out latest captured image
+ */
+
+ Buffers *capturedBuffers = NULL;
+ nsecs_t captureTime = 0;
+
+ nsecs_t startRealTime = systemTime();
+ // Stagefright cares about system time for timestamps, so base simulated
+ // time on that.
+ nsecs_t simulatedTime = startRealTime;
+ nsecs_t frameEndRealTime = startRealTime + frameDuration;
+ nsecs_t frameReadoutEndRealTime = startRealTime +
+ kRowReadoutTime * kResolution[1];
+
+ if (mNextCapturedBuffers != NULL) {
+ ALOGVV("Sensor starting readout");
+ // Pretend we're doing readout now; will signal once enough time has elapsed
+ capturedBuffers = mNextCapturedBuffers;
+ captureTime = mNextCaptureTime;
+ }
+ simulatedTime += kRowReadoutTime + kMinVerticalBlank;
+
+ // TODO: Move this signal to another thread to simulate readout
+ // time properly
+ if (capturedBuffers != NULL) {
+ ALOGVV("Sensor readout complete");
+ Mutex::Autolock lock(mReadoutMutex);
+ if (mCapturedBuffers != NULL) {
+ ALOGV("Waiting for readout thread to catch up!");
+ mReadoutComplete.wait(mReadoutMutex);
+ }
+
+ mCapturedBuffers = capturedBuffers;
+ mCaptureTime = captureTime;
+ mReadoutAvailable.signal();
+ capturedBuffers = NULL;
+ }
+
+ /**
+ * Stage 2: Capture new image
+ */
+ mNextCaptureTime = simulatedTime;
+ mNextCapturedBuffers = nextBuffers;
+
+ if (mNextCapturedBuffers != NULL) {
+ if (listener != NULL) {
+ listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
+ mNextCaptureTime);
+ }
+ ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
+ (float)exposureDuration/1e6, gain);
+ mScene.setExposureDuration((float)exposureDuration/1e9);
+ mScene.calculateScene(mNextCaptureTime);
+
+ // Might be adding more buffers, so size isn't constant
+ for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
+ const StreamBuffer &b = (*mNextCapturedBuffers)[i];
+ ALOGVV("Sensor capturing buffer %d: stream %d,"
+ " %d x %d, format %x, stride %d, buf %p, img %p",
+ i, b.streamId, b.width, b.height, b.format, b.stride,
+ b.buffer, b.img);
+ switch(b.format) {
+ case HAL_PIXEL_FORMAT_RAW_SENSOR:
+ captureRaw(b.img, gain, b.stride);
+ break;
+ case HAL_PIXEL_FORMAT_RGB_888:
+ captureRGB(b.img, gain, b.stride);
+ break;
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ captureRGBA(b.img, gain, b.stride);
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+                    // Add auxiliary buffer of the right size
+ // Assumes only one BLOB (JPEG) buffer in
+ // mNextCapturedBuffers
+ StreamBuffer bAux;
+ int orientation;
+ orientation = getPictureRotate();
+ ALOGD("bAux orientation=%d",orientation);
+ if ((orientation==90)||(orientation==270)) {
+ bAux.streamId = 0;
+ bAux.width = b.height;
+ bAux.height = b.width;
+ bAux.format = HAL_PIXEL_FORMAT_RGB_888;
+ bAux.stride = b.height;
+ bAux.buffer = NULL;
+ } else {
+ bAux.streamId = 0;
+ bAux.width = b.width;
+ bAux.height = b.height;
+ bAux.format = HAL_PIXEL_FORMAT_RGB_888;
+ bAux.stride = b.width;
+ bAux.buffer = NULL;
+ }
+ // TODO: Reuse these
+ bAux.img = new uint8_t[b.width * b.height * 3];
+ mNextCapturedBuffers->push_back(bAux);
+ break;
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ captureNV21(b.img, gain, b.stride);
+ break;
+ case HAL_PIXEL_FORMAT_YV12:
+ // TODO:
+ ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
+ break;
+ default:
+ ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
+ b.format);
+ break;
+ }
+ }
+ }
+
+ ALOGVV("Sensor vertical blanking interval");
+ nsecs_t workDoneRealTime = systemTime();
+ const nsecs_t timeAccuracy = 2e6; // 2 ms of imprecision is ok
+ if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
+ timespec t;
+ t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
+ t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;
+
+ int ret;
+ do {
+ ret = nanosleep(&t, &t);
+ } while (ret != 0);
+ }
+ nsecs_t endRealTime = systemTime();
+ ALOGVV("Frame cycle took %d ms, target %d ms",
+ (int)((endRealTime - startRealTime)/1000000),
+ (int)(frameDuration / 1000000));
+ return true;
+}
+
+int Sensor::getPictureSizes(int32_t picSizes[], int size, bool preview) {
+ int res;
+ int i;
+ int count = 0;
+ struct v4l2_frmsizeenum frmsize;
+ char property[PROPERTY_VALUE_MAX];
+ unsigned int support_w,support_h;
+ int preview_fmt;
+
+ support_w = 10000;
+ support_h = 10000;
+ memset(property, 0, sizeof(property));
+ if(property_get("ro.camera.preview.MaxSize", property, NULL) > 0){
+ CAMHAL_LOGDB("support Max Preview Size :%s",property);
+ if(sscanf(property,"%dx%d",&support_w,&support_h)!=2){
+ support_w = 10000;
+ support_h = 10000;
+ }
+ }
+
+ memset(&frmsize,0,sizeof(frmsize));
+ preview_fmt = getOutputFormat();
+
+ if (preview_fmt == V4L2_PIX_FMT_MJPEG)
+ frmsize.pixel_format = V4L2_PIX_FMT_MJPEG;
+ else if (preview_fmt == V4L2_PIX_FMT_NV21) {
+ if (preview == true)
+ frmsize.pixel_format = V4L2_PIX_FMT_NV21;
+ else
+ frmsize.pixel_format = V4L2_PIX_FMT_RGB24;
+ } else if (preview_fmt == V4L2_PIX_FMT_YUYV)
+ frmsize.pixel_format = V4L2_PIX_FMT_YUYV;
+
+ for(i=0;;i++){
+ frmsize.index = i;
+ res = ioctl(vinfo->fd, VIDIOC_ENUM_FRAMESIZES, &frmsize);
+ if (res < 0){
+ DBG_LOGB("index=%d, break\n", i);
+ break;
+ }
+
+ if(frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE){ //only support this type
+
+ if (0 != (frmsize.discrete.width%16))
+ continue;
+
+ if((frmsize.discrete.width > support_w) && (frmsize.discrete.height >support_h))
+ continue;
+
+ if (count >= size)
+ break;
+
+ picSizes[count] = frmsize.discrete.width;
+ picSizes[count+1] = frmsize.discrete.height;
+ count += 2;
+ }
+
+    }
+
+    return count;
+}
+void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
+ float totalGain = gain/100.0 * kBaseGainFactor;
+ float noiseVarGain = totalGain * totalGain;
+ float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
+ + kReadNoiseVarAfterGain;
+
+ int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
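+    // RGGB mosaic: even rows sample R,Gr and odd rows Gb,B; (y & 1) picks
+    // the pair below and (x & 1) picks the channel within the pair.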
+ mScene.setReadoutPixel(0,0);
+ for (unsigned int y = 0; y < kResolution[1]; y++ ) {
+ int *bayerRow = bayerSelect + (y & 0x1) * 2;
+ uint16_t *px = (uint16_t*)img + y * stride;
+ for (unsigned int x = 0; x < kResolution[0]; x++) {
+ uint32_t electronCount;
+ electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];
+
+ // TODO: Better pixel saturation curve?
+ electronCount = (electronCount < kSaturationElectrons) ?
+ electronCount : kSaturationElectrons;
+
+ // TODO: Better A/D saturation curve?
+ uint16_t rawCount = electronCount * totalGain;
+ rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;
+
+ // Calculate noise value
+ // TODO: Use more-correct Gaussian instead of uniform noise
+ float photonNoiseVar = electronCount * noiseVarGain;
+ float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
+ // Scaled to roughly match gaussian/uniform noise stddev
+ float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;
+
+ rawCount += kBlackLevel;
+ rawCount += noiseStddev * noiseSample;
+
+ *px++ = rawCount;
+ }
+ // TODO: Handle this better
+ //simulatedTime += kRowReadoutTime;
+ }
+ ALOGVV("Raw sensor image captured");
+}
+
+void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
+ float totalGain = gain/100.0 * kBaseGainFactor;
+ // In fixed-point math, calculate total scaling from electrons to 8bpp
+ int scale64x = 64 * totalGain * 255 / kMaxRawValue;
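+    // e.g. at gain=100 (1x): totalGain = kBaseGainFactor = 2.0, so
+    // scale64x = 64 * 2 * 255 / 4000 = 8, and a saturated pixel of 2000
+    // electrons maps to 2000 * 8 / 64 = 250 out of 255.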
+ uint32_t inc = kResolution[0] / stride;
+
+ for (unsigned int y = 0, outY = 0; y < kResolution[1]; y+=inc, outY++ ) {
+ uint8_t *px = img + outY * stride * 4;
+ mScene.setReadoutPixel(0, y);
+ for (unsigned int x = 0; x < kResolution[0]; x+=inc) {
+ uint32_t rCount, gCount, bCount;
+ // TODO: Perfect demosaicing is a cheat
+ const uint32_t *pixel = mScene.getPixelElectrons();
+ rCount = pixel[Scene::R] * scale64x;
+ gCount = pixel[Scene::Gr] * scale64x;
+ bCount = pixel[Scene::B] * scale64x;
+
+ *px++ = rCount < 255*64 ? rCount / 64 : 255;
+ *px++ = gCount < 255*64 ? gCount / 64 : 255;
+ *px++ = bCount < 255*64 ? bCount / 64 : 255;
+ *px++ = 255;
+ for (unsigned int j = 1; j < inc; j++)
+ mScene.getPixelElectrons();
+ }
+ // TODO: Handle this better
+ //simulatedTime += kRowReadoutTime;
+ }
+ ALOGVV("RGBA sensor image captured");
+}
+
+void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
+#if 0
+ float totalGain = gain/100.0 * kBaseGainFactor;
+ // In fixed-point math, calculate total scaling from electrons to 8bpp
+ int scale64x = 64 * totalGain * 255 / kMaxRawValue;
+ uint32_t inc = kResolution[0] / stride;
+
+ for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++ ) {
+ mScene.setReadoutPixel(0, y);
+ uint8_t *px = img + outY * stride * 3;
+ for (unsigned int x = 0; x < kResolution[0]; x += inc) {
+ uint32_t rCount, gCount, bCount;
+ // TODO: Perfect demosaicing is a cheat
+ const uint32_t *pixel = mScene.getPixelElectrons();
+ rCount = pixel[Scene::R] * scale64x;
+ gCount = pixel[Scene::Gr] * scale64x;
+ bCount = pixel[Scene::B] * scale64x;
+
+ *px++ = rCount < 255*64 ? rCount / 64 : 255;
+ *px++ = gCount < 255*64 ? gCount / 64 : 255;
+ *px++ = bCount < 255*64 ? bCount / 64 : 255;
+ for (unsigned int j = 1; j < inc; j++)
+ mScene.getPixelElectrons();
+ }
+ // TODO: Handle this better
+ //simulatedTime += kRowReadoutTime;
+ }
+#else
+ uint8_t *src;
+ int ret,rotate;
+ int width,height;
+ rotate = getPictureRotate();
+ width = vinfo->picture.format.fmt.pix.width;
+ height = vinfo->picture.format.fmt.pix.height;
+ ret = start_picture(vinfo,rotate);
+ if (ret < 0)
+ {
+ ALOGD("start picture failed!");
+ }
+ while(1)
+ {
+ src = (uint8_t *)get_picture(vinfo);
+ if (NULL == src) {
+ usleep(300000);
+ continue;
+ } else {
+ break;
+ }
+ }
+    ALOGD("get picture success!");
+ if (vinfo->picture.format.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24){
+ if (vinfo->picture.buf.length == width*height*3) {
+ memcpy(img, src, vinfo->picture.buf.length);
+ } else {
+ rgb24_memcpy( img, src, width, height);
+ }
+ }
+ stop_picture(vinfo);
+#endif
+}
+
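+// Convert packed YUYV (Y0 U0 Y1 V0 ...) to NV21: copy every Y sample into
+// the luma plane, then build the interleaved VU plane by averaging the
+// chroma of each pair of adjacent rows (YUYV is already subsampled 2:1
+// horizontally).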
+void Sensor::YUYVToNV21(uint8_t *src, uint8_t *dst, int width, int height)
+{
+ for (int i = 0; i < width * height * 2; i += 2) {
+ *dst++ = *(src + i);
+ }
+
+ for (int y = 0; y < height - 1; y +=2) {
+ for (int j = 0; j < width * 2; j += 4) {
+ *dst++ = (*(src + 3 + j) + *(src + 3 + j + width * 2) + 1) >> 1; //v
+ *dst++ = (*(src + 1 + j) + *(src + 1 + j + width * 2) + 1) >> 1; //u
+ }
+ src += width * 2 * 2;
+ }
+
+ if (height & 1)
+ for (int j = 0; j < width * 2; j += 4) {
+ *dst++ = *(src + 3 + j); //v
+ *dst++ = *(src + 1 + j); //u
+ }
+}
+
+void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
+#if 0
+ float totalGain = gain/100.0 * kBaseGainFactor;
+ // Using fixed-point math with 6 bits of fractional precision.
+ // In fixed-point math, calculate total scaling from electrons to 8bpp
+ const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
+ // In fixed-point math, saturation point of sensor after gain
+ const int saturationPoint = 64 * 255;
+ // Fixed-point coefficients for RGB-YUV transform
+ // Based on JFIF RGB->YUV transform.
+ // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
+ const int rgbToY[] = {19, 37, 7};
+ const int rgbToCb[] = {-10,-21, 32, 524288};
+ const int rgbToCr[] = {32,-26, -5, 524288};
+ // Scale back to 8bpp non-fixed-point
+ const int scaleOut = 64;
+ const int scaleOutSq = scaleOut * scaleOut; // after multiplies
+
+ uint32_t inc = kResolution[0] / stride;
+ uint32_t outH = kResolution[1] / inc;
+ for (unsigned int y = 0, outY = 0;
+ y < kResolution[1]; y+=inc, outY++) {
+ uint8_t *pxY = img + outY * stride;
+ uint8_t *pxVU = img + (outH + outY / 2) * stride;
+ mScene.setReadoutPixel(0,y);
+ for (unsigned int outX = 0; outX < stride; outX++) {
+ int32_t rCount, gCount, bCount;
+ // TODO: Perfect demosaicing is a cheat
+ const uint32_t *pixel = mScene.getPixelElectrons();
+ rCount = pixel[Scene::R] * scale64x;
+ rCount = rCount < saturationPoint ? rCount : saturationPoint;
+ gCount = pixel[Scene::Gr] * scale64x;
+ gCount = gCount < saturationPoint ? gCount : saturationPoint;
+ bCount = pixel[Scene::B] * scale64x;
+ bCount = bCount < saturationPoint ? bCount : saturationPoint;
+
+ *pxY++ = (rgbToY[0] * rCount +
+ rgbToY[1] * gCount +
+ rgbToY[2] * bCount) / scaleOutSq;
+ if (outY % 2 == 0 && outX % 2 == 0) {
+ *pxVU++ = (rgbToCr[0] * rCount +
+ rgbToCr[1] * gCount +
+ rgbToCr[2] * bCount +
+ rgbToCr[3]) / scaleOutSq;
+ *pxVU++ = (rgbToCb[0] * rCount +
+ rgbToCb[1] * gCount +
+ rgbToCb[2] * bCount +
+ rgbToCb[3]) / scaleOutSq;
+ }
+ for (unsigned int j = 1; j < inc; j++)
+ mScene.getPixelElectrons();
+ }
+ }
+#else
+ uint8_t *src;
+ while(1){
+ src = (uint8_t *)get_frame(vinfo);
+ usleep(30000);
+ if (NULL == src)
+ continue;
+ if (vinfo->preview.format.fmt.pix.pixelformat == V4L2_PIX_FMT_NV21)
+ memcpy(img, src, vinfo->preview.buf.length);
+ else if (vinfo->preview.format.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ int width = vinfo->preview.format.fmt.pix.width;
+ int height = vinfo->preview.format.fmt.pix.height;
+ YUYVToNV21(src, img, width, height);
+ }
+ else if (vinfo->preview.format.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
+ int width = vinfo->preview.format.fmt.pix.width;
+ int height = vinfo->preview.format.fmt.pix.height;
+ if (ConvertMjpegToNV21(src, vinfo->preview.buf.bytesused, img,
+ width, img + width * height, (width + 1) / 2, width,
+ height, width, height, libyuv::FOURCC_MJPG) != 0) {
+ DBG_LOGA("Decode MJPEG frame failed\n");
+ }
+ } else {
+ ALOGE("Unable known sensor format: %d", vinfo->preview.format.fmt.pix.pixelformat);
+ }
+
+ putback_frame(vinfo);
+ break;
+ }
+#endif
+ ALOGVV("NV21 sensor image captured");
+}
+
+} // namespace android
+
diff --git a/v3/fake-pipeline2/Sensor.h b/v3/fake-pipeline2/Sensor.h
new file mode 100755
index 0000000..b543ba2
--- a/dev/null
+++ b/v3/fake-pipeline2/Sensor.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This class is a simple simulation of a typical CMOS cellphone imager chip,
+ * which outputs 12-bit Bayer-mosaic raw images.
+ *
+ * Unlike most real image sensors, this one's native color space is linear sRGB.
+ *
+ * The sensor is abstracted as operating as a pipeline 3 stages deep;
+ * conceptually, each frame to be captured goes through these three stages. The
+ * processing step for the sensor is marked off by vertical sync signals, which
+ * indicate the start of readout of the oldest frame. The interval between
+ * processing steps depends on the frame duration of the frame currently being
+ * captured. The stages are 1) configure, 2) capture, and 3) readout. During
+ * configuration, the sensor's registers for settings such as exposure time,
+ * frame duration, and gain are set for the next frame to be captured. In stage
+ * 2, the image data for the frame is actually captured by the sensor. Finally,
+ * in stage 3, the just-captured data is read out and sent to the rest of the
+ * system.
+ *
+ * The sensor is assumed to be rolling-shutter, so low-numbered rows of the
+ * sensor are exposed earlier in time than larger-numbered rows, with the time
+ * offset between each row being equal to the row readout time.
+ *
+ * The characteristics of this sensor don't correspond to any actual sensor,
+ * but are not far off typical sensors.
+ *
+ * Example timing diagram, with three frames:
+ * Frame 0-1: Frame duration 50 ms, exposure time 20 ms.
+ * Frame 2: Frame duration 75 ms, exposure time 65 ms.
+ * Legend:
+ * C = update sensor registers for frame
+ * v = row in reset (vertical blanking interval)
+ * E = row capturing image data
+ * R = row being read out
+ * | = vertical sync signal
+ *time(ms)| 0 55 105 155 230 270
+ * Frame 0| :configure : capture : readout : : :
+ * Row # | ..|CCCC______|_________|_________| : :
+ * 0 | :\ \vvvvvEEEER \ : :
+ * 500 | : \ \vvvvvEEEER \ : :
+ * 1000 | : \ \vvvvvEEEER \ : :
+ * 1500 | : \ \vvvvvEEEER \ : :
+ * 2000 | : \__________\vvvvvEEEER_________\ : :
+ * Frame 1| : configure capture readout : :
+ * Row # | : |CCCC_____|_________|______________| :
+ * 0 | : :\ \vvvvvEEEER \ :
+ * 500 | : : \ \vvvvvEEEER \ :
+ * 1000 | : : \ \vvvvvEEEER \ :
+ * 1500 | : : \ \vvvvvEEEER \ :
+ * 2000 | : : \_________\vvvvvEEEER______________\ :
+ * Frame 2| : : configure capture readout:
+ * Row # | : : |CCCC_____|______________|_______|...
+ * 0 | : : :\ \vEEEEEEEEEEEEER \
+ * 500 | : : : \ \vEEEEEEEEEEEEER \
+ * 1000 | : : : \ \vEEEEEEEEEEEEER \
+ * 1500 | : : : \ \vEEEEEEEEEEEEER \
+ * 2000 | : : : \_________\vEEEEEEEEEEEEER_______\
+ */
+
+#ifndef HW_EMULATOR_CAMERA2_SENSOR_H
+#define HW_EMULATOR_CAMERA2_SENSOR_H
+
+#include "utils/Thread.h"
+#include "utils/Mutex.h"
+#include "utils/Timers.h"
+#include <utils/String8.h>
+
+#include "Scene.h"
+#include "Base.h"
+#include "camera_hw.h"
+#include <cstdlib>
+
+namespace android {
+
+typedef enum camera_mirror_flip_e {
+ MF_NORMAL = 0,
+ MF_MIRROR,
+ MF_FLIP,
+ MF_MIRROR_FLIP,
+}camera_mirror_flip_t;
+
+
+typedef enum camera_wb_flip_e {
+ CAM_WB_AUTO = 0,
+ CAM_WB_CLOUD,
+ CAM_WB_DAYLIGHT,
+ CAM_WB_INCANDESCENCE,
+ CAM_WB_TUNGSTEN,
+ CAM_WB_FLUORESCENT,
+ CAM_WB_MANUAL,
+ CAM_WB_SHADE,
+ CAM_WB_TWILIGHT,
+ CAM_WB_WARM_FLUORESCENT,
+}camera_wb_flip_t;
+
+typedef enum camera_effect_flip_e {
+ CAM_EFFECT_ENC_NORMAL = 0,
+ CAM_EFFECT_ENC_GRAYSCALE,
+ CAM_EFFECT_ENC_SEPIA,
+ CAM_EFFECT_ENC_SEPIAGREEN,
+ CAM_EFFECT_ENC_SEPIABLUE,
+ CAM_EFFECT_ENC_COLORINV,
+}camera_effect_flip_t;
+
+typedef enum camera_night_mode_flip_e {
+ CAM_NM_AUTO = 0,
+ CAM_NM_ENABLE,
+}camera_night_mode_flip_t;
+
+typedef enum camera_banding_mode_flip_e {
+ CAM_ANTIBANDING_DISABLED= V4L2_CID_POWER_LINE_FREQUENCY_DISABLED,
+ CAM_ANTIBANDING_50HZ = V4L2_CID_POWER_LINE_FREQUENCY_50HZ,
+ CAM_ANTIBANDING_60HZ = V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ CAM_ANTIBANDING_AUTO,
+ CAM_ANTIBANDING_OFF,
+}camera_banding_mode_flip_t;
+
+typedef enum camera_flashlight_status_e{
+ FLASHLIGHT_AUTO = 0,
+ FLASHLIGHT_ON,
+ FLASHLIGHT_OFF,
+ FLASHLIGHT_TORCH,
+ FLASHLIGHT_RED_EYE,
+}camera_flashlight_status_t;
+
+typedef enum camera_focus_mode_e {
+ CAM_FOCUS_MODE_RELEASE = 0,
+ CAM_FOCUS_MODE_FIXED,
+ CAM_FOCUS_MODE_INFINITY,
+ CAM_FOCUS_MODE_AUTO,
+ CAM_FOCUS_MODE_MACRO,
+ CAM_FOCUS_MODE_EDOF,
+ CAM_FOCUS_MODE_CONTI_VID,
+ CAM_FOCUS_MODE_CONTI_PIC,
+}camera_focus_mode_t;
+
+
+class Sensor: private Thread, public virtual RefBase {
+ public:
+
+ Sensor();
+ ~Sensor();
+
+ /*
+ * Power control
+ */
+
+ status_t startUp(int idx);
+ status_t shutDown();
+
+ int getOutputFormat();
+ status_t setOutputFormat(int width, int height, int pixelformat);
+ void setPictureRotate(int rotate);
+ int getPictureRotate();
+
+ status_t streamOn();
+ status_t streamOff();
+
+ int getPictureSizes(int32_t picSizes[], int size, bool preview);
+ bool isStreaming();
+ bool isNeedRestart(int width, int height, int pixelformat);
+ /*
+ * Access to scene
+ */
+ Scene &getScene();
+
+ /*
+ * Controls that can be updated every frame
+ */
+
+    int getExposure(int *maxExp, int *minExp, int *def, camera_metadata_rational *step);
+ status_t setExposure(int expCmp);
+ status_t setEffect(uint8_t effect);
+ int getAntiBanding(uint8_t *antiBanding, uint8_t maxCont);
+ status_t setAntiBanding(uint8_t antiBanding);
+ status_t setFocuasArea(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+ int getAWB(uint8_t *awbMode, uint8_t maxCount);
+ status_t setAWB(uint8_t awbMode);
+ status_t setAutoFocuas(uint8_t afMode);
+ int getAutoFocus(uint8_t *afMode, uint8_t maxCount);
+ void setExposureTime(uint64_t ns);
+ void setFrameDuration(uint64_t ns);
+ void setSensitivity(uint32_t gain);
+ // Buffer must be at least stride*height*2 bytes in size
+ void setDestinationBuffers(Buffers *buffers);
+ // To simplify tracking sensor's current frame
+ void setFrameNumber(uint32_t frameNumber);
+
+ /*
+ * Controls that cause reconfiguration delay
+ */
+
+ void setBinning(int horizontalFactor, int verticalFactor);
+
+ /*
+ * Synchronizing with sensor operation (vertical sync)
+ */
+
+ // Wait until the sensor outputs its next vertical sync signal, meaning it
+ // is starting readout of its latest frame of data. Returns true if vertical
+ // sync is signaled, false if the wait timed out.
+ bool waitForVSync(nsecs_t reltime);
+
+ // Wait until a new frame has been read out, and then return the time
+ // capture started. May return immediately if a new frame has been pushed
+ // since the last wait for a new frame. Returns true if new frame is
+ // returned, false if timed out.
+ bool waitForNewFrame(nsecs_t reltime,
+ nsecs_t *captureTime);
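+
+    // A minimal usage sketch (hypothetical caller, not part of this class):
+    // the HAL's readout thread typically alternates the two waits, e.g.
+    //
+    //   if (sensor->waitForVSync(kWaitDuration)) {
+    //       // program the next frame's exposure/gain here
+    //   }
+    //   nsecs_t captureTime;
+    //   if (sensor->waitForNewFrame(kWaitDuration, &captureTime)) {
+    //       // hand the captured buffers downstream, stamped with captureTime
+    //   }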
+
+ /*
+ * Interrupt event servicing from the sensor. Only triggers for sensor
+ * cycles that have valid buffers to write to.
+ */
+ struct SensorListener {
+ enum Event {
+ EXPOSURE_START, // Start of exposure
+ };
+
+ virtual void onSensorEvent(uint32_t frameNumber, Event e,
+ nsecs_t timestamp) = 0;
+ virtual ~SensorListener();
+ };
+
+ void setSensorListener(SensorListener *listener);
+
+ /**
+ * Static sensor characteristics
+ */
+ static const unsigned int kResolution[2];
+
+ static const nsecs_t kExposureTimeRange[2];
+ static const nsecs_t kFrameDurationRange[2];
+ static const nsecs_t kMinVerticalBlank;
+
+ static const uint8_t kColorFilterArrangement;
+
+ // Output image data characteristics
+ static const uint32_t kMaxRawValue;
+ static const uint32_t kBlackLevel;
+ // Sensor sensitivity, approximate
+
+ static const float kSaturationVoltage;
+ static const uint32_t kSaturationElectrons;
+ static const float kVoltsPerLuxSecond;
+ static const float kElectronsPerLuxSecond;
+
+ static const float kBaseGainFactor;
+
+ static const float kReadNoiseStddevBeforeGain; // In electrons
+ static const float kReadNoiseStddevAfterGain; // In raw digital units
+ static const float kReadNoiseVarBeforeGain;
+ static const float kReadNoiseVarAfterGain;
+
+ // While each row has to read out, reset, and then expose, the (reset +
+ // expose) sequence can be overlapped by other row readouts, so the final
+ // minimum frame duration is purely a function of row readout time, at least
+ // if there's a reasonable number of rows.
+ static const nsecs_t kRowReadoutTime;
+
+ static const int32_t kSensitivityRange[2];
+ static const uint32_t kDefaultSensitivity;
+
+ private:
+ Mutex mControlMutex; // Lock before accessing control parameters
+ // Start of control parameters
+ Condition mVSync;
+ bool mGotVSync;
+ uint64_t mExposureTime;
+ uint64_t mFrameDuration;
+ uint32_t mGainFactor;
+ Buffers *mNextBuffers;
+ uint32_t mFrameNumber;
+ int mRotateValue;
+
+ // End of control parameters
+
+ int mEV;
+
+ Mutex mReadoutMutex; // Lock before accessing readout variables
+ // Start of readout variables
+ Condition mReadoutAvailable;
+ Condition mReadoutComplete;
+ Buffers *mCapturedBuffers;
+ nsecs_t mCaptureTime;
+ SensorListener *mListener;
+ // End of readout variables
+
+ // Time of sensor startup, used for simulation zero-time point
+ nsecs_t mStartupTime;
+
+ //store the v4l2 info
+ struct VideoInfo *vinfo;
+ /**
+ * Inherited Thread virtual overrides, and members only used by the
+ * processing thread
+ */
+ private:
+ virtual status_t readyToRun();
+
+ virtual bool threadLoop();
+
+ nsecs_t mNextCaptureTime;
+ Buffers *mNextCapturedBuffers;
+
+ Scene mScene;
+
+ void captureRaw(uint8_t *img, uint32_t gain, uint32_t stride);
+ void captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride);
+ void captureRGB(uint8_t *img, uint32_t gain, uint32_t stride);
+ void captureNV21(uint8_t *img, uint32_t gain, uint32_t stride);
+ void YUYVToNV21(uint8_t *src, uint8_t *dst, int width, int height);
+};
+
+} // namespace android
+
+#endif // HW_EMULATOR_CAMERA2_SENSOR_H
diff --git a/v3/fake-pipeline2/camera_hw.cpp b/v3/fake-pipeline2/camera_hw.cpp
new file mode 100755
index 0000000..2bb9b7e
--- a/dev/null
+++ b/v3/fake-pipeline2/camera_hw.cpp
@@ -0,0 +1,382 @@
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Camera_hw"
+
+#include <errno.h>
+#include "camera_hw.h"
+
+#ifdef __cplusplus
+//extern "C" {
+#endif
+static int set_rotate_value(int camera_fd, int value)
+{
+    int ret = 0;
+    struct v4l2_control ctl;
+    if (camera_fd < 0)
+        return -1;
+    if ((value != 0) && (value != 90) && (value != 180) && (value != 270)) {
+        CAMHAL_LOGDB("Invalid rotate value: %d.", value);
+        return -1;
+    }
+    memset(&ctl, 0, sizeof(ctl));
+    ctl.value = value;
+    ctl.id = V4L2_CID_ROTATE;
+    ALOGD("set_rotate_value: id=%x, value=%d", ctl.id, ctl.value);
+    ret = ioctl(camera_fd, VIDIOC_S_CTRL, &ctl);
+    if (ret < 0) {
+        CAMHAL_LOGDB("Set rotate value failed: %s, errno=%d, ret=%d", strerror(errno), errno, ret);
+    }
+    return ret;
+}
+
+int camera_open(struct VideoInfo *cam_dev)
+{
+ char dev_name[128];
+ int ret;
+
+    snprintf(dev_name, sizeof(dev_name), "/dev/video%d", cam_dev->idx);
+ cam_dev->fd = open(dev_name, O_RDWR | O_NONBLOCK);
+ //cam_dev->fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
+ if (cam_dev->fd < 0){
+ DBG_LOGB("open %s failed, errno=%d\n", dev_name, errno);
+ return -ENOTTY;
+ }
+
+ ret = ioctl(cam_dev->fd, VIDIOC_QUERYCAP, &cam_dev->cap);
+ if (ret < 0) {
+ DBG_LOGB("VIDIOC_QUERYCAP, errno=%d", errno);
+ }
+
+    if (!(cam_dev->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
+        DBG_LOGB("%s is not a video capture device\n",
+                 dev_name);
+    }
+
+ if (!(cam_dev->cap.capabilities & V4L2_CAP_STREAMING)) {
+ DBG_LOGB( "video%d does not support streaming i/o\n",
+ cam_dev->idx);
+ }
+
+ return ret;
+}
+
+int setBuffersFormat(struct VideoInfo *cam_dev)
+{
+ int ret = 0;
+ int pixelformat = cam_dev->preview.format.fmt.pix.pixelformat;
+
+ ret = ioctl(cam_dev->fd, VIDIOC_S_FMT, &cam_dev->preview.format);
+ if (ret < 0) {
+ DBG_LOGB("Open: VIDIOC_S_FMT Failed: %s, ret=%d\n", strerror(errno), ret);
+ }
+
+ CAMHAL_LOGIB("Width * Height %d x %d expect pixelfmt:%.4s, get:%.4s\n",
+ cam_dev->preview.format.fmt.pix.width,
+ cam_dev->preview.format.fmt.pix.height,
+ (char*)&pixelformat,
+ (char*)&cam_dev->preview.format.fmt.pix.pixelformat);
+ return ret;
+}
+
+int start_capturing(struct VideoInfo *vinfo)
+{
+ int ret = 0;
+ int i;
+ enum v4l2_buf_type type;
+ struct v4l2_buffer buf;
+
+    if (vinfo->isStreaming) {
+        DBG_LOGA("preview stream already on\n");
+    }
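+
+    //step 1 : request buffers (VIDIOC_REQBUFS, memory mapped i/o)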
+ CLEAR(vinfo->preview.rb);
+
+ vinfo->preview.rb.count = 6;
+ vinfo->preview.rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ //TODO DMABUF & ION
+ vinfo->preview.rb.memory = V4L2_MEMORY_MMAP;
+
+ ret = ioctl(vinfo->fd, VIDIOC_REQBUFS, &vinfo->preview.rb);
+ if (ret < 0) {
+ DBG_LOGB("camera idx:%d does not support "
+ "memory mapping, errno=%d\n", vinfo->idx, errno);
+ }
+
+ if (vinfo->preview.rb.count < 2) {
+ DBG_LOGB( "Insufficient buffer memory on /dev/video%d, errno=%d\n",
+ vinfo->idx, errno);
+ return -EINVAL;
+ }
+
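+    //step 2 : query and mmap each buffer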
+ for (i = 0; i < vinfo->preview.rb.count; ++i) {
+
+ CLEAR(vinfo->preview.buf);
+
+ vinfo->preview.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->preview.buf.memory = V4L2_MEMORY_MMAP;
+ vinfo->preview.buf.index = i;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QUERYBUF, &vinfo->preview.buf)){
+ DBG_LOGB("VIDIOC_QUERYBUF, errno=%d", errno);
+ }
+
+ vinfo->mem[i] = mmap(NULL /* start anywhere */,
+ vinfo->preview.buf.length,
+ PROT_READ | PROT_WRITE /* required */,
+ MAP_SHARED /* recommended */,
+ vinfo->fd,
+ vinfo->preview.buf.m.offset);
+
+ if (MAP_FAILED == vinfo->mem[i]) {
+ DBG_LOGB("mmap failed, errno=%d\n", errno);
+ }
+ }
+    //step 3 : queue all buffers to the driver (VIDIOC_QBUF)
+ for (i = 0; i < vinfo->preview.rb.count; ++i) {
+
+ CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QBUF, &buf))
+ DBG_LOGB("VIDIOC_QBUF failed, errno=%d\n", errno);
+ }
+
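+    //step 4 : stream on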
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == ioctl(vinfo->fd, VIDIOC_STREAMON, &type))
+ DBG_LOGB("VIDIOC_STREAMON, errno=%d\n", errno);
+
+ vinfo->isStreaming = true;
+
+ return 0;
+}
+
+int stop_capturing(struct VideoInfo *vinfo)
+{
+ enum v4l2_buf_type type;
+ int res = 0;
+ int i;
+
+ if (!vinfo->isStreaming)
+ return -1;
+
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == ioctl(vinfo->fd, VIDIOC_STREAMOFF, &type)){
+ DBG_LOGB("VIDIOC_STREAMOFF, errno=%d", errno);
+ res = -1;
+ }
+
+ for (i = 0; i < vinfo->preview.rb.count; ++i) {
+ if (-1 == munmap(vinfo->mem[i], vinfo->preview.buf.length)) {
+ DBG_LOGB("munmap failed errno=%d", errno);
+ res = -1;
+ }
+ }
+
+ vinfo->isStreaming = false;
+
+ return res;
+}
+
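+/* Dequeue the next filled preview buffer. The fd is opened O_NONBLOCK, so
+ * VIDIOC_DQBUF returns EAGAIN while no frame is ready; that case is reported
+ * as NULL and callers are expected to retry. A returned buffer must be handed
+ * back with putback_frame() once the caller is done with it. */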
+void *get_frame(struct VideoInfo *vinfo)
+{
+ CLEAR(vinfo->preview.buf);
+
+ vinfo->preview.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->preview.buf.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_DQBUF, &vinfo->preview.buf)) {
+ switch (errno) {
+ case EAGAIN:
+ return NULL;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ DBG_LOGB("VIDIOC_DQBUF failed, errno=%d\n", errno);
+ exit(1);
+ }
+ DBG_LOGB("VIDIOC_DQBUF failed, errno=%d\n", errno);
+ }
+ //DBG_LOGA("get frame\n");
+
+ return vinfo->mem[vinfo->preview.buf.index];
+}
+
+int putback_frame(struct VideoInfo *vinfo)
+{
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QBUF, &vinfo->preview.buf))
+ DBG_LOGB("QBUF failed error=%d\n", errno);
+
+ return 0;
+}
+
+int start_picture(struct VideoInfo *vinfo, int rotate)
+{
+ int ret = 0;
+ int i;
+ enum v4l2_buf_type type;
+ struct v4l2_buffer buf;
+
+ CLEAR(vinfo->picture.rb);
+
+ stop_capturing(vinfo);
+
+ //step 1 : ioctl VIDIOC_S_FMT
+ ret = ioctl(vinfo->fd, VIDIOC_S_FMT, &vinfo->picture.format);
+ if (ret < 0) {
+ DBG_LOGB("Open: VIDIOC_S_FMT Failed: %s, ret=%d\n", strerror(errno), ret);
+ }
+
+ //step 2 : request buffer
+ vinfo->picture.rb.count = 1;
+ vinfo->picture.rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ //TODO DMABUF & ION
+ vinfo->picture.rb.memory = V4L2_MEMORY_MMAP;
+
+ ret = ioctl(vinfo->fd, VIDIOC_REQBUFS, &vinfo->picture.rb);
+ if (ret < 0) {
+ DBG_LOGB("camera idx:%d does not support "
+ "memory mapping, errno=%d\n", vinfo->idx, errno);
+ }
+
+ if (vinfo->picture.rb.count < 1) {
+ DBG_LOGB( "Insufficient buffer memory on /dev/video%d, errno=%d\n",
+ vinfo->idx, errno);
+ return -EINVAL;
+ }
+
+ //step 3: mmap buffer
+ for (i = 0; i < vinfo->picture.rb.count; ++i) {
+
+ CLEAR(vinfo->picture.buf);
+
+ vinfo->picture.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->picture.buf.memory = V4L2_MEMORY_MMAP;
+ vinfo->picture.buf.index = i;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QUERYBUF, &vinfo->picture.buf)){
+ DBG_LOGB("VIDIOC_QUERYBUF, errno=%d", errno);
+ }
+ vinfo->mem_pic[i] = mmap(NULL /* start anywhere */,
+ vinfo->picture.buf.length,
+ PROT_READ | PROT_WRITE /* required */,
+ MAP_SHARED /* recommended */,
+ vinfo->fd,
+ vinfo->picture.buf.m.offset);
+
+ if (MAP_FAILED == vinfo->mem_pic[i]) {
+ DBG_LOGB("mmap failed, errno=%d\n", errno);
+ }
+ }
+
+ //step 4 : QBUF
+ for (i = 0; i < vinfo->picture.rb.count; ++i) {
+
+ CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QBUF, &buf))
+ DBG_LOGB("VIDIOC_QBUF failed, errno=%d\n", errno);
+ }
+
+    if (vinfo->isPicture) {
+        DBG_LOGA("picture stream already on\n");
+    }
+
+    set_rotate_value(vinfo->fd, rotate);
+ //step 5: Stream ON
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == ioctl(vinfo->fd, VIDIOC_STREAMON, &type))
+ DBG_LOGB("VIDIOC_STREAMON, errno=%d\n", errno);
+ vinfo->isPicture = true;
+
+ return 0;
+
+}
+
+void *get_picture(struct VideoInfo *vinfo)
+{
+ CLEAR(vinfo->picture.buf);
+
+ vinfo->picture.buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->picture.buf.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == ioctl(vinfo->fd, VIDIOC_DQBUF, &vinfo->picture.buf)) {
+ switch (errno) {
+ case EAGAIN:
+ return NULL;
+ case EIO:
+ /* Could ignore EIO, see spec. */
+ /* fall through */
+ default:
+ DBG_LOGB("VIDIOC_DQBUF failed, errno=%d\n", errno);
+ exit(1);
+ }
+ }
+ DBG_LOGA("get picture\n");
+ return vinfo->mem_pic[vinfo->picture.buf.index];
+}
+
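+/* Tear down the still-capture stream: requeue the picture buffers, stream
+ * off, unmap the buffers, clear the rotate control, then restore the preview
+ * format and restart preview streaming. */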
+void stop_picture(struct VideoInfo *vinfo)
+{
+ enum v4l2_buf_type type;
+ struct v4l2_buffer buf;
+ int i;
+
+ if (!vinfo->isPicture)
+        return;
+
+ //QBUF
+ for (i = 0; i < vinfo->picture.rb.count; ++i) {
+ CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+ if (-1 == ioctl(vinfo->fd, VIDIOC_QBUF, &buf))
+ DBG_LOGB("VIDIOC_QBUF failed, errno=%d\n", errno);
+ }
+
+ //stream off and unmap buffer
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == ioctl(vinfo->fd, VIDIOC_STREAMOFF, &type))
+ DBG_LOGB("VIDIOC_STREAMOFF, errno=%d", errno);
+
+ for (i = 0; i < vinfo->picture.rb.count; i++)
+ {
+ if (-1 == munmap(vinfo->mem_pic[i], vinfo->picture.buf.length))
+ DBG_LOGB("munmap failed errno=%d", errno);
+ }
+
+    set_rotate_value(vinfo->fd, 0);
+ vinfo->isPicture = false;
+ setBuffersFormat(vinfo);
+ start_capturing(vinfo);
+
+}
+
+void camera_close(struct VideoInfo *vinfo)
+{
+    if (NULL == vinfo) {
+        DBG_LOGA("vinfo is null\n");
+        return;
+ }
+
+ if (-1 == close(vinfo->fd))
+ DBG_LOGB("close failed, errno=%d\n", errno);
+
+ vinfo->fd = -1;
+}
+#ifdef __cplusplus
+//}
+#endif
diff --git a/v3/fake-pipeline2/camera_hw.h b/v3/fake-pipeline2/camera_hw.h
new file mode 100755
index 0000000..fe8dee8
--- a/dev/null
+++ b/v3/fake-pipeline2/camera_hw.h
@@ -0,0 +1,67 @@
+#ifndef __CAMERA_HW__
+#define __CAMERA_HW__
+#include <linux/videodev2.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include <getopt.h> /* getopt_long() */
+
+#include <fcntl.h> /* low-level i/o */
+#include <unistd.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <stdbool.h>
+
+#include <linux/videodev2.h>
+#include <DebugUtils.h>
+
+#define NB_BUFFER 6
+#define NB_PIC_BUFFER 2
+#define CLEAR(x) memset(&(x), 0, sizeof(x))
+
+#define V4L2_ROTATE_ID 0x980922 //V4L2_CID_ROTATE
+typedef struct FrameV4L2Info {
+ struct v4l2_format format;
+ struct v4l2_buffer buf;
+ struct v4l2_requestbuffers rb;
+}FrameV4L2Info;
+
+struct VideoInfo {
+ struct v4l2_capability cap;
+ FrameV4L2Info preview;
+ FrameV4L2Info picture;
+ void *mem[NB_BUFFER];
+ void *mem_pic[NB_PIC_BUFFER];
+ unsigned int canvas[NB_BUFFER];
+ bool isStreaming;
+ bool isPicture;
+ bool canvas_mode;
+ int width;
+ int height;
+ int formatIn;
+ int framesizeIn;
+ uint32_t idVendor;
+ uint32_t idProduct;
+
+ int idx;
+ int fd;
+};
+
+extern int camera_open(struct VideoInfo *cam_dev);
+extern void camera_close(struct VideoInfo *vinfo);
+extern int setBuffersFormat(struct VideoInfo *cam_dev);
+extern int start_capturing(struct VideoInfo *vinfo);
+extern int start_picture(struct VideoInfo *vinfo,int rotate);
+extern void stop_picture(struct VideoInfo *vinfo);
+extern int stop_capturing(struct VideoInfo *vinfo);
+extern void *get_frame(struct VideoInfo *vinfo);
+extern void *get_picture(struct VideoInfo *vinfo);
+extern int putback_frame(struct VideoInfo *vinfo);
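+
+/* A minimal usage sketch of this API (error handling omitted; see
+ * tests/test_camera.cpp for a complete example):
+ *
+ *   struct VideoInfo *vinfo = (struct VideoInfo *)calloc(1, sizeof(*vinfo));
+ *   vinfo->idx = 0;                      // opens /dev/video0
+ *   vinfo->preview.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *   vinfo->preview.format.fmt.pix.width = 640;
+ *   vinfo->preview.format.fmt.pix.height = 480;
+ *   vinfo->preview.format.fmt.pix.pixelformat = V4L2_PIX_FMT_NV21;
+ *   camera_open(vinfo);
+ *   setBuffersFormat(vinfo);
+ *   start_capturing(vinfo);
+ *   void *frame = get_frame(vinfo);      // NULL until a frame is ready
+ *   if (frame != NULL)
+ *       putback_frame(vinfo);
+ *   stop_capturing(vinfo);
+ *   camera_close(vinfo);
+ *   free(vinfo);
+ */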
+#endif // __CAMERA_HW__
diff --git a/v3/fake-pipeline2/tests/Android.mk b/v3/fake-pipeline2/tests/Android.mk
new file mode 100755
index 0000000..e148bae
--- a/dev/null
+++ b/v3/fake-pipeline2/tests/Android.mk
@@ -0,0 +1,39 @@
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+LOCAL_PATH := $(call my-dir)
+#################################################################
+ifeq (true, false)
+include $(CLEAR_VARS)
+
+LOCAL_CFLAGS += -Wno-unused-parameter
+LOCAL_CFLAGS +=-g
+LOCAL_CPPFLAGS := -g
+LOCAL_SHARED_LIBRARIES:= \
+ libcutils \
+ liblog \
+ camera.amlogic \
+
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/ \
+ $(LOCAL_PATH)/../ \
+ $(LOCAL_PATH)/../../inc
+
+LOCAL_SRC_FILES := test_camera.cpp\
+
+LOCAL_MODULE := test_camera
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_EXECUTABLE)
+endif
diff --git a/v3/fake-pipeline2/tests/test_camera.cpp b/v3/fake-pipeline2/tests/test_camera.cpp
new file mode 100755
index 0000000..cdd4233
--- a/dev/null
+++ b/v3/fake-pipeline2/tests/test_camera.cpp
@@ -0,0 +1,137 @@
+#include <camera_hw.h>
+
+static const char short_options[] = "d:hmruofc:";
+
+static const struct option
+long_options[] = {
+ { "device", required_argument, NULL, 'd' },
+ { "help", no_argument, NULL, 'h' },
+ { "mmap", no_argument, NULL, 'm' },
+ { "read", no_argument, NULL, 'r' },
+ { "userp", no_argument, NULL, 'u' },
+ { "output", no_argument, NULL, 'o' },
+ { "format", no_argument, NULL, 'f' },
+ { "count", required_argument, NULL, 'c' },
+ { 0, 0, 0, 0 }
+};
+
+int main(int argc, char **argv)
+{
+ int frame_count = 0;
+ char *dev_name = "/dev/video0";
+ struct VideoInfo *vinfo;
+ int ret = 0;
+ FILE* fp;
+
+ uint8_t *src = NULL;
+ uint8_t *dst = NULL;
+
+ fp = fopen("/sdcard/raw.data", "ab+");
+
+ vinfo = (struct VideoInfo *) calloc(1, sizeof(*vinfo));
+
+ if (NULL == vinfo){
+ CAMHAL_LOGDA("calloc failed\n");
+ return -1;
+ }
+
+ for (;;) {
+ int idx;
+ int c;
+
+ c = getopt_long(argc, argv,
+ short_options, long_options, &idx);
+
+ if (-1 == c)
+ break;
+
+ switch (c) {
+ case 0: /* getopt_long() flag */
+ break;
+
+ case 'd':
+ dev_name = optarg;
+ break;
+
+        case 'h':
+            /* no usage text in this test; fall through */
+        case 'm':
+ break;
+
+ case 'r':
+ break;
+
+ case 'u':
+ break;
+
+ case 'o':
+ break;
+
+ case 'f':
+ break;
+
+ case 'c':
+ errno = 0;
+ frame_count = strtol(optarg, NULL, 0);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ vinfo->idx = 0;
+ vinfo->preview.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->preview.format.fmt.pix.width = 640;
+ vinfo->preview.format.fmt.pix.height = 480;
+ vinfo->preview.format.fmt.pix.pixelformat = V4L2_PIX_FMT_NV21;
+
+ vinfo->picture.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vinfo->picture.format.fmt.pix.width = 480;
+ vinfo->picture.format.fmt.pix.height = 640;
+ vinfo->picture.format.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;//V4L2_PIX_FMT_NV21;
+ ret = camera_open(vinfo);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = setBuffersFormat(vinfo);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = start_capturing(vinfo);
+ if (ret < 0) {
+ return -1;
+ }
+
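+    /* Capture loop: poll for a frame, append its raw bytes to the dump file,
+       then return the buffer to the driver. */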
+ while (frame_count > 0) {
+
+ src = (uint8_t *)get_frame(vinfo);
+ if (!src) {
+ usleep(30000);
+ continue;
+ }
+
+        fwrite(src, vinfo->preview.buf.length, 1, fp);
+
+        putback_frame(vinfo);
+        frame_count--;
+ }
+
+ stop_capturing(vinfo);
+ camera_close(vinfo);
+
+ if (vinfo){
+ free(vinfo);
+ vinfo = NULL;
+ }
+
+ if (fp){
+ fclose(fp);
+ fp = NULL;
+ }
+
+
+ return 0;
+}
diff --git a/v3/fake-pipeline2/v4l2-base.c b/v3/fake-pipeline2/v4l2-base.c
new file mode 100644
index 0000000..2746506
--- a/dev/null
+++ b/v3/fake-pipeline2/v4l2-base.c
@@ -0,0 +1,657 @@
+/*
+ * V4L2 video capture example
+ *
+ * This program can be used and distributed without restrictions.
+ *
+ * This program is provided with the V4L2 API
+ * see http://linuxtv.org/docs.php for more information
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include <getopt.h> /* getopt_long() */
+
+#include <fcntl.h> /* low-level i/o */
+#include <unistd.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+
+#include <linux/videodev2.h>
+
+#define CLEAR(x) memset(&(x), 0, sizeof(x))
+
+enum io_method {
+ IO_METHOD_READ,
+ IO_METHOD_MMAP,
+ IO_METHOD_USERPTR,
+};
+
+struct buffer {
+ void *start;
+ size_t length;
+};
+
+static char *dev_name;
+static enum io_method io = IO_METHOD_MMAP;
+static int fd = -1;
+struct buffer *buffers;
+static unsigned int n_buffers;
+static int out_buf;
+static int force_format;
+static int frame_count = 70;
+
+static void errno_exit(const char *s)
+{
+ fprintf(stderr, "%s error %d, %s\n", s, errno, strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+static int xioctl(int fh, int request, void *arg)
+{
+ int r;
+
+ do {
+ r = ioctl(fh, request, arg);
+ } while (-1 == r && EINTR == errno);
+
+ return r;
+}
+
+static void process_image(const void *p, int size)
+{
+ if (out_buf)
+ fwrite(p, size, 1, stdout);
+
+ fflush(stderr);
+ fprintf(stderr, ".");
+ fflush(stdout);
+}
+
+static int read_frame(void)
+{
+ struct v4l2_buffer buf;
+ unsigned int i;
+
+ switch (io) {
+ case IO_METHOD_READ:
+ if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
+ switch (errno) {
+ case EAGAIN:
+ return 0;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ errno_exit("read");
+ }
+ }
+
+ process_image(buffers[0].start, buffers[0].length);
+ break;
+
+ case IO_METHOD_MMAP:
+ CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
+ switch (errno) {
+ case EAGAIN:
+ return 0;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ errno_exit("VIDIOC_DQBUF");
+ }
+ }
+
+ assert(buf.index < n_buffers);
+
+ process_image(buffers[buf.index].start, buf.bytesused);
+
+ if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
+ errno_exit("VIDIOC_QBUF");
+ break;
+
+ case IO_METHOD_USERPTR:
+ CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+
+ if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
+ switch (errno) {
+ case EAGAIN:
+ return 0;
+
+ case EIO:
+ /* Could ignore EIO, see spec. */
+
+ /* fall through */
+
+ default:
+ errno_exit("VIDIOC_DQBUF");
+ }
+ }
+
+ for (i = 0; i < n_buffers; ++i)
+ if (buf.m.userptr == (unsigned long)buffers[i].start
+ && buf.length == buffers[i].length)
+ break;
+
+ assert(i < n_buffers);
+
+ process_image((void *)buf.m.userptr, buf.bytesused);
+
+ if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
+ errno_exit("VIDIOC_QBUF");
+ break;
+ }
+
+ return 1;
+}
+
+static void mainloop(void)
+{
+ unsigned int count;
+
+ count = frame_count;
+
+ while (count-- > 0) {
+ for (;;) {
+ fd_set fds;
+ struct timeval tv;
+ int r;
+
+ FD_ZERO(&fds);
+ FD_SET(fd, &fds);
+
+ /* Timeout. */
+ tv.tv_sec = 2;
+ tv.tv_usec = 0;
+
+ r = select(fd + 1, &fds, NULL, NULL, &tv);
+
+ if (-1 == r) {
+ if (EINTR == errno)
+ continue;
+ errno_exit("select");
+ }
+
+ if (0 == r) {
+ fprintf(stderr, "select timeout\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (read_frame())
+ break;
+ /* EAGAIN - continue select loop. */
+ }
+ }
+}
+
+static void stop_capturing(void)
+{
+ enum v4l2_buf_type type;
+
+ switch (io) {
+ case IO_METHOD_READ:
+ /* Nothing to do. */
+ break;
+
+ case IO_METHOD_MMAP:
+ case IO_METHOD_USERPTR:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &type))
+ errno_exit("VIDIOC_STREAMOFF");
+ break;
+ }
+}
+
+static void start_capturing(void)
+{
+ unsigned int i;
+ enum v4l2_buf_type type;
+
+ switch (io) {
+ case IO_METHOD_READ:
+ /* Nothing to do. */
+ break;
+
+ case IO_METHOD_MMAP:
+ for (i = 0; i < n_buffers; ++i) {
+ struct v4l2_buffer buf;
+
+ CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
+ errno_exit("VIDIOC_QBUF");
+ }
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
+ errno_exit("VIDIOC_STREAMON");
+ break;
+
+ case IO_METHOD_USERPTR:
+ for (i = 0; i < n_buffers; ++i) {
+ struct v4l2_buffer buf;
+
+ CLEAR(buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_USERPTR;
+ buf.index = i;
+ buf.m.userptr = (unsigned long)buffers[i].start;
+ buf.length = buffers[i].length;
+
+ if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
+ errno_exit("VIDIOC_QBUF");
+ }
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
+ errno_exit("VIDIOC_STREAMON");
+ break;
+ }
+}
+
+static void uninit_device(void)
+{
+ unsigned int i;
+
+ switch (io) {
+ case IO_METHOD_READ:
+ free(buffers[0].start);
+ break;
+
+ case IO_METHOD_MMAP:
+ for (i = 0; i < n_buffers; ++i)
+ if (-1 == munmap(buffers[i].start, buffers[i].length))
+ errno_exit("munmap");
+ break;
+
+ case IO_METHOD_USERPTR:
+ for (i = 0; i < n_buffers; ++i)
+ free(buffers[i].start);
+ break;
+ }
+
+ free(buffers);
+}
+
+static void init_read(unsigned int buffer_size)
+{
+ buffers = calloc(1, sizeof(*buffers));
+
+ if (!buffers) {
+ fprintf(stderr, "Out of memory\n");
+ exit(EXIT_FAILURE);
+ }
+
+ buffers[0].length = buffer_size;
+ buffers[0].start = malloc(buffer_size);
+
+ if (!buffers[0].start) {
+ fprintf(stderr, "Out of memory\n");
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void init_mmap(void)
+{
+ struct v4l2_requestbuffers req;
+
+ CLEAR(req);
+
+ req.count = 4;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_MMAP;
+
+ if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
+ if (EINVAL == errno) {
+ fprintf(stderr, "%s does not support "
+ "memory mapping\n", dev_name);
+ exit(EXIT_FAILURE);
+ } else {
+ errno_exit("VIDIOC_REQBUFS");
+ }
+ }
+
+ if (req.count < 2) {
+ fprintf(stderr, "Insufficient buffer memory on %s\n",
+ dev_name);
+ exit(EXIT_FAILURE);
+ }
+
+ buffers = calloc(req.count, sizeof(*buffers));
+
+ if (!buffers) {
+ fprintf(stderr, "Out of memory\n");
+ exit(EXIT_FAILURE);
+ }
+
+ for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
+ struct v4l2_buffer buf;
+
+ CLEAR(buf);
+
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = n_buffers;
+
+ if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
+ errno_exit("VIDIOC_QUERYBUF");
+
+ buffers[n_buffers].length = buf.length;
+ buffers[n_buffers].start =
+ mmap(NULL /* start anywhere */,
+ buf.length,
+ PROT_READ | PROT_WRITE /* required */,
+ MAP_SHARED /* recommended */,
+ fd, buf.m.offset);
+
+ if (MAP_FAILED == buffers[n_buffers].start)
+ errno_exit("mmap");
+ }
+}
+
+static void init_userp(unsigned int buffer_size)
+{
+ struct v4l2_requestbuffers req;
+
+ CLEAR(req);
+
+ req.count = 4;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_USERPTR;
+
+ if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
+ if (EINVAL == errno) {
+ fprintf(stderr, "%s does not support "
+ "user pointer i/o\n", dev_name);
+ exit(EXIT_FAILURE);
+ } else {
+ errno_exit("VIDIOC_REQBUFS");
+ }
+ }
+
+ buffers = calloc(4, sizeof(*buffers));
+
+ if (!buffers) {
+ fprintf(stderr, "Out of memory\n");
+ exit(EXIT_FAILURE);
+ }
+
+ for (n_buffers = 0; n_buffers < 4; ++n_buffers) {
+ buffers[n_buffers].length = buffer_size;
+ buffers[n_buffers].start = malloc(buffer_size);
+
+ if (!buffers[n_buffers].start) {
+ fprintf(stderr, "Out of memory\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+static void init_device(void)
+{
+ struct v4l2_capability cap;
+ struct v4l2_cropcap cropcap;
+ struct v4l2_crop crop;
+ struct v4l2_format fmt;
+ unsigned int min;
+
+ if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {
+ if (EINVAL == errno) {
+ fprintf(stderr, "%s is no V4L2 device\n",
+ dev_name);
+ exit(EXIT_FAILURE);
+ } else {
+ errno_exit("VIDIOC_QUERYCAP");
+ }
+ }
+
+ if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
+ fprintf(stderr, "%s is no video capture device\n",
+ dev_name);
+ exit(EXIT_FAILURE);
+ }
+
+ switch (io) {
+ case IO_METHOD_READ:
+ if (!(cap.capabilities & V4L2_CAP_READWRITE)) {
+ fprintf(stderr, "%s does not support read i/o\n",
+ dev_name);
+ exit(EXIT_FAILURE);
+ }
+ break;
+
+ case IO_METHOD_MMAP:
+ case IO_METHOD_USERPTR:
+ if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
+ fprintf(stderr, "%s does not support streaming i/o\n",
+ dev_name);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ }
+
+
+ /* Select video input, video standard and tune here. */
+
+
+ CLEAR(cropcap);
+
+ cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
+ crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ crop.c = cropcap.defrect; /* reset to default */
+
+ if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {
+ switch (errno) {
+ case EINVAL:
+ /* Cropping not supported. */
+ break;
+ default:
+ /* Errors ignored. */
+ break;
+ }
+ }
+ } else {
+ /* Errors ignored. */
+ }
+
+
+ CLEAR(fmt);
+
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (force_format) {
+ fmt.fmt.pix.width = 640;
+ fmt.fmt.pix.height = 480;
+ fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
+ errno_exit("VIDIOC_S_FMT");
+
+ /* Note VIDIOC_S_FMT may change width and height. */
+ } else {
+ /* Preserve original settings as set by v4l2-ctl for example */
+ if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))
+ errno_exit("VIDIOC_G_FMT");
+ }
+
+ /* Buggy driver paranoia. */
+ min = fmt.fmt.pix.width * 2;
+ if (fmt.fmt.pix.bytesperline < min)
+ fmt.fmt.pix.bytesperline = min;
+ min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
+ if (fmt.fmt.pix.sizeimage < min)
+ fmt.fmt.pix.sizeimage = min;
+
+ switch (io) {
+ case IO_METHOD_READ:
+ init_read(fmt.fmt.pix.sizeimage);
+ break;
+
+ case IO_METHOD_MMAP:
+ init_mmap();
+ break;
+
+ case IO_METHOD_USERPTR:
+ init_userp(fmt.fmt.pix.sizeimage);
+ break;
+ }
+}
+
+static void close_device(void)
+{
+ if (-1 == close(fd))
+ errno_exit("close");
+
+ fd = -1;
+}
+
+static void open_device(void)
+{
+ struct stat st;
+
+ if (-1 == stat(dev_name, &st)) {
+ fprintf(stderr, "Cannot identify '%s': %d, %s\n",
+ dev_name, errno, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (!S_ISCHR(st.st_mode)) {
+ fprintf(stderr, "%s is no device\n", dev_name);
+ exit(EXIT_FAILURE);
+ }
+
+ fd = open(dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);
+
+ if (-1 == fd) {
+ fprintf(stderr, "Cannot open '%s': %d, %s\n",
+ dev_name, errno, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void usage(FILE *fp, int argc, char **argv)
+{
+ fprintf(fp,
+ "Usage: %s [options]\n\n"
+ "Version 1.3\n"
+ "Options:\n"
+ "-d | --device name Video device name [%s]\n"
+ "-h | --help Print this message\n"
+ "-m | --mmap Use memory mapped buffers [default]\n"
+ "-r | --read Use read() calls\n"
+ "-u | --userp Use application allocated buffers\n"
+ "-o | --output Outputs stream to stdout\n"
+ "-f | --format Force format to 640x480 YUYV\n"
+ "-c | --count Number of frames to grab [%i]\n"
+ "",
+ argv[0], dev_name, frame_count);
+}
+
+static const char short_options[] = "d:hmruofc:";
+
+static const struct option
+long_options[] = {
+ { "device", required_argument, NULL, 'd' },
+ { "help", no_argument, NULL, 'h' },
+ { "mmap", no_argument, NULL, 'm' },
+ { "read", no_argument, NULL, 'r' },
+ { "userp", no_argument, NULL, 'u' },
+ { "output", no_argument, NULL, 'o' },
+ { "format", no_argument, NULL, 'f' },
+ { "count", required_argument, NULL, 'c' },
+ { 0, 0, 0, 0 }
+};
+
+int main(int argc, char **argv)
+{
+ dev_name = "/dev/video0";
+
+ for (;;) {
+ int idx;
+ int c;
+
+ c = getopt_long(argc, argv,
+ short_options, long_options, &idx);
+
+ if (-1 == c)
+ break;
+
+ switch (c) {
+ case 0: /* getopt_long() flag */
+ break;
+
+ case 'd':
+ dev_name = optarg;
+ break;
+
+ case 'h':
+ usage(stdout, argc, argv);
+ exit(EXIT_SUCCESS);
+
+ case 'm':
+ io = IO_METHOD_MMAP;
+ break;
+
+ case 'r':
+ io = IO_METHOD_READ;
+ break;
+
+ case 'u':
+ io = IO_METHOD_USERPTR;
+ break;
+
+ case 'o':
+ out_buf++;
+ break;
+
+ case 'f':
+ force_format++;
+ break;
+
+ case 'c':
+ errno = 0;
+ frame_count = strtol(optarg, NULL, 0);
+ if (errno)
+ errno_exit(optarg);
+ break;
+
+ default:
+ usage(stderr, argc, argv);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ open_device();
+ init_device();
+ start_capturing();
+ mainloop();
+ stop_capturing();
+ uninit_device();
+ close_device();
+ fprintf(stderr, "\n");
+ return 0;
+} \ No newline at end of file
diff --git a/v3/inc/CameraProperties.h b/v3/inc/CameraProperties.h
new file mode 100755
index 0000000..62bb4c0
--- a/dev/null
+++ b/v3/inc/CameraProperties.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_PROPERTIES_H
+#define CAMERA_PROPERTIES_H
+
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <stdio.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <cutils/properties.h>
+
+namespace android {
+
+#define EXIF_MAKE_DEFAULT "default_make"
+#define EXIF_MODEL_DEFAULT "default_model"
+
+// Class that handles the Camera Properties
+class CameraProperties
+{
+public:
+ static const char PIXEL_FORMAT_RGB24[];
+ static const char RELOAD_WHEN_OPEN[];
+ static const char DEVICE_NAME[];
+
+ static const char DEFAULT_VALUE[];
+ static const char PARAMS_DELIMITER[];
+ CameraProperties();
+ ~CameraProperties();
+
+ // container class passed around for accessing properties
+ class Properties
+ {
+ public:
+ Properties()
+ {
+ mProperties = new DefaultKeyedVector<String8, String8>(String8(DEFAULT_VALUE));
+ char property[PROPERTY_VALUE_MAX];
+ property_get("ro.product.manufacturer", property, EXIF_MAKE_DEFAULT);
+ property[0] = toupper(property[0]);
+ set(EXIF_MAKE, property);
+ property_get("ro.product.model", property, EXIF_MODEL_DEFAULT);
+ property[0] = toupper(property[0]);
+ set(EXIF_MODEL, property);
+ }
+ ~Properties()
+ {
+ delete mProperties;
+ }
+ ssize_t set(const char *prop, const char *value);
+ ssize_t set(const char *prop, int value);
+ const char* get(const char * prop);
+ void dump();
+
+ protected:
+ const char* keyAt(unsigned int);
+ const char* valueAt(unsigned int);
+
+ private:
+ DefaultKeyedVector<String8, String8>* mProperties;
+
+ };
+
+ ///Initializes the CameraProperties class
+ status_t initialize(int cameraid);
+ status_t loadProperties();
+ int camerasSupported();
+ int getProperties(int cameraIndex, Properties** properties);
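+    // A minimal usage sketch (hypothetical caller):
+    //   CameraProperties props;
+    //   props.initialize(0);
+    //   CameraProperties::Properties *p = NULL;
+    //   props.getProperties(0, &p);
+    //   const char *device = p->get(CameraProperties::DEVICE_NAME);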
+
+private:
+
+ uint32_t mCamerasSupported;
+ int mInitialized;
+ mutable Mutex mLock;
+
+ Properties mCameraProps[MAX_CAM_NUM_ADD_VCAM];
+
+};
+
+};
+
+#endif //CAMERA_PROPERTIES_H
diff --git a/v3/inc/DebugUtils.h b/v3/inc/DebugUtils.h
new file mode 100755
index 0000000..2686db2
--- a/dev/null
+++ b/v3/inc/DebugUtils.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+use "dumpsys media.camera -t x" to change log level to x or
+use "adb shell dumpsys media.camera -t x" to change log level to x
+*/
+
+#ifndef DEBUG_UTILS_H
+#define DEBUG_UTILS_H
+#include <stdint.h>
+#include <utils/Log.h>
+
+//Uncomment to enable more verbose/debug logs
+#define DEBUG_LOG
+extern volatile int32_t gCamHal_LogLevel;
+
+///Camera HAL Logging Functions
+#ifndef DEBUG_LOG
+
+#define CAMHAL_LOGDA(str)
+#define CAMHAL_LOGDB(str, ...)
+#define CAMHAL_LOGVA(str)
+#define CAMHAL_LOGVB(str, ...)
+
+#define CAMHAL_LOGIA ALOGD
+#define CAMHAL_LOGIB ALOGD
+#define CAMHAL_LOGWA ALOGE
+#define CAMHAL_LOGWB ALOGE
+#define CAMHAL_LOGEA ALOGE
+#define CAMHAL_LOGEB ALOGE
+#define CAMHAL_LOGFA ALOGE
+#define CAMHAL_LOGFB ALOGE
+
+#undef LOG_FUNCTION_NAME
+#undef LOG_FUNCTION_NAME_EXIT
+#define LOG_FUNCTION_NAME
+#define LOG_FUNCTION_NAME_EXIT
+
+#else
+
+#ifndef CAMHAL_BUILD_NAME
+#define CAMHAL_BUILD_NAME "===|||camera debug|||==="
+#endif
+///Defines for debug statements - Macro LOG_TAG needs to be defined in the respective files
+#define CAMHAL_LOGVA(str) ALOGV_IF(gCamHal_LogLevel >=6,"%5d %s - " str, __LINE__,__FUNCTION__);
+#define CAMHAL_LOGVB(str,...) ALOGV_IF(gCamHal_LogLevel >=6,"%5d %s - " str, __LINE__, __FUNCTION__, __VA_ARGS__);
+#define CAMHAL_LOGDA(str) ALOGD_IF(gCamHal_LogLevel >=5,"%5d %s - " str, __LINE__,__FUNCTION__);
+#define CAMHAL_LOGDB(str, ...) ALOGD_IF(gCamHal_LogLevel >=5,"%5d %s - " str, __LINE__, __FUNCTION__, __VA_ARGS__);
+#define CAMHAL_LOGIA(str) ALOGI_IF(gCamHal_LogLevel >=4,"%5d %s - " str, __LINE__, __FUNCTION__);
+#define CAMHAL_LOGIB(str, ...) ALOGI_IF(gCamHal_LogLevel >=4,"%5d %s - " str, __LINE__,__FUNCTION__, __VA_ARGS__);
+#define CAMHAL_LOGWA(str) ALOGW_IF(gCamHal_LogLevel >=3,"%5d %s - " str, __LINE__, __FUNCTION__);
+#define CAMHAL_LOGWB(str, ...) ALOGW_IF(gCamHal_LogLevel >=3,"%5d %s - " str, __LINE__,__FUNCTION__, __VA_ARGS__);
+#define CAMHAL_LOGEA(str) ALOGE_IF(gCamHal_LogLevel >=2,"%5d %s - " str, __LINE__, __FUNCTION__);
+#define CAMHAL_LOGEB(str, ...) ALOGE_IF(gCamHal_LogLevel >=2,"%5d %s - " str, __LINE__,__FUNCTION__, __VA_ARGS__);
+#define CAMHAL_LOGFA(str) ALOGF_IF(gCamHal_LogLevel >=1,"%5d %s - " str, __LINE__, __FUNCTION__);
+#define CAMHAL_LOGFB(str, ...) ALOGF_IF(gCamHal_LogLevel >=1,"%5d %s - " str, __LINE__,__FUNCTION__, __VA_ARGS__);
+
+#define LOG_FUNCTION_NAME CAMHAL_LOGVA("ENTER");
+#define LOG_FUNCTION_NAME_EXIT CAMHAL_LOGVA("EXIT");
+#define DBG_LOGA(str) ALOGI_IF(gCamHal_LogLevel >=4,"%10s-%5d %s - " str, CAMHAL_BUILD_NAME, __LINE__,__FUNCTION__)
+#define DBG_LOGB(str, ...) ALOGI_IF(gCamHal_LogLevel >=4,"%10s-%5d %s - " str, CAMHAL_BUILD_NAME, __LINE__,__FUNCTION__, __VA_ARGS__);
+
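+// Example usage (the including file must define LOG_TAG):
+//   CAMHAL_LOGDB("open %s failed, errno=%d", dev_name, errno); // printed at level >= 5
+//   DBG_LOGA("stream on");                                     // printed at level >= 4
+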
+#endif
+
+#endif //DEBUG_UTILS_H
diff --git a/v3/inc/MCameraParameters.h b/v3/inc/MCameraParameters.h
new file mode 100644
index 0000000..5240070
--- a/dev/null
+++ b/v3/inc/MCameraParameters.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+#define ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <camera/CameraParameters.h>
+
+namespace android {
+
+class MCameraParameters: public CameraParameters
+{
+public:
+ MCameraParameters();
+ MCameraParameters(const String8 &params) { unflatten(params); }
+ ~MCameraParameters();
+
+protected:
+
+private:
+ int mFd;
+};
+
+}; // namespace android
+
+#endif
diff --git a/v3/media_codecs.xml b/v3/media_codecs.xml
new file mode 100644
index 0000000..87d11f2
--- a/dev/null
+++ b/v3/media_codecs.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2012 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+<!DOCTYPE MediaCodecs [
+<!ELEMENT Include EMPTY>
+<!ATTLIST Include href CDATA #REQUIRED>
+<!ELEMENT MediaCodecs (Decoders|Encoders|Include)*>
+<!ELEMENT Decoders (MediaCodec|Include)*>
+<!ELEMENT Encoders (MediaCodec|Include)*>
+<!ELEMENT MediaCodec (Type|Quirk|Include)*>
+<!ATTLIST MediaCodec name CDATA #REQUIRED>
+<!ATTLIST MediaCodec type CDATA>
+<!ELEMENT Type EMPTY>
+<!ATTLIST Type name CDATA #REQUIRED>
+<!ELEMENT Quirk EMPTY>
+<!ATTLIST Quirk name CDATA #REQUIRED>
+]>
+
+There's a simple and a complex syntax to declare the availability of a
+media codec:
+
+A codec that properly follows the OpenMax spec and therefore doesn't have any
+quirks and that only supports a single content type can be declared like so:
+
+ <MediaCodec name="OMX.foo.bar" type="something/interesting" />
+
+If a codec has quirks OR supports multiple content types, the following syntax
+can be used:
+
+ <MediaCodec name="OMX.foo.bar" >
+ <Type name="something/interesting" />
+ <Type name="something/else" />
+ ...
+ <Quirk name="requires-allocate-on-input-ports" />
+ <Quirk name="requires-allocate-on-output-ports" />
+ <Quirk name="output-buffers-are-unreadable" />
+ </MediaCodec>
+
+Only the three quirks included above are recognized at this point:
+
+"requires-allocate-on-input-ports"
+ must be advertised if the component does not properly support specification
+ of input buffers using the OMX_UseBuffer(...) API but instead requires
+ OMX_AllocateBuffer to be used.
+
+"requires-allocate-on-output-ports"
+ must be advertised if the component does not properly support specification
+ of output buffers using the OMX_UseBuffer(...) API but instead requires
+ OMX_AllocateBuffer to be used.
+
+"output-buffers-are-unreadable"
+ must be advertised if the emitted output buffers of a decoder component
+ are not readable, i.e. use a custom format even though abusing one of
+ the official OMX colorspace constants.
+ Clients of such decoders will not be able to access the decoded data,
+ naturally making the component much less useful. The only use for
+ a component with this quirk is to render the output to the screen.
+ Audio decoders MUST NOT advertise this quirk.
+ Video decoders that advertise this quirk must be accompanied by a
+ corresponding color space converter for thumbnail extraction,
+ matching surfaceflinger support that can render the custom format to
+ a texture and possibly other code, so just DON'T USE THIS QUIRK.
+
+-->
+
+<MediaCodecs>
+ <Include href="media_codecs_google_audio.xml" />
+ <Include href="media_codecs_google_telephony.xml" />
+ <Include href="media_codecs_google_video.xml" />
+</MediaCodecs>
diff --git a/v3/media_profiles.xml b/v3/media_profiles.xml
new file mode 100644
index 0000000..42ceb8d
--- a/dev/null
+++ b/v3/media_profiles.xml
@@ -0,0 +1,414 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2010 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!DOCTYPE MediaSettings [
+<!ELEMENT MediaSettings (CamcorderProfiles,
+ EncoderOutputFileFormat+,
+ VideoEncoderCap+,
+ AudioEncoderCap+,
+ VideoDecoderCap,
+ AudioDecoderCap)>
+<!ELEMENT CamcorderProfiles (EncoderProfile+, ImageEncoding+, ImageDecoding, Camera)>
+<!ELEMENT EncoderProfile (Video, Audio)>
+<!ATTLIST EncoderProfile quality (high|low) #REQUIRED>
+<!ATTLIST EncoderProfile fileFormat (mp4|3gp) #REQUIRED>
+<!ATTLIST EncoderProfile duration (30|60) #REQUIRED>
+<!ATTLIST EncoderProfile cameraId (0|1) #REQUIRED>
+<!ELEMENT Video EMPTY>
+<!ATTLIST Video codec (h264|h263|m4v) #REQUIRED>
+<!ATTLIST Video bitRate CDATA #REQUIRED>
+<!ATTLIST Video width CDATA #REQUIRED>
+<!ATTLIST Video height CDATA #REQUIRED>
+<!ATTLIST Video frameRate CDATA #REQUIRED>
+<!ELEMENT Audio EMPTY>
+<!ATTLIST Audio codec (amrnb|amrwb|aac) #REQUIRED>
+<!ATTLIST Audio bitRate CDATA #REQUIRED>
+<!ATTLIST Audio sampleRate CDATA #REQUIRED>
+<!ATTLIST Audio channels (1|2) #REQUIRED>
+<!ELEMENT ImageEncoding EMPTY>
+<!ATTLIST ImageEncoding quality (90|80|70|60|50|40) #REQUIRED>
+<!ELEMENT ImageDecoding EMPTY>
+<!ATTLIST ImageDecoding memCap CDATA #REQUIRED>
+<!ELEMENT Camera EMPTY>
+<!ELEMENT EncoderOutputFileFormat EMPTY>
+<!ATTLIST EncoderOutputFileFormat name (mp4|3gp) #REQUIRED>
+<!ELEMENT VideoEncoderCap EMPTY>
+<!ATTLIST VideoEncoderCap name (h264|h263|m4v|wmv) #REQUIRED>
+<!ATTLIST VideoEncoderCap enabled (true|false) #REQUIRED>
+<!ATTLIST VideoEncoderCap minBitRate CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap maxBitRate CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap minFrameWidth CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap maxFrameWidth CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap minFrameHeight CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap maxFrameHeight CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap minFrameRate CDATA #REQUIRED>
+<!ATTLIST VideoEncoderCap maxFrameRate CDATA #REQUIRED>
+<!ELEMENT AudioEncoderCap EMPTY>
+<!ATTLIST AudioEncoderCap name (amrnb|amrwb|aac|wma) #REQUIRED>
+<!ATTLIST AudioEncoderCap enabled (true|false) #REQUIRED>
+<!ATTLIST AudioEncoderCap minBitRate CDATA #REQUIRED>
+<!ATTLIST AudioEncoderCap maxBitRate CDATA #REQUIRED>
+<!ATTLIST AudioEncoderCap minSampleRate CDATA #REQUIRED>
+<!ATTLIST AudioEncoderCap maxSampleRate CDATA #REQUIRED>
+<!ATTLIST AudioEncoderCap minChannels (1|2) #REQUIRED>
+<!ATTLIST AudioEncoderCap maxChannels (1|2) #REQUIRED>
+<!ELEMENT VideoDecoderCap EMPTY>
+<!ATTLIST VideoDecoderCap name (wmv) #REQUIRED>
+<!ATTLIST VideoDecoderCap enabled (true|false) #REQUIRED>
+<!ELEMENT AudioDecoderCap EMPTY>
+<!ATTLIST AudioDecoderCap name (wma) #REQUIRED>
+<!ATTLIST AudioDecoderCap enabled (true|false) #REQUIRED>
+<!ELEMENT VideoEditorCap EMPTY>
+<!ATTLIST VideoEditorCap maxInputFrameWidth CDATA #REQUIRED>
+<!ATTLIST VideoEditorCap maxInputFrameHeight CDATA #REQUIRED>
+<!ATTLIST VideoEditorCap maxOutputFrameWidth CDATA #REQUIRED>
+<!ATTLIST VideoEditorCap maxOutputFrameHeight CDATA #REQUIRED>
+<!ATTLIST VideoEditorCap maxPrefetchYUVFrames CDATA #REQUIRED>
+<!ELEMENT ExportVideoProfile EMPTY>
+<!ATTLIST ExportVideoProfile name (h264|h263|m4v) #REQUIRED>
+<!ATTLIST ExportVideoProfile profile CDATA #REQUIRED>
+<!ATTLIST ExportVideoProfile level CDATA #REQUIRED>
+]>
+<!--
+ This file is used to declare the multimedia profiles and capabilities
+ on an android-powered device.
+-->
+<MediaSettings>
+ <!-- Each camcorder profile defines a set of predefined configuration parameters -->
+ <CamcorderProfiles cameraId="0">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+ <!-- audio setting is ignored -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="1">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+ <!-- audio setting is ignored -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="2">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+ <!-- audio setting is ignored -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="3">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+ <!-- audio setting is ignored -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="4">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+            <!-- audio settings are ignored for time-lapse capture -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="5">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+            <!-- audio settings are ignored for time-lapse capture -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <CamcorderProfiles cameraId="6">
+
+ <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+ <Video codec="m4v"
+ bitRate="128000"
+ width="320"
+ height="240"
+ frameRate="15" />
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <EncoderProfile quality="timelapseqcif" fileFormat="mp4" duration="30">
+ <Video codec="h264"
+ bitRate="192000"
+ width="176"
+ height="144"
+ frameRate="30" />
+            <!-- audio settings are ignored for time-lapse capture -->
+ <Audio codec="amrnb"
+ bitRate="12200"
+ sampleRate="8000"
+ channels="1" />
+ </EncoderProfile>
+
+ <ImageEncoding quality="95" />
+ <ImageEncoding quality="80" />
+ <ImageEncoding quality="70" />
+ <ImageDecoding memCap="20000000" />
+
+ </CamcorderProfiles>
+
+ <EncoderOutputFileFormat name="3gp" />
+ <EncoderOutputFileFormat name="mp4" />
+
+ <!--
+        If a codec is not enabled, it is invisible to applications:
+        they can neither use the codec nor query its capabilities.
+ -->
+ <VideoEncoderCap name="h264" enabled="true"
+ minBitRate="64000" maxBitRate="192000"
+ minFrameWidth="176" maxFrameWidth="320"
+ minFrameHeight="144" maxFrameHeight="240"
+ minFrameRate="15" maxFrameRate="30" />
+
+ <VideoEncoderCap name="h263" enabled="true"
+ minBitRate="64000" maxBitRate="192000"
+ minFrameWidth="176" maxFrameWidth="320"
+ minFrameHeight="144" maxFrameHeight="240"
+ minFrameRate="15" maxFrameRate="30" />
+
+ <VideoEncoderCap name="m4v" enabled="true"
+ minBitRate="64000" maxBitRate="192000"
+ minFrameWidth="176" maxFrameWidth="320"
+ minFrameHeight="144" maxFrameHeight="240"
+ minFrameRate="15" maxFrameRate="30" />
+
+ <AudioEncoderCap name="aac" enabled="true"
+ minBitRate="8000" maxBitRate="96000"
+ minSampleRate="8000" maxSampleRate="48000"
+ minChannels="1" maxChannels="1" />
+
+ <AudioEncoderCap name="amrwb" enabled="true"
+ minBitRate="6600" maxBitRate="23050"
+ minSampleRate="16000" maxSampleRate="16000"
+ minChannels="1" maxChannels="1" />
+
+ <AudioEncoderCap name="amrnb" enabled="true"
+ minBitRate="5525" maxBitRate="12200"
+ minSampleRate="8000" maxSampleRate="8000"
+ minChannels="1" maxChannels="1" />
+
+ <!--
+ FIXME:
+        Decoder capabilities are not checked at present. The only check
+        performed is whether Windows Media is visible to TEST applications;
+        no checks are performed for other applications.
+ -->
+ <VideoDecoderCap name="wmv" enabled="false"/>
+ <AudioDecoderCap name="wma" enabled="false"/>
+
+ <!--
+ The VideoEditor Capability configuration:
+ - maxInputFrameWidth: maximum video width of imported video clip.
+ - maxInputFrameHeight: maximum video height of imported video clip.
+ - maxOutputFrameWidth: maximum video width of exported video clip.
+ - maxOutputFrameHeight: maximum video height of exported video clip.
+        - maxPrefetchYUVFrames: maximum number of YUV frames prefetched for
+          the encoder, used to limit the memory consumed by prefetched
+          frames. On this platform it allows at most ~1MB (~0.1MB per QVGA
+          frame x 10 frames); see the worked estimate below.
+ -->
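+    <!--
+        Worked estimate behind the ~1MB figure above (a sketch, assuming
+        the prefetched frames are YUV420 at 1.5 bytes per pixel):
+            320 x 240 x 1.5 = 115,200 bytes (~0.11 MB) per QVGA frame
+            115,200 bytes x 10 frames = 1,152,000 bytes (~1.1 MB)
+    -->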
+
+ <VideoEditorCap maxInputFrameWidth="320"
+ maxInputFrameHeight="240" maxOutputFrameWidth="320"
+ maxOutputFrameHeight="240" maxPrefetchYUVFrames="10" />
+ <!--
+ The VideoEditor Export codec profile and level values
+ correspond to the values in OMX_Video.h.
+ E.g. for h264, profile value 1 means OMX_VIDEO_AVCProfileBaseline
+ and level 4096 means OMX_VIDEO_AVCLevel41.
+        Note that the values are decimal and apply to the video encoder.
+ -->
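+    <!--
+        For reference, the OMX_Video.h constants behind the decimal values
+        used below:
+            OMX_VIDEO_AVCProfileBaseline  = 0x01  (1)
+            OMX_VIDEO_AVCLevel31          = 0x200 (512)
+            OMX_VIDEO_H263ProfileBaseline = 0x01  (1)
+            OMX_VIDEO_H263Level10         = 0x01  (1)
+            OMX_VIDEO_MPEG4ProfileSimple  = 0x01  (1)
+            OMX_VIDEO_MPEG4Level3         = 0x10  (16)
+    -->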
+    <!--
+        Codec = h.264, Baseline profile, level 3.1 (512 = OMX_VIDEO_AVCLevel31)
+    -->
+    <ExportVideoProfile name="h264" profile="1" level="512"/>
+    <!--
+        Codec = h.263, Baseline profile, level 10 (1 = OMX_VIDEO_H263Level10)
+    -->
+    <ExportVideoProfile name="h263" profile="1" level="1"/>
+    <!--
+        Codec = mpeg4, Simple profile, level 3 (16 = OMX_VIDEO_MPEG4Level3)
+    -->
+    <ExportVideoProfile name="m4v" profile="1" level="16"/>
+</MediaSettings>