diff options
author | Android Build Coastguard Worker <android-build-coastguard-worker@google.com> | 2023-06-15 10:16:55 +0000 |
---|---|---|
committer | Android Build Coastguard Worker <android-build-coastguard-worker@google.com> | 2023-06-15 10:16:55 +0000 |
commit | de55dabf2f4a98016e5e275b60ef9b6bb7595db7 (patch) | |
tree | 035178e8d6dfc8128676f5c6a07d0357ea8a4b70 | |
parent | bd28a53a2e095441ef6562648a426de9e3055059 (diff) | |
parent | 73fc75989d8aa23ea5990a98ef4a2a7188a44135 (diff) |
Snap for 10323517 from 73fc75989d8aa23ea5990a98ef4a2a7188a44135 to t-keystone-qcom-release
Change-Id: Id16baab01ef3d010f1f95bfd21d0aa7a31a81de5
96 files changed, 2208 insertions, 502 deletions
diff --git a/camera/Camera.cpp b/camera/Camera.cpp index d1618e409b..224468274b 100644 --- a/camera/Camera.cpp +++ b/camera/Camera.cpp @@ -71,10 +71,11 @@ Camera::~Camera() } sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName, - int clientUid, int clientPid, int targetSdkVersion, bool overrideToPortrait) + int clientUid, int clientPid, int targetSdkVersion, bool overrideToPortrait, + bool forceSlowJpegMode) { return CameraBaseT::connect(cameraId, clientPackageName, clientUid, - clientPid, targetSdkVersion, overrideToPortrait); + clientPid, targetSdkVersion, overrideToPortrait, forceSlowJpegMode); } status_t Camera::reconnect() diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp index 0a5bc12ec9..9ae4607d3f 100644 --- a/camera/CameraBase.cpp +++ b/camera/CameraBase.cpp @@ -163,7 +163,7 @@ template <typename TCam, typename TCamTraits> sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId, const String16& clientPackageName, int clientUid, int clientPid, int targetSdkVersion, - bool overrideToPortrait) + bool overrideToPortrait, bool forceSlowJpegMode) { ALOGV("%s: connect", __FUNCTION__); sp<TCam> c = new TCam(cameraId); @@ -173,9 +173,11 @@ sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId, binder::Status ret; if (cs != nullptr) { TCamConnectService fnConnectService = TCamTraits::fnConnectService; - ALOGI("Connect camera (legacy API) - overrideToPortrait %d", overrideToPortrait); + ALOGI("Connect camera (legacy API) - overrideToPortrait %d, forceSlowJpegMode %d", + overrideToPortrait, forceSlowJpegMode); ret = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid, - clientPid, targetSdkVersion, overrideToPortrait, /*out*/ &c->mCamera); + clientPid, targetSdkVersion, overrideToPortrait, forceSlowJpegMode, + /*out*/ &c->mCamera); } if (ret.isOk() && c->mCamera != nullptr) { IInterface::asBinder(c->mCamera)->linkToDeath(c); diff --git a/camera/aidl/android/hardware/ICameraService.aidl 
b/camera/aidl/android/hardware/ICameraService.aidl index 01baba1663..9f325950c0 100644 --- a/camera/aidl/android/hardware/ICameraService.aidl +++ b/camera/aidl/android/hardware/ICameraService.aidl @@ -84,7 +84,8 @@ interface ICameraService String opPackageName, int clientUid, int clientPid, int targetSdkVersion, - boolean overrideToPortrait); + boolean overrideToPortrait, + boolean forceSlowJpegMode); /** * Open a camera device through the new camera API diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h index 26c36a7713..21b57afabf 100644 --- a/camera/include/camera/Camera.h +++ b/camera/include/camera/Camera.h @@ -58,7 +58,7 @@ struct CameraTraits<Camera> typedef ::android::hardware::ICameraClient TCamCallbacks; typedef ::android::binder::Status(::android::hardware::ICameraService::*TCamConnectService) (const sp<::android::hardware::ICameraClient>&, - int, const String16&, int, int, int, bool, + int, const String16&, int, int, int, bool, bool, /*out*/ sp<::android::hardware::ICamera>*); static TCamConnectService fnConnectService; @@ -82,7 +82,7 @@ public: static sp<Camera> connect(int cameraId, const String16& clientPackageName, int clientUid, int clientPid, int targetSdkVersion, - bool overrideToPortrait); + bool overrideToPortrait, bool forceSlowJpegMode); virtual ~Camera(); diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h index 9d0721bb5d..b20dc1bcd3 100644 --- a/camera/include/camera/CameraBase.h +++ b/camera/include/camera/CameraBase.h @@ -120,7 +120,7 @@ public: static sp<TCam> connect(int cameraId, const String16& clientPackageName, int clientUid, int clientPid, int targetSdkVersion, - bool overrideToPortrait); + bool overrideToPortrait, bool forceSlowJpegMode); virtual void disconnect(); void setListener(const sp<TCamListener>& listener); diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp index 23d90ccf4a..02047ae506 100644 --- 
a/camera/ndk/impl/ACameraManager.cpp +++ b/camera/ndk/impl/ACameraManager.cpp @@ -696,7 +696,7 @@ camera_status_t ACameraManager::getCameraCharacteristics( CameraMetadata rawMetadata; int targetSdkVersion = android_get_application_target_sdk_version(); binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr), - targetSdkVersion, /*overrideToPortrait*/true, &rawMetadata); + targetSdkVersion, /*overrideToPortrait*/false, &rawMetadata); if (!serviceRet.isOk()) { switch(serviceRet.serviceSpecificErrorCode()) { case hardware::ICameraService::ERROR_DISCONNECTED: @@ -748,7 +748,7 @@ ACameraManager::openCamera( binder::Status serviceRet = cs->connectDevice( callbacks, String16(cameraId), String16(""), {}, hardware::ICameraService::USE_CALLING_UID, /*oomScoreOffset*/0, - targetSdkVersion, /*overrideToPortrait*/true, /*out*/&deviceRemote); + targetSdkVersion, /*overrideToPortrait*/false, /*out*/&deviceRemote); if (!serviceRet.isOk()) { ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string()); diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h index 3246757af5..20317b96b5 100644 --- a/camera/ndk/include/camera/NdkCameraMetadataTags.h +++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h @@ -9215,24 +9215,25 @@ typedef enum acamera_metadata_enum_acamera_request_available_capabilities { * camera's crop region is set to maximum size, the FOV of the physical streams for the * ultrawide lens will be the same as the logical stream, by making the crop region * smaller than its active array size to compensate for the smaller focal length.</p> - * <p>There are two ways for the application to capture RAW images from a logical camera - * with RAW capability:</p> - * <ul> - * <li>Because the underlying physical cameras may have different RAW capabilities (such - * as resolution or CFA pattern), to maintain backward compatibility, when a RAW stream - * is configured, 
the camera device makes sure the default active physical camera remains - * active and does not switch to other physical cameras. (One exception is that, if the - * logical camera consists of identical image sensors and advertises multiple focalLength - * due to different lenses, the camera device may generate RAW images from different - * physical cameras based on the focalLength being set by the application.) This - * backward-compatible approach usually results in loss of optical zoom, to telephoto - * lens or to ultrawide lens.</li> - * <li>Alternatively, to take advantage of the full zoomRatio range of the logical camera, - * the application should use <a href="https://developer.android.com/reference/android/hardware/camera2/MultiResolutionImageReader.html">MultiResolutionImageReader</a> - * to capture RAW images from the currently active physical camera. Because different - * physical camera may have different RAW characteristics, the application needs to use - * the characteristics and result metadata of the active physical camera for the - * relevant RAW metadata.</li> + * <p>For a logical camera, typically the underlying physical cameras have different RAW + * capabilities (such as resolution or CFA pattern). There are two ways for the + * application to capture RAW images from the logical camera:</p> + * <ul> + * <li>If the logical camera has RAW capability, the application can create and use RAW + * streams in the same way as before. In case a RAW stream is configured, to maintain + * backward compatibility, the camera device makes sure the default active physical + * camera remains active and does not switch to other physical cameras. (One exception + * is that, if the logical camera consists of identical image sensors and advertises + * multiple focalLength due to different lenses, the camera device may generate RAW + * images from different physical cameras based on the focalLength being set by the + * application.) 
This backward-compatible approach usually results in loss of optical + * zoom, to telephoto lens or to ultrawide lens.</li> + * <li>Alternatively, if supported by the device, + * <a href="https://developer.android.com/reference/android/hardware/camera2/MultiResolutionImageReader.html">MultiResolutionImageReader</a> + * can be used to capture RAW images from one of the underlying physical cameras ( + * depending on current zoom level). Because different physical cameras may have + * different RAW characteristics, the application needs to use the characteristics + * and result metadata of the active physical camera for the relevant RAW metadata.</li> * </ul> * <p>The capture request and result metadata tags required for backward compatible camera * functionalities will be solely based on the logical camera capability. On the other diff --git a/camera/tests/CameraZSLTests.cpp b/camera/tests/CameraZSLTests.cpp index bdfb84a046..64237096e3 100644 --- a/camera/tests/CameraZSLTests.cpp +++ b/camera/tests/CameraZSLTests.cpp @@ -211,7 +211,7 @@ TEST_F(CameraZSLTests, TestAllPictureSizes) { String16("ZSLTest"), hardware::ICameraService::USE_CALLING_UID, hardware::ICameraService::USE_CALLING_PID, /*targetSdkVersion*/__ANDROID_API_FUTURE__, - /*overrideToPortrait*/false, &cameraDevice); + /*overrideToPortrait*/false, /*forceSlowJpegMode*/false, &cameraDevice); EXPECT_TRUE(rc.isOk()); CameraParameters params(cameraDevice->getParameters()); diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp index c38dbef7cb..ea994833af 100644 --- a/drm/libmediadrm/DrmHalHidl.cpp +++ b/drm/libmediadrm/DrmHalHidl.cpp @@ -513,10 +513,14 @@ status_t DrmHalHidl::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory>& factor if (mimeType == "") { // isCryptoSchemeSupported(uuid) *isSupported = true; - } else { - // isCryptoSchemeSupported(uuid, mimeType) - *isSupported = factory->isContentTypeSupported(mimeType.string()); + return OK; + } + // isCryptoSchemeSupported(uuid, 
mimeType) + auto hResult = factory->isContentTypeSupported(mimeType.string()); + if (!hResult.isOk()) { + return DEAD_OBJECT; } + *isSupported = hResult; return OK; } else if (mimeType == "") { return BAD_VALUE; @@ -526,8 +530,12 @@ status_t DrmHalHidl::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory>& factor if (factoryV1_2 == NULL) { return ERROR_UNSUPPORTED; } else { - *isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(), + auto hResult = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(), toHidlSecurityLevel(level)); + if (!hResult.isOk()) { + return DEAD_OBJECT; + } + *isSupported = hResult; return OK; } } @@ -537,7 +545,8 @@ status_t DrmHalHidl::isCryptoSchemeSupported(const uint8_t uuid[16], const Strin Mutex::Autolock autoLock(mLock); *isSupported = false; for (ssize_t i = mFactories.size() - 1; i >= 0; i--) { - if (mFactories[i]->isCryptoSchemeSupported(uuid)) { + auto hResult = mFactories[i]->isCryptoSchemeSupported(uuid); + if (hResult.isOk() && hResult) { return matchMimeTypeAndSecurityLevel(mFactories[i], uuid, mimeType, level, isSupported); } } diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h index d4025e5cbc..78ea2a1634 100644 --- a/include/private/media/VideoFrame.h +++ b/include/private/media/VideoFrame.h @@ -42,9 +42,15 @@ public: mWidth(width), mHeight(height), mDisplayWidth(displayWidth), mDisplayHeight(displayHeight), mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0), - mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width), - mSize(hasData ? (bpp * width * height) : 0), - mIccSize(iccSize), mBitDepth(bitDepth) { + mRotationAngle(angle), mBytesPerPixel(bpp), mIccSize(iccSize), + mBitDepth(bitDepth) { + uint32_t multVal; + mRowBytes = __builtin_mul_overflow(bpp, width, &multVal) ? 0 : multVal; + mSize = __builtin_mul_overflow(multVal, height, &multVal) ? 
0 : multVal; + if (hasData && (mRowBytes == 0 || mSize == 0)) { + ALOGE("Frame rowBytes/ size overflow %dx%d bpp %d", width, height, bpp); + android_errorWriteLog(0x534e4554, "233006499"); + } } void init(const VideoFrame& copy, const void* iccData, size_t iccSize) { diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp index 57c7d4dff1..d35bc0a590 100644 --- a/media/codec2/sfplugin/CCodec.cpp +++ b/media/codec2/sfplugin/CCodec.cpp @@ -877,6 +877,16 @@ void CCodec::configure(const sp<AMessage> &msg) { if (msg->findInt32(KEY_PUSH_BLANK_BUFFERS_ON_STOP, &pushBlankBuffersOnStop)) { config->mPushBlankBuffersOnStop = pushBlankBuffersOnStop == 1; } + // secure compoment or protected content default with + // "push-blank-buffers-on-shutdown" flag + if (!config->mPushBlankBuffersOnStop) { + int32_t usageProtected; + if (comp->getName().find(".secure") != std::string::npos) { + config->mPushBlankBuffersOnStop = true; + } else if (msg->findInt32("protected", &usageProtected) && usageProtected) { + config->mPushBlankBuffersOnStop = true; + } + } } } setSurface(surface); @@ -1869,18 +1879,14 @@ void CCodec::initiateStop() { } state->set(STOPPING); } - { - Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig); - const std::unique_ptr<Config> &config = *configLocked; - if (config->mPushBlankBuffersOnStop) { - mChannel->pushBlankBufferToOutputSurface(); - } - } mChannel->reset(); - (new AMessage(kWhatStop, this))->post(); + bool pushBlankBuffer = mConfig.lock().get()->mPushBlankBuffersOnStop; + sp<AMessage> stopMessage(new AMessage(kWhatStop, this)); + stopMessage->setInt32("pushBlankBuffer", pushBlankBuffer); + stopMessage->post(); } -void CCodec::stop() { +void CCodec::stop(bool pushBlankBuffer) { std::shared_ptr<Codec2Client::Component> comp; { Mutexed<State>::Locked state(mState); @@ -1899,7 +1905,7 @@ void CCodec::stop() { comp = state->comp; } status_t err = comp->stop(); - mChannel->stopUseOutputSurface(); + 
mChannel->stopUseOutputSurface(pushBlankBuffer); if (err != C2_OK) { // TODO: convert err into status_t mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL); @@ -1964,21 +1970,16 @@ void CCodec::initiateRelease(bool sendCallback /* = true */) { config->mInputSurfaceDataspace = HAL_DATASPACE_UNKNOWN; } } - { - Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig); - const std::unique_ptr<Config> &config = *configLocked; - if (config->mPushBlankBuffersOnStop) { - mChannel->pushBlankBufferToOutputSurface(); - } - } mChannel->reset(); + bool pushBlankBuffer = mConfig.lock().get()->mPushBlankBuffersOnStop; // thiz holds strong ref to this while the thread is running. sp<CCodec> thiz(this); - std::thread([thiz, sendCallback] { thiz->release(sendCallback); }).detach(); + std::thread([thiz, sendCallback, pushBlankBuffer] + { thiz->release(sendCallback, pushBlankBuffer); }).detach(); } -void CCodec::release(bool sendCallback) { +void CCodec::release(bool sendCallback, bool pushBlankBuffer) { std::shared_ptr<Codec2Client::Component> comp; { Mutexed<State>::Locked state(mState); @@ -1993,7 +1994,7 @@ void CCodec::release(bool sendCallback) { comp = state->comp; } comp->release(); - mChannel->stopUseOutputSurface(); + mChannel->stopUseOutputSurface(pushBlankBuffer); { Mutexed<State>::Locked state(mState); @@ -2007,6 +2008,7 @@ void CCodec::release(bool sendCallback) { } status_t CCodec::setSurface(const sp<Surface> &surface) { + bool pushBlankBuffer = false; { Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig); const std::unique_ptr<Config> &config = *configLocked; @@ -2032,8 +2034,9 @@ status_t CCodec::setSurface(const sp<Surface> &surface) { return err; } } + pushBlankBuffer = config->mPushBlankBuffersOnStop; } - return mChannel->setSurface(surface); + return mChannel->setSurface(surface, pushBlankBuffer); } void CCodec::signalFlush() { @@ -2335,7 +2338,11 @@ void CCodec::onMessageReceived(const sp<AMessage> &msg) { case kWhatStop: { // 
C2Component::stop() should return within 500ms. setDeadline(now, 1500ms, "stop"); - stop(); + int32_t pushBlankBuffer; + if (!msg->findInt32("pushBlankBuffer", &pushBlankBuffer)) { + pushBlankBuffer = 0; + } + stop(static_cast<bool>(pushBlankBuffer)); break; } case kWhatFlush: { diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp index 2118ea7b29..8dcf6eb57f 100644 --- a/media/codec2/sfplugin/CCodecBufferChannel.cpp +++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp @@ -1660,14 +1660,22 @@ void CCodecBufferChannel::stop() { mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed); } -void CCodecBufferChannel::stopUseOutputSurface() { - if (mOutputSurface.lock()->surface) { +void CCodecBufferChannel::stopUseOutputSurface(bool pushBlankBuffer) { + sp<Surface> surface = mOutputSurface.lock()->surface; + if (surface) { C2BlockPool::local_id_t outputPoolId; { Mutexed<BlockPools>::Locked pools(mBlockPools); outputPoolId = pools->outputPoolId; } if (mComponent) mComponent->stopUsingOutputSurface(outputPoolId); + + if (pushBlankBuffer) { + sp<ANativeWindow> anw = static_cast<ANativeWindow *>(surface.get()); + if (anw) { + pushBlankBuffersToNativeWindow(anw.get()); + } + } } } @@ -2195,14 +2203,20 @@ void CCodecBufferChannel::sendOutputBuffers() { } } -status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) { +status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface, bool pushBlankBuffer) { static std::atomic_uint32_t surfaceGeneration{0}; uint32_t generation = (getpid() << 10) | ((surfaceGeneration.fetch_add(1, std::memory_order_relaxed) + 1) & ((1 << 10) - 1)); sp<IGraphicBufferProducer> producer; - int maxDequeueCount = mOutputSurface.lock()->maxDequeueBuffers; + int maxDequeueCount; + sp<Surface> oldSurface; + { + Mutexed<OutputSurface>::Locked outputSurface(mOutputSurface); + maxDequeueCount = outputSurface->maxDequeueBuffers; + oldSurface = outputSurface->surface; + } if 
(newSurface) { newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW); newSurface->setDequeueTimeout(kDequeueTimeoutNs); @@ -2239,6 +2253,15 @@ status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) { output->generation = generation; } + if (oldSurface && pushBlankBuffer) { + // When ReleaseSurface was set from MediaCodec, + // pushing a blank buffer at the end might be necessary. + sp<ANativeWindow> anw = static_cast<ANativeWindow *>(oldSurface.get()); + if (anw) { + pushBlankBuffersToNativeWindow(anw.get()); + } + } + return OK; } @@ -2330,13 +2353,4 @@ status_t toStatusT(c2_status_t c2s, c2_operation_t c2op) { } } -status_t CCodecBufferChannel::pushBlankBufferToOutputSurface() { - Mutexed<OutputSurface>::Locked output(mOutputSurface); - sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(output->surface.get()); - if (nativeWindow == nullptr) { - return INVALID_OPERATION; - } - return pushBlankBuffersToNativeWindow(nativeWindow.get()); -} - } // namespace android diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h index c12c9665e4..79bdd1fb05 100644 --- a/media/codec2/sfplugin/CCodecBufferChannel.h +++ b/media/codec2/sfplugin/CCodecBufferChannel.h @@ -103,7 +103,7 @@ public: /** * Set output graphic surface for rendering. */ - status_t setSurface(const sp<Surface> &surface); + status_t setSurface(const sp<Surface> &surface, bool pushBlankBuffer); /** * Set GraphicBufferSource object from which the component extracts input @@ -152,8 +152,10 @@ public: /** * Stop using buffers of the current output surface for other Codec * instances to use the surface safely. + * + * \param pushBlankBuffer[in] push a blank buffer at the end if true */ - void stopUseOutputSurface(); + void stopUseOutputSurface(bool pushBlankBuffer); /** * Stop queueing buffers to the component. 
This object should never queue @@ -202,11 +204,6 @@ public: void setMetaMode(MetaMode mode); - /** - * Push a blank buffer to the configured native output surface. - */ - status_t pushBlankBufferToOutputSurface(); - private: class QueueGuard; diff --git a/media/codec2/sfplugin/include/media/stagefright/CCodec.h b/media/codec2/sfplugin/include/media/stagefright/CCodec.h index ec18128df0..13713bcce1 100644 --- a/media/codec2/sfplugin/include/media/stagefright/CCodec.h +++ b/media/codec2/sfplugin/include/media/stagefright/CCodec.h @@ -109,9 +109,9 @@ private: void allocate(const sp<MediaCodecInfo> &codecInfo); void configure(const sp<AMessage> &msg); void start(); - void stop(); + void stop(bool pushBlankBuffer); void flush(); - void release(bool sendCallback); + void release(bool sendCallback, bool pushBlankBuffer); /** * Creates an input surface for the current device configuration compatible with CCodec. diff --git a/media/janitors/media_solutions_OWNERS b/media/janitors/media_solutions_OWNERS index 8dc1c7b512..e0c87f7fea 100644 --- a/media/janitors/media_solutions_OWNERS +++ b/media/janitors/media_solutions_OWNERS @@ -1,10 +1,21 @@ # Bug component: 1344 # go/android-fwk-media-solutions for info on areas of ownership. 
-# Main owners: +# MediaRouter and native mirroring only: +adadukin@google.com aquilescanta@google.com -krocard@google.com +bishoygendy@google.com +ivanbuper@google.com -# In case of emergency: -andrewlewis@google.com #{LAST_RESORT_SUGGESTION} -olly@google.com #{LAST_RESORT_SUGGESTION} +# MediaMuxer, MediaRecorder, and seamless transcoding only: +andrewlewis@google.com +claincly@google.com + +# Everything in go/android-fwk-media-solutions not covered above: +bachinger@google.com +christosts@google.com +ibaker@google.com +michaelkatz@google.com +rohks@google.com +tianyifeng@google.com +tonihei@google.com diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp index af54be224f..41a86a69db 100644 --- a/media/libaudioclient/AudioTrack.cpp +++ b/media/libaudioclient/AudioTrack.cpp @@ -1871,13 +1871,21 @@ audio_io_handle_t AudioTrack::getOutput() const status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) { AutoMutex lock(mLock); - ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d", - __func__, mPortId, deviceId, mSelectedDeviceId); + ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d mRoutedDeviceId %d", + __func__, mPortId, deviceId, mSelectedDeviceId, mRoutedDeviceId); if (mSelectedDeviceId != deviceId) { mSelectedDeviceId = deviceId; - if (mStatus == NO_ERROR) { - android_atomic_or(CBLK_INVALID, &mCblk->mFlags); - mProxy->interrupt(); + if (mStatus == NO_ERROR && mSelectedDeviceId != mRoutedDeviceId) { + if (isPlaying_l()) { + android_atomic_or(CBLK_INVALID, &mCblk->mFlags); + mProxy->interrupt(); + } else { + // if the track is idle, try to restore now and + // defer to next start if not possible + if (restoreTrack_l("setOutputDevice") != OK) { + android_atomic_or(CBLK_INVALID, &mCblk->mFlags); + } + } } } return NO_ERROR; diff --git a/media/libaudioclient/aidl/android/media/ISpatializer.aidl b/media/libaudioclient/aidl/android/media/ISpatializer.aidl index a61ad58eeb..250c450b7b 100644 --- 
a/media/libaudioclient/aidl/android/media/ISpatializer.aidl +++ b/media/libaudioclient/aidl/android/media/ISpatializer.aidl @@ -96,17 +96,33 @@ interface ISpatializer { /** * Sets the display orientation. + * + * This is the rotation of the displayed content relative to its natural orientation. + * * Orientation is expressed in the angle of rotation from the physical "up" side of the screen * to the logical "up" side of the content displayed the screen. Counterclockwise angles, as * viewed while facing the screen are positive. + * + * Note: DisplayManager currently only returns this in increments of 90 degrees, + * so the values will be 0, PI/2, PI, 3PI/2. */ void setDisplayOrientation(float physicalToLogicalAngle); /** * Sets the hinge angle for foldable devices. + * + * Per the hinge angle sensor, this returns a value from 0 to 2PI. + * The value of 0 is considered closed, and PI is considered flat open. */ void setHingeAngle(float hingeAngle); + /** + * Sets whether a foldable is considered "folded" or not. + * + * The fold state may affect which physical screen is active for display. + */ + void setFoldState(boolean folded); + /** Reports the list of supported spatialization modess (see SpatializationMode.aidl). * The list should never be empty if an ISpatializer interface was successfully * retrieved with IAudioPolicyService.getSpatializer(). 
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h index 841fb0c61b..b52d1d577b 100644 --- a/media/libaudioclient/include/media/AudioTrack.h +++ b/media/libaudioclient/include/media/AudioTrack.h @@ -1177,6 +1177,9 @@ public: bool isPlaying() { AutoMutex lock(mLock); + return isPlaying_l(); + } + bool isPlaying_l() { return mState == STATE_ACTIVE || mState == STATE_STOPPING; } diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp index 7d7f8b955e..8870bb0f46 100644 --- a/media/libeffects/lvm/tests/Android.bp +++ b/media/libeffects/lvm/tests/Android.bp @@ -14,6 +14,10 @@ cc_test { vendor: true, gtest: true, host_supported: true, + // TODO(b/269868814) + test_options: { + unit_test: false, + }, srcs: [ "EffectReverbTest.cpp", "EffectTestHelper.cpp", diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp index f64aedf966..995586288a 100644 --- a/media/libheadtracking/Android.bp +++ b/media/libheadtracking/Android.bp @@ -16,11 +16,13 @@ cc_library { "Pose.cpp", "PoseBias.cpp", "PoseDriftCompensator.cpp", + "PosePredictor.cpp", "PoseRateLimiter.cpp", "QuaternionUtil.cpp", "ScreenHeadFusion.cpp", "StillnessDetector.cpp", "Twist.cpp", + "VectorRecorder.cpp", ], shared_libs: [ "libaudioutils", @@ -35,6 +37,15 @@ cc_library { export_header_lib_headers: [ "libeigen", ], + cflags: [ + "-Wthread-safety", + ], + product_variables: { + debuggable: { + // enable experiments only in userdebug and eng builds + cflags: ["-DENABLE_VERIFICATION"], + }, + }, } cc_library { @@ -76,6 +87,7 @@ cc_test_host { "Pose-test.cpp", "PoseBias-test.cpp", "PoseDriftCompensator-test.cpp", + "PosePredictor.cpp", "PoseRateLimiter-test.cpp", "QuaternionUtil-test.cpp", "ScreenHeadFusion-test.cpp", @@ -84,6 +96,7 @@ cc_test_host { ], shared_libs: [ "libaudioutils", + "libbase", // StringAppendF "libheadtracking", ], } diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp 
b/media/libheadtracking/HeadTrackingProcessor-test.cpp index 299192ff74..5190f528fc 100644 --- a/media/libheadtracking/HeadTrackingProcessor-test.cpp +++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp @@ -15,10 +15,10 @@ */ #include "media/HeadTrackingProcessor.h" +#include "media/QuaternionUtil.h" #include <gtest/gtest.h> -#include "QuaternionUtil.h" #include "TestUtil.h" namespace android { @@ -82,6 +82,8 @@ TEST(HeadTrackingProcessor, Prediction) { std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor( Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE); + processor->setPosePredictorType(PosePredictorType::TWIST); + // Establish a baseline for the drift compensators. processor->setWorldToHeadPose(0, Pose3f(), Twist3f()); processor->setWorldToScreenPose(0, Pose3f()); diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp index 101b8256eb..54d08d2833 100644 --- a/media/libheadtracking/HeadTrackingProcessor.cpp +++ b/media/libheadtracking/HeadTrackingProcessor.cpp @@ -18,10 +18,11 @@ #include <android-base/stringprintf.h> #include <audio_utils/SimpleLog.h> #include "media/HeadTrackingProcessor.h" +#include "media/QuaternionUtil.h" #include "ModeSelector.h" #include "PoseBias.h" -#include "QuaternionUtil.h" +#include "PosePredictor.h" #include "ScreenHeadFusion.h" #include "StillnessDetector.h" @@ -59,8 +60,8 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead, const Twist3f& headTwist) override { - Pose3f predictedWorldToHead = - worldToHead * integrate(headTwist, mOptions.predictionDuration); + const Pose3f predictedWorldToHead = mPosePredictor.predict( + timestamp, worldToHead, headTwist, mOptions.predictionDuration); mHeadPoseBias.setInput(predictedWorldToHead); mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead); mWorldToHeadTimestamp = timestamp; @@ -97,7 
+98,7 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable); // Whenever the screen is unstable, recenter the head pose. if (!screenStable) { - recenter(true, false); + recenter(true, false, "calculate: screen movement"); } mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(), worldToLogicalScreen); @@ -109,7 +110,7 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { // Auto-recenter. bool headStable = mHeadStillnessDetector.calculate(timestamp); if (headStable || !screenStable) { - recenter(true, false); + recenter(true, false, "calculate: head movement"); worldToHead = mHeadPoseBias.getOutput(); } @@ -139,16 +140,16 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); } - void recenter(bool recenterHead, bool recenterScreen) override { + void recenter(bool recenterHead, bool recenterScreen, std::string source) override { if (recenterHead) { mHeadPoseBias.recenter(); mHeadStillnessDetector.reset(); - mLocalLog.log("recenter Head"); + mLocalLog.log("recenter Head from %s", source.c_str()); } if (recenterScreen) { mScreenPoseBias.recenter(); mScreenStillnessDetector.reset(); - mLocalLog.log("recenter Screen"); + mLocalLog.log("recenter Screen from %s", source.c_str()); } // If a sensor being recentered is included in the current mode, apply rate limiting to @@ -161,6 +162,10 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { } } + void setPosePredictorType(PosePredictorType type) override { + mPosePredictor.setPosePredictorType(type); + } + std::string toString_l(unsigned level) const override { std::string prefixSpace(level, ' '); std::string ss = prefixSpace + "HeadTrackingProcessor:\n"; @@ -186,6 +191,7 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { prefixSpace.c_str(), 
mOptions.screenStillnessRotationalThreshold); ss += mModeSelector.toString(level + 1); ss += mRateLimiter.toString(level + 1); + ss += mPosePredictor.toString(level + 1); ss.append(prefixSpace + "ReCenterHistory:\n"); ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), mMaxLocalLogLine); return ss; @@ -207,6 +213,7 @@ class HeadTrackingProcessorImpl : public HeadTrackingProcessor { ScreenHeadFusion mScreenHeadFusion; ModeSelector mModeSelector; PoseRateLimiter mRateLimiter; + PosePredictor mPosePredictor; static constexpr std::size_t mMaxLocalLogLine = 10; SimpleLog mLocalLog{mMaxLocalLogLine}; }; @@ -230,5 +237,26 @@ std::string toString(HeadTrackingMode mode) { return "EnumNotImplemented"; }; +std::string toString(PosePredictorType posePredictorType) { + switch (posePredictorType) { + case PosePredictorType::AUTO: return "AUTO"; + case PosePredictorType::LAST: return "LAST"; + case PosePredictorType::TWIST: return "TWIST"; + case PosePredictorType::LEAST_SQUARES: return "LEAST_SQUARES"; + } + return "UNKNOWN" + std::to_string((int)posePredictorType); +} + +bool isValidPosePredictorType(PosePredictorType posePredictorType) { + switch (posePredictorType) { + case PosePredictorType::AUTO: + case PosePredictorType::LAST: + case PosePredictorType::TWIST: + case PosePredictorType::LEAST_SQUARES: + return true; + } + return false; +} + } // namespace media } // namespace android diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp index a136e6bf18..6925908234 100644 --- a/media/libheadtracking/ModeSelector-test.cpp +++ b/media/libheadtracking/ModeSelector-test.cpp @@ -18,7 +18,7 @@ #include <gtest/gtest.h> -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" #include "TestUtil.h" namespace android { diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp index 6277090257..7ee21b394c 100644 --- a/media/libheadtracking/ModeSelector.cpp +++ 
b/media/libheadtracking/ModeSelector.cpp @@ -117,10 +117,12 @@ HeadTrackingMode ModeSelector::getActualMode() const { std::string ModeSelector::toString(unsigned level) const { std::string prefixSpace(level, ' '); std::string ss(prefixSpace); - StringAppendF(&ss, "ModeSelector: ScreenToStage %s\n", - mScreenToStage.toString().c_str()); - ss.append(prefixSpace + "Mode downgrade history:\n"); - ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine); + ss.append("ModeSelector: ScreenToStage ") + .append(mScreenToStage.toString()) + .append("\n") + .append(prefixSpace) + .append("Mode change history:\n") + .append(mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine)); return ss; } diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp index a9e18ce4ce..29dba29306 100644 --- a/media/libheadtracking/Pose-test.cpp +++ b/media/libheadtracking/Pose-test.cpp @@ -18,7 +18,7 @@ #include <gtest/gtest.h> -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" #include "TestUtil.h" using android::media::Pose3f; diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp index 4a4b56a35c..e03725bd6f 100644 --- a/media/libheadtracking/Pose.cpp +++ b/media/libheadtracking/Pose.cpp @@ -16,8 +16,8 @@ #include <android-base/stringprintf.h> #include "media/Pose.h" +#include "media/QuaternionUtil.h" #include "media/Twist.h" -#include "QuaternionUtil.h" namespace android { namespace media { diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp index 9f42a2ca0e..659dda0da7 100644 --- a/media/libheadtracking/PoseBias-test.cpp +++ b/media/libheadtracking/PoseBias-test.cpp @@ -17,7 +17,8 @@ #include <gtest/gtest.h> #include "PoseBias.h" -#include "QuaternionUtil.h" + +#include "media/QuaternionUtil.h" #include "TestUtil.h" namespace android { diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp 
index df0a05fe36..521e3ebf25 100644 --- a/media/libheadtracking/PoseDriftCompensator-test.cpp +++ b/media/libheadtracking/PoseDriftCompensator-test.cpp @@ -18,7 +18,8 @@ #include <cmath> #include "PoseDriftCompensator.h" -#include "QuaternionUtil.h" + +#include "media/QuaternionUtil.h" #include "TestUtil.h" namespace android { diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp index 0e90cadfc3..2775790a24 100644 --- a/media/libheadtracking/PoseDriftCompensator.cpp +++ b/media/libheadtracking/PoseDriftCompensator.cpp @@ -18,7 +18,7 @@ #include <cmath> -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" namespace android { namespace media { diff --git a/media/libheadtracking/PosePredictor.cpp b/media/libheadtracking/PosePredictor.cpp new file mode 100644 index 0000000000..5209d549e5 --- /dev/null +++ b/media/libheadtracking/PosePredictor.cpp @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "PosePredictor.h" + +namespace android::media { + +namespace { +#ifdef ENABLE_VERIFICATION +constexpr bool kEnableVerification = true; +constexpr std::array<int, 3> kLookAheadMs{ 50, 100, 200 }; +#else +constexpr bool kEnableVerification = false; +constexpr std::array<int, 0> kLookAheadMs{}; +#endif + +} // namespace + +void LeastSquaresPredictor::add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) +{ + (void)twist; + mLastAtNs = atNs; + mLastPose = pose; + const auto q = pose.rotation(); + const double datNs = static_cast<double>(atNs); + mRw.add({datNs, q.w()}); + mRx.add({datNs, q.x()}); + mRy.add({datNs, q.y()}); + mRz.add({datNs, q.z()}); +} + +Pose3f LeastSquaresPredictor::predict(int64_t atNs) const +{ + if (mRw.getN() < kMinimumSamplesForPrediction) return mLastPose; + + /* + * Using parametric form, we have q(t) = { w(t), x(t), y(t), z(t) }. + * We compute the least squares prediction of w, x, y, z. + */ + const double dLookahead = static_cast<double>(atNs); + Eigen::Quaternionf lsq( + mRw.getYFromX(dLookahead), + mRx.getYFromX(dLookahead), + mRy.getYFromX(dLookahead), + mRz.getYFromX(dLookahead)); + + /* + * We cheat here, since the result lsq is the least squares prediction + * in H (arbitrary quaternion), not the least squares prediction in + * SO(3) (unit quaternion). + * + * In other words, the result for lsq is most likely not a unit quaternion. + * To solve this, we normalize, thereby selecting the closest unit quaternion + * in SO(3) to the prediction in H. 
+ */ + lsq.normalize(); + return Pose3f(lsq); +} + +void LeastSquaresPredictor::reset() { + mLastAtNs = {}; + mLastPose = {}; + mRw.reset(); + mRx.reset(); + mRy.reset(); + mRz.reset(); +} + +std::string LeastSquaresPredictor::toString(size_t index) const { + std::string s(index, ' '); + s.append("LeastSquaresPredictor using alpha: ") + .append(std::to_string(mAlpha)) + .append(" last pose: ") + .append(mLastPose.toString()) + .append("\n"); + return s; +} + +// Formatting +static inline std::vector<size_t> createDelimiterIdx(size_t predictors, size_t lookaheads) { + if (lookaheads == 0) return {}; + --lookaheads; + std::vector<size_t> delimiterIdx(lookaheads); + for (size_t i = 0; i < lookaheads; ++i) { + delimiterIdx[i] = (i + 1) * predictors; + } + return delimiterIdx; +} + +PosePredictor::PosePredictor() + : mPredictors{ + // First predictors must match switch in getCurrentPredictor() + std::make_shared<LastPredictor>(), + std::make_shared<TwistPredictor>(), + std::make_shared<LeastSquaresPredictor>(), + // After this, can place additional predictors here for comparison such as + // std::make_shared<LeastSquaresPredictor>(0.25), + } + , mLookaheadMs(kLookAheadMs.begin(), kLookAheadMs.end()) + , mVerifiers(std::size(mLookaheadMs) * std::size(mPredictors)) + , mDelimiterIdx(createDelimiterIdx(std::size(mPredictors), std::size(mLookaheadMs))) + , mPredictionRecorder( + std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */, + mDelimiterIdx) + , mPredictionDurableRecorder( + std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */, + mDelimiterIdx) + { +} + +Pose3f PosePredictor::predict( + int64_t timestampNs, const Pose3f& pose, const Twist3f& twist, float predictionDurationNs) +{ + if (timestampNs - mLastTimestampNs > kMaximumSampleIntervalBeforeResetNs) { + for (const auto& predictor : mPredictors) { + predictor->reset(); + } + ++mResets; + } + mLastTimestampNs = timestampNs; + + auto selectedPredictor = 
getCurrentPredictor(); + if constexpr (kEnableVerification) { + // Update all Predictors + for (const auto& predictor : mPredictors) { + predictor->add(timestampNs, pose, twist); + } + + // Update Verifiers and calculate errors + std::vector<float> error(std::size(mVerifiers)); + for (size_t i = 0; i < mLookaheadMs.size(); ++i) { + constexpr float RADIAN_TO_DEGREES = 180 / M_PI; + const int64_t atNs = + timestampNs + mLookaheadMs[i] * PosePredictorVerifier::kMillisToNanos; + + for (size_t j = 0; j < mPredictors.size(); ++j) { + const size_t idx = i * std::size(mPredictors) + j; + mVerifiers[idx].verifyActualPose(timestampNs, pose); + mVerifiers[idx].addPredictedPose(atNs, mPredictors[j]->predict(atNs)); + error[idx] = RADIAN_TO_DEGREES * mVerifiers[idx].lastError(); + } + } + // Record errors + mPredictionRecorder.record(error); + mPredictionDurableRecorder.record(error); + } else /* constexpr */ { + selectedPredictor->add(timestampNs, pose, twist); + } + + // Deliver prediction + const int64_t predictionTimeNs = timestampNs + (int64_t)predictionDurationNs; + return selectedPredictor->predict(predictionTimeNs); +} + +void PosePredictor::setPosePredictorType(PosePredictorType type) { + if (!isValidPosePredictorType(type)) return; + if (type == mSetType) return; + mSetType = type; + if (type == android::media::PosePredictorType::AUTO) { + type = android::media::PosePredictorType::LEAST_SQUARES; + } + if (type != mCurrentType) { + mCurrentType = type; + if constexpr (!kEnableVerification) { + // Verification keeps all predictors up-to-date. + // If we don't enable verification, we must reset the current predictor. 
+ getCurrentPredictor()->reset(); + } + } +} + +std::string PosePredictor::toString(size_t index) const { + std::string prefixSpace(index, ' '); + std::string ss(prefixSpace); + ss.append("PosePredictor:\n") + .append(prefixSpace) + .append(" Current Prediction Type: ") + .append(android::media::toString(mCurrentType)) + .append("\n") + .append(prefixSpace) + .append(" Resets: ") + .append(std::to_string(mResets)) + .append("\n") + .append(getCurrentPredictor()->toString(index + 1)); + if constexpr (kEnableVerification) { + // dump verification + ss.append(prefixSpace) + .append(" Prediction abs error (L1) degrees [ type ("); + for (size_t i = 0; i < mPredictors.size(); ++i) { + if (i > 0) ss.append(" , "); + ss.append(mPredictors[i]->name()); + } + ss.append(" ) x ( "); + for (size_t i = 0; i < mLookaheadMs.size(); ++i) { + if (i > 0) ss.append(" : "); + ss.append(std::to_string(mLookaheadMs[i])); + } + std::vector<float> cumulativeAverageErrors(std::size(mVerifiers)); + for (size_t i = 0; i < cumulativeAverageErrors.size(); ++i) { + cumulativeAverageErrors[i] = mVerifiers[i].cumulativeAverageError(); + } + ss.append(" ) ms ]\n") + .append(prefixSpace) + .append(" Cumulative Average Error:\n") + .append(prefixSpace) + .append(" ") + .append(VectorRecorder::toString(cumulativeAverageErrors, mDelimiterIdx, "%.3g")) + .append("\n") + .append(prefixSpace) + .append(" PerMinuteHistory:\n") + .append(mPredictionDurableRecorder.toString(index + 3)) + .append(prefixSpace) + .append(" PerSecondHistory:\n") + .append(mPredictionRecorder.toString(index + 3)); + } + return ss; +} + +std::shared_ptr<PredictorBase> PosePredictor::getCurrentPredictor() const { + // we don't use a map here, we look up directly + switch (mCurrentType) { + default: + case android::media::PosePredictorType::LAST: + return mPredictors[0]; + case android::media::PosePredictorType::TWIST: + return mPredictors[1]; + case android::media::PosePredictorType::AUTO: // shouldn't occur here. 
+ case android::media::PosePredictorType::LEAST_SQUARES: + return mPredictors[2]; + } +} + +} // namespace android::media diff --git a/media/libheadtracking/PosePredictor.h b/media/libheadtracking/PosePredictor.h new file mode 100644 index 0000000000..53211e3820 --- /dev/null +++ b/media/libheadtracking/PosePredictor.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "PosePredictorVerifier.h" +#include <memory> +#include <audio_utils/Statistics.h> +#include <media/PosePredictorType.h> +#include <media/Twist.h> +#include <media/VectorRecorder.h> + +namespace android::media { + +// Interface for generic pose predictors +class PredictorBase { +public: + virtual ~PredictorBase() = default; + virtual void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) = 0; + virtual Pose3f predict(int64_t atNs) const = 0; + virtual void reset() = 0; + virtual std::string name() const = 0; + virtual std::string toString(size_t index) const = 0; +}; + +/** + * LastPredictor uses the last sample Pose for prediction + * + * This class is not thread-safe. 
+ */ +class LastPredictor : public PredictorBase { +public: + void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override { + (void)atNs; + (void)twist; + mLastPose = pose; + } + + Pose3f predict(int64_t atNs) const override { + (void)atNs; + return mLastPose; + } + + void reset() override { + mLastPose = {}; + } + + std::string name() const override { + return "LAST"; + } + + std::string toString(size_t index) const override { + std::string s(index, ' '); + s.append("LastPredictor using last pose: ") + .append(mLastPose.toString()) + .append("\n"); + return s; + } + +private: + Pose3f mLastPose; +}; + +/** + * TwistPredictor uses the last sample Twist and Pose for prediction + * + * This class is not thread-safe. + */ +class TwistPredictor : public PredictorBase { +public: + void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override { + mLastAtNs = atNs; + mLastPose = pose; + mLastTwist = twist; + } + + Pose3f predict(int64_t atNs) const override { + return mLastPose * integrate(mLastTwist, atNs - mLastAtNs); + } + + void reset() override { + mLastAtNs = {}; + mLastPose = {}; + mLastTwist = {}; + } + + std::string name() const override { + return "TWIST"; + } + + std::string toString(size_t index) const override { + std::string s(index, ' '); + s.append("TwistPredictor using last pose: ") + .append(mLastPose.toString()) + .append(" last twist: ") + .append(mLastTwist.toString()) + .append("\n"); + return s; + } + +private: + int64_t mLastAtNs{}; + Pose3f mLastPose; + Twist3f mLastTwist; +}; + + +/** + * LeastSquaresPredictor uses the Pose history for prediction. + * + * A exponential weighted least squares is used. + * + * This class is not thread-safe. + */ +class LeastSquaresPredictor : public PredictorBase { +public: + // alpha is the exponential decay. 
+ LeastSquaresPredictor(double alpha = kDefaultAlphaEstimator) + : mAlpha(alpha) + , mRw(alpha) + , mRx(alpha) + , mRy(alpha) + , mRz(alpha) + {} + + void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override; + Pose3f predict(int64_t atNs) const override; + void reset() override; + std::string name() const override { + return "LEAST_SQUARES(" + std::to_string(mAlpha) + ")"; + } + std::string toString(size_t index) const override; + +private: + const double mAlpha; + int64_t mLastAtNs{}; + Pose3f mLastPose; + static constexpr double kDefaultAlphaEstimator = 0.2; + static constexpr size_t kMinimumSamplesForPrediction = 4; + audio_utils::LinearLeastSquaresFit<double> mRw; + audio_utils::LinearLeastSquaresFit<double> mRx; + audio_utils::LinearLeastSquaresFit<double> mRy; + audio_utils::LinearLeastSquaresFit<double> mRz; +}; + +/* + * PosePredictor predicts the pose given sensor input at a time in the future. + * + * This class is not thread safe. + */ +class PosePredictor { +public: + PosePredictor(); + + Pose3f predict(int64_t timestampNs, const Pose3f& pose, const Twist3f& twist, + float predictionDurationNs); + + void setPosePredictorType(PosePredictorType type); + + // convert predictions to a printable string + std::string toString(size_t index) const; + +private: + static constexpr int64_t kMaximumSampleIntervalBeforeResetNs = + 300'000'000; + + // Predictors + const std::vector<std::shared_ptr<PredictorBase>> mPredictors; + + // Verifiers, create one for an array of future lookaheads for comparison. 
+ const std::vector<int> mLookaheadMs; + + std::vector<PosePredictorVerifier> mVerifiers; + + const std::vector<size_t> mDelimiterIdx; + + // Recorders + media::VectorRecorder mPredictionRecorder{ + std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */, + mDelimiterIdx}; + media::VectorRecorder mPredictionDurableRecorder{ + std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */, + mDelimiterIdx}; + + // Status + + // SetType is the externally set predictor type. It may include AUTO. + PosePredictorType mSetType = PosePredictorType::LEAST_SQUARES; + + // CurrentType is the actual predictor type used by this class. + // It does not include AUTO because that metatype means the class + // chooses the best predictor type based on sensor statistics. + PosePredictorType mCurrentType = PosePredictorType::LEAST_SQUARES; + + int64_t mResets{}; + int64_t mLastTimestampNs{}; + + // Returns current predictor + std::shared_ptr<PredictorBase> getCurrentPredictor() const; +}; + +} // namespace android::media diff --git a/media/libheadtracking/PosePredictorVerifier.h b/media/libheadtracking/PosePredictorVerifier.h new file mode 100644 index 0000000000..6b4a357fb2 --- /dev/null +++ b/media/libheadtracking/PosePredictorVerifier.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include <string> + +#include <audio_utils/Statistics.h> +#include <media/Pose.h> + +namespace android::media { + +/** + * PosePredictorVerifier is used to validate predictions + * + * This class is not thread-safe + */ +class PosePredictorVerifier { +public: + std::string toString() const { + return mErrorStats.toString(); + } + + static constexpr int64_t kMillisToNanos = 1000000; + + void verifyActualPose(int64_t timestampNs, const Pose3f& pose) { + for (auto it = mPredictions.begin(); it != mPredictions.end();) { + if (it->first < timestampNs) { + it = mPredictions.erase(it); + } else { + int64_t dt = it->first - timestampNs; + if (std::abs(dt) < 10 * kMillisToNanos) { + const float angle = pose.rotation().angularDistance(it->second.rotation()); + const float error = std::abs(angle); // L1 (absolute difference) here. + mLastError = error; + mErrorStats.add(error); + } + break; + } + } + } + + void addPredictedPose(int64_t atNs, const Pose3f& pose) { + mPredictions.emplace_back(atNs, pose); + } + + float lastError() const { + return mLastError; + } + + float cumulativeAverageError() const { + return mErrorStats.getMean(); + } + +private: + static constexpr double kCumulativeErrorAlpha = 0.999; + std::deque<std::pair<int64_t, Pose3f>> mPredictions; + float mLastError{}; + android::audio_utils::Statistics<double> mErrorStats{kCumulativeErrorAlpha}; +}; + +} // namespace androd::media diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp index f306183a43..ded874af2d 100644 --- a/media/libheadtracking/PoseRateLimiter-test.cpp +++ b/media/libheadtracking/PoseRateLimiter-test.cpp @@ -17,7 +17,8 @@ #include <gtest/gtest.h> #include "PoseRateLimiter.h" -#include "QuaternionUtil.h" + +#include "media/QuaternionUtil.h" #include "TestUtil.h" namespace android { diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp index e79e54ab70..cfeca00fc4 
100644 --- a/media/libheadtracking/QuaternionUtil-test.cpp +++ b/media/libheadtracking/QuaternionUtil-test.cpp @@ -16,7 +16,7 @@ #include <gtest/gtest.h> -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" #include "TestUtil.h" using Eigen::Quaternionf; @@ -51,6 +51,92 @@ TEST(QuaternionUtil, RoundTripFromVector) { EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec))); } +// Float precision necessitates this precision (1e-4f fails) +constexpr float NEAR = 1e-3f; + +TEST(QuaternionUtil, quaternionToAngles_basic) { + float pitch, roll, yaw; + + // angles as reported. + // choose 11 angles between -M_PI / 2 to M_PI / 2 + for (int step = -5; step <= 5; ++step) { + const float angle = M_PI * step * 0.1f; + + quaternionToAngles(rotationVectorToQuaternion({angle, 0.f, 0.f}), &pitch, &roll, &yaw); + EXPECT_NEAR(angle, pitch, NEAR); + EXPECT_NEAR(0.f, roll, NEAR); + EXPECT_NEAR(0.f, yaw, NEAR); + + quaternionToAngles(rotationVectorToQuaternion({0.f, angle, 0.f}), &pitch, &roll, &yaw); + EXPECT_NEAR(0.f, pitch, NEAR); + EXPECT_NEAR(angle, roll, NEAR); + EXPECT_NEAR(0.f, yaw, NEAR); + + quaternionToAngles(rotationVectorToQuaternion({0.f, 0.f, angle}), &pitch, &roll, &yaw); + EXPECT_NEAR(0.f, pitch, NEAR); + EXPECT_NEAR(0.f, roll, NEAR); + EXPECT_NEAR(angle, yaw, NEAR); + } + + // Generates a debug string + const std::string s = quaternionToAngles<true /* DEBUG */>( + rotationVectorToQuaternion({M_PI, 0.f, 0.f}), &pitch, &roll, &yaw); + ASSERT_FALSE(s.empty()); +} + +TEST(QuaternionUtil, quaternionToAngles_zaxis) { + float pitch, roll, yaw; + + for (int rot_step = -10; rot_step <= 10; ++rot_step) { + const float rot_angle = M_PI * rot_step * 0.1f; + // pitch independent of world Z rotation + + // We don't test the boundaries of pitch +-M_PI/2 as roll can become + // degenerate and atan(0, 0) may report 0, PI, or -PI. 
+ for (int step = -4; step <= 4; ++step) { + const float angle = M_PI * step * 0.1f; + auto q = rotationVectorToQuaternion({angle, 0.f, 0.f}); + auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle}); + + // Sequential active rotations (on world frame) compose as R_2 * R_1. + quaternionToAngles(world_z * q, &pitch, &roll, &yaw); + + EXPECT_NEAR(angle, pitch, NEAR); + EXPECT_NEAR(0.f, roll, NEAR); + } + + // roll independent of world Z rotation + for (int step = -5; step <= 5; ++step) { + const float angle = M_PI * step * 0.1f; + auto q = rotationVectorToQuaternion({0.f, angle, 0.f}); + auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle}); + + // Sequential active rotations (on world frame) compose as R_2 * R_1. + quaternionToAngles(world_z * q, &pitch, &roll, &yaw); + + EXPECT_NEAR(0.f, pitch, NEAR); + EXPECT_NEAR(angle, roll, NEAR); + + // Convert extrinsic (world-based) active rotations to a sequence of + // intrinsic rotations (each rotation based off of previous rotation + // frame). + // + // R_1 * R_intrinsic = R_extrinsic * R_1 + // implies + // R_intrinsic = (R_1)^-1 R_extrinsic R_1 + // + auto world_z_intrinsic = rotationVectorToQuaternion( + q.inverse() * Vector3f(0.f, 0.f, rot_angle)); + + // Sequential intrinsic rotations compose as R_1 * R_2. + quaternionToAngles(q * world_z_intrinsic, &pitch, &roll, &yaw); + + EXPECT_NEAR(0.f, pitch, NEAR); + EXPECT_NEAR(angle, roll, NEAR); + } + } +} + } // namespace } // namespace media } // namespace android diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp index 5d090dec90..e245c80848 100644 --- a/media/libheadtracking/QuaternionUtil.cpp +++ b/media/libheadtracking/QuaternionUtil.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" #include <cassert> diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h deleted file mode 100644 index f7a2ca969a..0000000000 --- a/media/libheadtracking/QuaternionUtil.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#pragma once - -#include <Eigen/Geometry> - -namespace android { -namespace media { - -/** - * Converts a rotation vector to an equivalent quaternion. - * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its - * magnitude the rotation angle (in radians) around that axis. - */ -Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector); - -/** - * Converts a quaternion to an equivalent rotation vector. - * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its - * magnitude the rotation angle (in radians) around that axis. - */ -Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion); - -/** - * Returns a quaternion representing a rotation around the X-axis with the given amount (in - * radians). - */ -Eigen::Quaternionf rotateX(float angle); - -/** - * Returns a quaternion representing a rotation around the Y-axis with the given amount (in - * radians). 
- */ -Eigen::Quaternionf rotateY(float angle); - -/** - * Returns a quaternion representing a rotation around the Z-axis with the given amount (in - * radians). - */ -Eigen::Quaternionf rotateZ(float angle); - -} // namespace media -} // namespace android diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp index 31d469c52e..8a2902708e 100644 --- a/media/libheadtracking/SensorPoseProvider.cpp +++ b/media/libheadtracking/SensorPoseProvider.cpp @@ -32,7 +32,7 @@ #include <sensor/SensorManager.h> #include <utils/Looper.h> -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" namespace android { namespace media { diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp index b6cd479388..56e7b4e743 100644 --- a/media/libheadtracking/StillnessDetector-test.cpp +++ b/media/libheadtracking/StillnessDetector-test.cpp @@ -16,8 +16,9 @@ #include <gtest/gtest.h> -#include "QuaternionUtil.h" #include "StillnessDetector.h" + +#include "media/QuaternionUtil.h" #include "TestUtil.h" namespace android { diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp index 7984e1ea54..9fbf81fee2 100644 --- a/media/libheadtracking/Twist-test.cpp +++ b/media/libheadtracking/Twist-test.cpp @@ -16,9 +16,7 @@ #include "media/Twist.h" -#include <gtest/gtest.h> - -#include "QuaternionUtil.h" +#include "media/QuaternionUtil.h" #include "TestUtil.h" using Eigen::Quaternionf; diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp index 664c4d5d2f..fdec6948a2 100644 --- a/media/libheadtracking/Twist.cpp +++ b/media/libheadtracking/Twist.cpp @@ -15,8 +15,8 @@ */ #include "media/Twist.h" - -#include "QuaternionUtil.h" +#include <android-base/stringprintf.h> +#include "media/QuaternionUtil.h" namespace android { namespace media { @@ -39,5 +39,11 @@ std::ostream& operator<<(std::ostream& os, const Twist3f& twist) { return os; } 
+std::string Twist3f::toString() const { + return base::StringPrintf("[%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f]", + mTranslationalVelocity[0], mTranslationalVelocity[1], mTranslationalVelocity[2], + mRotationalVelocity[0], mRotationalVelocity[1], mRotationalVelocity[2]); +} + } // namespace media } // namespace android diff --git a/media/libheadtracking/VectorRecorder.cpp b/media/libheadtracking/VectorRecorder.cpp new file mode 100644 index 0000000000..5c87d05ad1 --- /dev/null +++ b/media/libheadtracking/VectorRecorder.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "media/VectorRecorder.h" + +namespace android::media { + +// Convert data to string with level indentation. +// No need for a lock as the SimpleLog is thread-safe. +std::string VectorRecorder::toString(size_t indent) const { + return mRecordLog.dumpToString(std::string(indent, ' ').c_str(), mMaxLocalLogLine); +} + +// Record into local log when it is time. +void VectorRecorder::record(const std::vector<float>& record) { + if (record.size() != mVectorSize) return; + + // Protect against concurrent calls to record(). + std::lock_guard lg(mLock); + + // if it is time, record average data and reset. 
+ if (shouldRecordLog_l()) { + sumToAverage_l(); + mRecordLog.log( + "mean: %s, min: %s, max %s, calculated %zu samples in %0.4f second(s)", + toString(mSum, mDelimiterIdx, mFormatString.c_str()).c_str(), + toString(mMin, mDelimiterIdx, mFormatString.c_str()).c_str(), + toString(mMax, mDelimiterIdx, mFormatString.c_str()).c_str(), + mNumberOfSamples, + mNumberOfSecondsSinceFirstSample.count()); + resetRecord_l(); + } + + // update stream average. + if (mNumberOfSamples++ == 0) { + mFirstSampleTimestamp = std::chrono::steady_clock::now(); + for (size_t i = 0; i < mVectorSize; ++i) { + const float value = record[i]; + mSum[i] += value; + mMax[i] = value; + mMin[i] = value; + } + } else { + for (size_t i = 0; i < mVectorSize; ++i) { + const float value = record[i]; + mSum[i] += value; + mMax[i] = std::max(mMax[i], value); + mMin[i] = std::min(mMin[i], value); + } + } +} + +bool VectorRecorder::shouldRecordLog_l() { + mNumberOfSecondsSinceFirstSample = std::chrono::duration_cast<std::chrono::seconds>( + std::chrono::steady_clock::now() - mFirstSampleTimestamp); + return mNumberOfSecondsSinceFirstSample >= mRecordThreshold; +} + +void VectorRecorder::resetRecord_l() { + mSum.assign(mVectorSize, 0); + mMax.assign(mVectorSize, 0); + mMin.assign(mVectorSize, 0); + mNumberOfSamples = 0; + mNumberOfSecondsSinceFirstSample = std::chrono::seconds(0); +} + +void VectorRecorder::sumToAverage_l() { + if (mNumberOfSamples == 0) return; + const float reciprocal = 1.f / mNumberOfSamples; + for (auto& p : mSum) { + p *= reciprocal; + } +} + +} // namespace android::media diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h index 8ef8ab00e7..d2b78f2b3d 100644 --- a/media/libheadtracking/include/media/HeadTrackingProcessor.h +++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h @@ -19,6 +19,7 @@ #include "HeadTrackingMode.h" #include "Pose.h" +#include "PosePredictorType.h" #include "Twist.h" 
namespace android { @@ -95,7 +96,13 @@ class HeadTrackingProcessor { /** * This causes the current poses for both the head and/or screen to be considered "center". */ - virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0; + virtual void recenter( + bool recenterHead = true, bool recenterScreen = true, std::string source = "") = 0; + + /** + * Set the predictor type. + */ + virtual void setPosePredictorType(PosePredictorType type) = 0; /** * Dump HeadTrackingProcessor parameters under caller lock. diff --git a/media/libheadtracking/include/media/PosePredictorType.h b/media/libheadtracking/include/media/PosePredictorType.h new file mode 100644 index 0000000000..aa76d5da55 --- /dev/null +++ b/media/libheadtracking/include/media/PosePredictorType.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include <string> + +namespace android::media { + +enum class PosePredictorType { + /** Use best predictor determined from sensor input */ + AUTO, + + /** Use last pose for future prediction */ + LAST, + + /** Use twist angular velocity for future prediction */ + TWIST, + + /** Use weighted least squares history of prior poses (ignoring twist) */ + LEAST_SQUARES, +}; + +std::string toString(PosePredictorType posePredictorType); +bool isValidPosePredictorType(PosePredictorType posePredictorType); + +} // namespace android::media diff --git a/media/libheadtracking/include/media/QuaternionUtil.h b/media/libheadtracking/include/media/QuaternionUtil.h new file mode 100644 index 0000000000..a711d1784c --- /dev/null +++ b/media/libheadtracking/include/media/QuaternionUtil.h @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include <android-base/stringprintf.h> +#include <Eigen/Geometry> +#include <media/Pose.h> + +namespace android { +namespace media { + +/** + * Converts a rotation vector to an equivalent quaternion. + * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its + * magnitude the rotation angle (in radians) around that axis. + */ +Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector); + +/** + * Converts a quaternion to an equivalent rotation vector. 
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its + * magnitude the rotation angle (in radians) around that axis. + */ +Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion); + +/** + * Returns a quaternion representing a rotation around the X-axis with the given amount (in + * radians). + */ +Eigen::Quaternionf rotateX(float angle); + +/** + * Returns a quaternion representing a rotation around the Y-axis with the given amount (in + * radians). + */ +Eigen::Quaternionf rotateY(float angle); + +/** + * Returns a quaternion representing a rotation around the Z-axis with the given amount (in + * radians). + */ +Eigen::Quaternionf rotateZ(float angle); + +/** + * Compute separate roll, pitch, and yaw angles from a quaternion + * + * The roll, pitch, and yaw follow standard 3DOF virtual reality definitions + * with angles increasing counter-clockwise by the right hand rule. + * + * https://en.wikipedia.org/wiki/Six_degrees_of_freedom + * + * The roll, pitch, and yaw angles are calculated separately from the device frame + * rotation from the world frame. This is not to be confused with the + * intrinsic Euler xyz roll, pitch, yaw 'nautical' angles. + * + * The input quarternion is the active rotation that transforms the + * World/Stage frame to the Head/Screen frame. + * + * The input quaternion may come from two principal sensors: DEVICE and HEADSET + * and are interpreted as below. + * + * DEVICE SENSOR + * + * Android sensor stack assumes device coordinates along the x/y axis. + * + * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_rotation_vector: + * + * Looking down from the clouds. 
Android Device coordinate system (not used) + * DEVICE --> X (Y goes through top speaker towards the observer) + * | Z + * V + * USER + * + * Internally within this library, we transform the device sensor coordinate + * system by rotating the coordinate system around the X axis by -M_PI/2. + * This aligns the device coordinate system to match that of the + * Head Tracking sensor (see below), should the user be facing the device in + * natural (phone == portrait, tablet == ?) orientation. + * + * Looking down from the clouds. Spatializer device frame. + * Y + * ^ + * | + * DEVICE --> X (Z goes through top of the DEVICE towards the observer) + * + * USER + * + * The reference world frame is the device in vertical + * natural (phone == portrait) orientation with the top pointing straight + * up from the ground and the front-to-back direction facing north. + * The world frame is presumed locally fixed by magnetic and gravitational reference. + * + * HEADSET SENSOR + * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_head_tracker: + * + * Looking down from the clouds. Headset frame. + * Y + * ^ + * | + * USER ---> X + * (Z goes through the top of the USER head towards the observer) + * + * The Z axis goes from the neck to the top of the head, the X axis goes + * from the left ear to the right ear, the Y axis goes from the back of the + * head through the nose. + * + * Typically for a headset sensor, the X and Y axes have some arbitrary fixed + * reference. + * + * ROLL + * Roll is the counter-clockwise L/R motion around the Y axis (hence ZX plane). + * The right hand convention means the plane is ZX not XZ. + * This can be considered the azimuth angle in spherical coordinates + * with Pitch being the elevation angle. + * + * Roll has a range of -M_PI to M_PI radians. + * + * Rolling a device changes between portrait and landscape + * modes, and for L/R speakers will limit the amount of crosstalk cancellation. 
+ * Roll increases as the device (if vertical like a coin) rolls from left to right. + * + * By this definition, Roll is less accurate when the device is flat + * on a table rather than standing on edge. + * When perfectly flat on the table, roll may report as 0, M_PI, or -M_PI + * due ambiguity / degeneracy of atan(0, 0) in this case (the device Y axis aligns with + * the world Z axis), but exactly flat rarely occurs. + * + * Roll for a headset is the angle the head is inclined to the right side + * (like sleeping). + * + * PITCH + * Pitch is the Surface normal Y deviation (along the Z axis away from the earth). + * This can be considered the elevation angle in spherical coordinates using + * Roll as the azimuth angle. + * + * Pitch for a device determines whether the device is "upright" or lying + * flat on the table (i.e. surface normal). Pitch is 0 when upright, decreases + * as the device top moves away from the user to -M_PI/2 when lying down face up. + * Pitch increases from 0 to M_PI/2 when the device tilts towards the user, and is + * M_PI/2 degrees when face down. + * + * Pitch for a headset is the user tilting the head/chin up or down, + * like nodding. + * + * Pitch has a range of -M_PI/2, M_PI/2 radians. + * + * YAW + * Yaw is the rotational component along the earth's XY tangential plane, + * where the Z axis points radially away from the earth. + * + * Yaw has a range of -M_PI to M_PI radians. If used for azimuth angle in + * spherical coordinates, the elevation angle may be derived from the Z axis. + * + * A positive increase means the phone is rotating from right to left + * when considered flat on the table. + * (headset: the user is rotating their head to look left). + * If left speaker or right earbud is pointing straight up or down, + * this value is imprecise and Pitch or Roll is a more useful measure. + * + * Yaw for a device is like spinning a vertical device along the axis of + * gravity, like spinning a coin. 
Yaw increases as the coin / device + * spins from right to left, rotating around the Z axis. + * + * Yaw for a headset is the user turning the head to look left or right + * like shaking the head for no. Yaw is the primary angle for a binaural + * head tracking device. + * + * @param q input active rotation Eigen quaternion. + * @param pitch output set to pitch if not nullptr + * @param roll output set to roll if not nullptr + * @param yaw output set to yaw if not nullptr + * @return (DEBUG==true) a debug string with intermediate transformation matrix + * interpreted as the unit basis vectors. + */ + +// DEBUG returns a debug string for analysis. +// We save unneeded rotation matrix computation by keeping the DEBUG option constexpr. +template <bool DEBUG = false> +auto quaternionToAngles(const Eigen::Quaternionf& q, float *pitch, float *roll, float *yaw) { + /* + * The quaternion here is the active rotation that transforms from the world frame + * to the device frame: the observer remains in the world frame, + * and the device (frame) moves. + * + * We use this to map device coordinates to world coordinates. + * + * Device: We transform the device right speaker (X == 1), top speaker (Z == 1), + * and surface inwards normal (Y == 1) positions to the world frame. + * + * Headset: We transform the headset right bud (X == 1), top (Z == 1) and + * nose normal (Y == 1) positions to the world frame. + * + * This is the same as the world frame coordinates of the + * unit device vector in the X dimension (ux), + * unit device vector in the Y dimension (uy), + * unit device vector in the Z dimension (uz). + * + * Rather than doing the rotation on unit vectors individually, + * one can simply use the columns of the rotation matrix of + * the world-to-body quaternion, so the computation is exceptionally fast. 
+ * + * Furthermore, Eigen inlines the "toRotationMatrix" method + * and we rely on unused expression removal for efficiency + * and any elements not used should not be computed. + * + * Side note: For applying a rotation to several points, + * it is more computationally efficient to extract and + * use the rotation matrix form than the quaternion. + * So use of the rotation matrix is good for many reasons. + */ + const auto rotation = q.toRotationMatrix(); + + /* + * World location of unit vector right speaker assuming the phone is situated + * natural (phone == portrait) mode. + * (headset: right bud). + * + * auto ux = q.rotation() * Eigen::Vector3f{1.f, 0.f, 0.f}; + * = rotation.col(0); + */ + [[maybe_unused]] const auto ux_0 = rotation.coeff(0, 0); + [[maybe_unused]] const auto ux_1 = rotation.coeff(1, 0); + [[maybe_unused]] const auto ux_2 = rotation.coeff(2, 0); + + [[maybe_unused]] std::string coordinates; + if constexpr (DEBUG) { + base::StringAppendF(&coordinates, "ux: %f %f %f", ux_0, ux_1, ux_2); + } + + /* + * World location of screen-inwards normal assuming the phone is situated + * in natural (phone == portrait) mode. + * (headset: user nose). + * + * auto uy = q.rotation() * Eigen::Vector3f{0.f, 1.f, 0.f}; + * = rotation.col(1); + */ + [[maybe_unused]] const auto uy_0 = rotation.coeff(0, 1); + [[maybe_unused]] const auto uy_1 = rotation.coeff(1, 1); + [[maybe_unused]] const auto uy_2 = rotation.coeff(2, 1); + if constexpr (DEBUG) { + base::StringAppendF(&coordinates, "uy: %f %f %f", uy_0, uy_1, uy_2); + } + + /* + * World location of unit vector top speaker. + * (headset: top of head). 
+ * auto uz = q.rotation() * Eigen::Vector3f{0.f, 0.f, 1.f}; + * = rotation.col(2); + */ + [[maybe_unused]] const auto uz_0 = rotation.coeff(0, 2); + [[maybe_unused]] const auto uz_1 = rotation.coeff(1, 2); + [[maybe_unused]] const auto uz_2 = rotation.coeff(2, 2); + if constexpr (DEBUG) { + base::StringAppendF(&coordinates, "uz: %f %f %f", uz_0, uz_1, uz_2); + } + + // pitch computed from nose world Z coordinate; + // hence independent of rotation around world Z. + if (pitch != nullptr) { + *pitch = asin(std::clamp(uy_2, -1.f, 1.f)); + } + + // roll computed from head/right world Z coordinate; + // hence independent of rotation around world Z. + if (roll != nullptr) { + // atan2 takes care of implicit scale normalization of Z, X. + *roll = -atan2(ux_2, uz_2); + } + + // yaw computed from right ear angle projected onto world XY plane + // where world Z == 0. This is the rotation around world Z. + if (yaw != nullptr) { + // atan2 takes care of implicit scale normalization of X, Y. + *yaw = atan2(ux_1, ux_0); + } + + if constexpr (DEBUG) { + return coordinates; + } +} + +} // namespace media +} // namespace android diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h index 291cea364d..51b83d8379 100644 --- a/media/libheadtracking/include/media/Twist.h +++ b/media/libheadtracking/include/media/Twist.h @@ -66,6 +66,9 @@ class Twist3f { return Twist3f(mTranslationalVelocity / s, mRotationalVelocity / s); } + // Convert instance to a string representation. 
+ std::string toString() const; + private: Eigen::Vector3f mTranslationalVelocity; Eigen::Vector3f mRotationalVelocity; diff --git a/media/libheadtracking/include/media/VectorRecorder.h b/media/libheadtracking/include/media/VectorRecorder.h new file mode 100644 index 0000000000..4103a7d036 --- /dev/null +++ b/media/libheadtracking/include/media/VectorRecorder.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include <android-base/stringprintf.h> +#include <android-base/thread_annotations.h> +#include <audio_utils/SimpleLog.h> +#include <chrono> +#include <math.h> +#include <mutex> +#include <vector> + +namespace android::media { + +/** + * VectorRecorder records a vector of floats computing the average, max, and min + * over given time periods. + * + * The class is thread-safe. + */ +class VectorRecorder { + public: + /** + * @param vectorSize is the size of the vector input. + * If the input does not match this size, it is ignored. + * @param threshold is the time interval we bucket for averaging. + * @param maxLogLine is the number of lines we log. At this + * threshold, the oldest line will expire when the new line comes in. + * @param delimiterIdx is an optional array of delimiter indices that + * replace the ',' with a ':'. For example if delimiterIdx = { 3 } then + * the above example would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27]. 
+ * @param formatString is the sprintf format string for the double converted data + * to use. + */ + VectorRecorder( + size_t vectorSize, std::chrono::duration<double> threshold, int maxLogLine, + std::vector<size_t> delimiterIdx = {}, + const std::string_view formatString = {}) + : mVectorSize(vectorSize) + , mDelimiterIdx(std::move(delimiterIdx)) + , mFormatString(formatString) + , mRecordLog(maxLogLine) + , mRecordThreshold(threshold) + { + resetRecord_l(); // OK to call - we're in the constructor. + } + + /** Convert recorded vector data to string with level indentation */ + std::string toString(size_t indent) const; + + /** + * @brief Record a vector of floats. + * + * @param record a vector of floats. + */ + void record(const std::vector<float>& record); + + /** + * Format vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27]. + * + * @param delimiterIdx is an optional array of delimiter indices that + * replace the ',' with a ':'. For example if delimiterIdx = { 3 } then + * the above example would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27]. + * @param formatString is the sprintf format string for the double converted data + * to use. 
+ */ + template <typename T> + static std::string toString(const std::vector<T>& record, + const std::vector<size_t>& delimiterIdx = {}, + const char * const formatString = nullptr) { + if (record.size() == 0) { + return "[]"; + } + + std::string ss = "["; + auto nextDelimiter = delimiterIdx.begin(); + for (size_t i = 0; i < record.size(); ++i) { + if (i > 0) { + if (nextDelimiter != delimiterIdx.end() + && *nextDelimiter <= i) { + ss.append(" : "); + ++nextDelimiter; + } else { + ss.append(", "); + } + } + if (formatString != nullptr && *formatString) { + base::StringAppendF(&ss, formatString, static_cast<double>(record[i])); + } else { + base::StringAppendF(&ss, "%5.2lf", static_cast<double>(record[i])); + } + } + ss.append("]"); + return ss; + } + + private: + static constexpr int mMaxLocalLogLine = 10; + + const size_t mVectorSize; + const std::vector<size_t> mDelimiterIdx; + const std::string mFormatString; + + // Local log for historical vector data. + // Locked internally, so does not need mutex below. + SimpleLog mRecordLog{mMaxLocalLogLine}; + + std::mutex mLock; + + // Time threshold to record vectors in the local log. + // Vector data will be recorded into log at least every mRecordThreshold. + std::chrono::duration<double> mRecordThreshold GUARDED_BY(mLock); + + // Number of seconds since first sample in mSum. + std::chrono::duration<double> mNumberOfSecondsSinceFirstSample GUARDED_BY(mLock); + + // Timestamp of first sample recorded in mSum. + std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp GUARDED_BY(mLock); + + // Number of samples in mSum. + size_t mNumberOfSamples GUARDED_BY(mLock) = 0; + + std::vector<double> mSum GUARDED_BY(mLock); + std::vector<float> mMax GUARDED_BY(mLock); + std::vector<float> mMin GUARDED_BY(mLock); + + // Computes mNumberOfSecondsSinceFirstSample, returns true if time to record. + bool shouldRecordLog_l() REQUIRES(mLock); + + // Resets the running mNumberOfSamples, mSum, mMax, mMin. 
+ void resetRecord_l() REQUIRES(mLock); + + // Convert mSum to an average. + void sumToAverage_l() REQUIRES(mLock); +}; // VectorRecorder + +} // namespace android::media diff --git a/media/libmedia/xsd/api/current.txt b/media/libmedia/xsd/api/current.txt index 73b5f8ddac..35aa21342a 100644 --- a/media/libmedia/xsd/api/current.txt +++ b/media/libmedia/xsd/api/current.txt @@ -47,7 +47,9 @@ package media.profiles { method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile_optional(); method public java.util.List<media.profiles.CamcorderProfiles.ImageDecodingOptional> getImageDecoding_optional(); method public java.util.List<media.profiles.CamcorderProfiles.ImageEncodingOptional> getImageEncoding_optional(); + method public int getStartOffsetMs(); method public void setCameraId(int); + method public void setStartOffsetMs(int); } public static class CamcorderProfiles.ImageDecodingOptional { diff --git a/media/libmedia/xsd/media_profiles.xsd b/media/libmedia/xsd/media_profiles.xsd index 9664456e09..dcc302897b 100644 --- a/media/libmedia/xsd/media_profiles.xsd +++ b/media/libmedia/xsd/media_profiles.xsd @@ -49,6 +49,7 @@ </xs:element> </xs:choice> <xs:attribute name="cameraId" type="xs:int"/> + <xs:attribute name="startOffsetMs" type="xs:int"/> </xs:complexType> <xs:complexType name="EncoderProfile"> <xs:sequence> diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp index 97c59e596a..78f9b11769 100644 --- a/media/libmediaplayerservice/MediaPlayerService.cpp +++ b/media/libmediaplayerservice/MediaPlayerService.cpp @@ -1836,7 +1836,6 @@ MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, } else { mAttributes = NULL; } - setMinBufferCount(); } diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index 534027552a..b0f0cb1b31 100644 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -152,7 +152,8 
@@ status_t CameraSource::isCameraAvailable( if (camera == 0) { mCamera = Camera::connect(cameraId, clientName, clientUid, clientPid, - /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/true); + /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/false, + /*forceSlowJpegMode*/false); if (mCamera == 0) return -EBUSY; mCameraFlags &= ~FLAGS_HOT_CAMERA; } else { diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp index 2b45f2d16d..5b39618ad7 100644 --- a/media/libstagefright/NuMediaExtractor.cpp +++ b/media/libstagefright/NuMediaExtractor.cpp @@ -639,9 +639,11 @@ status_t NuMediaExtractor::appendVorbisNumPageSamples( numPageSamples = -1; } + // insert, including accounting for the space used. memcpy((uint8_t *)buffer->data() + mbuf->range_length(), &numPageSamples, sizeof(numPageSamples)); + buffer->setRange(buffer->offset(), buffer->size() + sizeof(numPageSamples)); uint32_t type; const void *data; @@ -690,6 +692,8 @@ status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) { ssize_t minIndex = fetchAllTrackSamples(); + buffer->setRange(0, 0); // start with an empty buffer + if (minIndex < 0) { return ERROR_END_OF_STREAM; } @@ -705,25 +709,25 @@ status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) { sampleSize += sizeof(int32_t); } + // capacity() is ok since we cleared out the buffer if (buffer->capacity() < sampleSize) { return -ENOMEM; } + const size_t srclen = it->mBuffer->range_length(); const uint8_t *src = (const uint8_t *)it->mBuffer->data() + it->mBuffer->range_offset(); - memcpy((uint8_t *)buffer->data(), src, it->mBuffer->range_length()); + memcpy((uint8_t *)buffer->data(), src, srclen); + buffer->setRange(0, srclen); status_t err = OK; if (info->mTrackFlags & kIsVorbis) { + // adjusts range when it inserts the extra bits err = appendVorbisNumPageSamples(it->mBuffer, buffer); } - if (err == OK) { - buffer->setRange(0, sampleSize); - } - return err; } 
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp index 1f569efe0e..f526e056d4 100644 --- a/media/libstagefright/SurfaceUtils.cpp +++ b/media/libstagefright/SurfaceUtils.cpp @@ -222,6 +222,11 @@ status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull * static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->allowAllocation(true); + // In nonblocking mode(timetout = 0), native_window_dequeue_buffer_and_wait() + // can fail with timeout. Changing to blocking mode will ensure that dequeue + // does not timeout. + static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->setDequeueTimeout(-1); + err = nativeWindow->query(nativeWindow, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs); if (err != NO_ERROR) { diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp index 4b4f65f13c..4df79efea5 100644 --- a/media/libstagefright/colorconversion/ColorConverter.cpp +++ b/media/libstagefright/colorconversion/ColorConverter.cpp @@ -340,6 +340,14 @@ size_t ColorConverter::BitmapParams::cropHeight() const { return mCropBottom - mCropTop + 1; } +bool ColorConverter::BitmapParams::isValid() const { + if (!((mStride & 1) == 0 // stride must be even + && mStride >= mBpp * cropWidth())) { + return false; + } + return true; +} + status_t ColorConverter::convert( const void *srcBits, size_t srcWidth, size_t srcHeight, size_t srcStride, @@ -359,9 +367,11 @@ status_t ColorConverter::convert( dstWidth, dstHeight, dstStride, dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat); - if (!((src.mCropLeft & 1) == 0 - && src.cropWidth() == dst.cropWidth() - && src.cropHeight() == dst.cropHeight())) { + if (!(src.isValid() + && dst.isValid() + && (src.mCropLeft & 1) == 0 + && src.cropWidth() == dst.cropWidth() + && src.cropHeight() == dst.cropHeight())) { return ERROR_UNSUPPORTED; } @@ -463,6 +473,7 @@ const struct 
ColorConverter::Coeffs *ColorConverter::getMatrix() const { } } +// Interleaved YUV 422 CbYCrY to RGB565 status_t ColorConverter::convertCbYCrY( const BitmapParams &src, const BitmapParams &dst) { // XXX Untested @@ -485,10 +496,10 @@ status_t ColorConverter::convertCbYCrY( + dst.mCropTop * dst.mWidth + dst.mCropLeft; const uint8_t *src_ptr = (const uint8_t *)src.mBits - + (src.mCropTop * dst.mWidth + src.mCropLeft) * 2; + + (src.mCropTop * src.mWidth + src.mCropLeft) * 2; for (size_t y = 0; y < src.cropHeight(); ++y) { - for (size_t x = 0; x < src.cropWidth(); x += 2) { + for (size_t x = 0; x < src.cropWidth() - 1; x += 2) { signed y1 = (signed)src_ptr[2 * x + 1] - _c16; signed y2 = (signed)src_ptr[2 * x + 3] - _c16; signed u = (signed)src_ptr[2 * x] - 128; @@ -658,13 +669,15 @@ getReadFromSrc(OMX_COLOR_FORMATTYPE srcFormat) { *u = ((uint8_t*)src_u)[x / 2] - 128; *v = ((uint8_t*)src_v)[x / 2] - 128; }; + // this format stores 10 bits content with 16 bits + // converting it to 8 bits src case OMX_COLOR_FormatYUV420Planar16: return [](void *src_y, void *src_u, void *src_v, size_t x, signed *y1, signed *y2, signed *u, signed *v) { - *y1 = (signed)(((uint16_t*)src_y)[x] >> 2); - *y2 = (signed)(((uint16_t*)src_y)[x + 1] >> 2); - *u = (signed)(((uint16_t*)src_u)[x / 2] >> 2) - 128; - *v = (signed)(((uint16_t*)src_v)[x / 2] >> 2) - 128; + *y1 = (uint8_t)(((uint16_t*)src_y)[x] >> 2); + *y2 = (uint8_t)(((uint16_t*)src_y)[x + 1] >> 2); + *u = (uint8_t)(((uint16_t*)src_u)[x / 2] >> 2) - 128; + *v = (uint8_t)(((uint16_t*)src_v)[x / 2] >> 2) - 128; }; default: TRESPASS(); @@ -1122,46 +1135,25 @@ status_t ColorConverter::convertYUV420Planar16ToY410( status_t ColorConverter::convertQCOMYUV420SemiPlanar( const BitmapParams &src, const BitmapParams &dst) { - const uint8_t *src_y = - (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft; - - const uint8_t *src_u = - (const uint8_t *)src_y + src.mWidth * src.mHeight - + src.mCropTop * src.mWidth + src.mCropLeft; - 
/* QCOMYUV420SemiPlanar is NV21, while MediaCodec uses NV12 */ return convertYUV420SemiPlanarBase( - src, dst, src_y, src_u, src.mWidth /* row_inc */, true /* isNV21 */); + src, dst, src.mWidth /* row_inc */, true /* isNV21 */); } status_t ColorConverter::convertTIYUV420PackedSemiPlanar( const BitmapParams &src, const BitmapParams &dst) { - const uint8_t *src_y = - (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft; - - const uint8_t *src_u = - (const uint8_t *)src_y + src.mWidth * (src.mHeight - src.mCropTop / 2); - return convertYUV420SemiPlanarBase( - src, dst, src_y, src_u, src.mWidth /* row_inc */); + src, dst, src.mWidth /* row_inc */); } status_t ColorConverter::convertYUV420SemiPlanar( const BitmapParams &src, const BitmapParams &dst) { - const uint8_t *src_y = - (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft; - - const uint8_t *src_u = - (const uint8_t *)src.mBits + src.mHeight * src.mStride + - (src.mCropTop / 2) * src.mStride + src.mCropLeft; - return convertYUV420SemiPlanarBase( - src, dst, src_y, src_u, src.mStride /* row_inc */); + src, dst, src.mStride /* row_inc */); } -status_t ColorConverter::convertYUV420SemiPlanarBase( - const BitmapParams &src, const BitmapParams &dst, - const uint8_t *src_y, const uint8_t *src_u, size_t row_inc, bool isNV21) { +status_t ColorConverter::convertYUV420SemiPlanarBase(const BitmapParams &src, + const BitmapParams &dst, size_t row_inc, bool isNV21) { const struct Coeffs *matrix = getMatrix(); if (!matrix) { return ERROR_UNSUPPORTED; @@ -1179,6 +1171,12 @@ status_t ColorConverter::convertYUV420SemiPlanarBase( uint16_t *dst_ptr = (uint16_t *)((uint8_t *) dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp); + const uint8_t *src_y = + (const uint8_t *)src.mBits + src.mCropTop * row_inc + src.mCropLeft; + + const uint8_t *src_u = (const uint8_t *)src.mBits + src.mHeight * row_inc + + (src.mCropTop / 2) * row_inc + src.mCropLeft; + for (size_t y = 0; y < 
src.cropHeight(); ++y) { for (size_t x = 0; x < src.cropWidth(); x += 2) { signed y1 = (signed)src_y[x] - _c16; diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h index 7a05f0039e..da3267e5d1 100644 --- a/media/libstagefright/include/media/stagefright/ColorConverter.h +++ b/media/libstagefright/include/media/stagefright/ColorConverter.h @@ -74,6 +74,8 @@ private: size_t cropWidth() const; size_t cropHeight() const; + bool isValid() const; + void *mBits; OMX_COLOR_FORMATTYPE mColorFormat; size_t mWidth, mHeight; @@ -121,7 +123,7 @@ private: status_t convertYUV420SemiPlanarBase( const BitmapParams &src, const BitmapParams &dst, - const uint8_t *src_y, const uint8_t *src_u, size_t row_inc, bool isNV21 = false); + size_t row_inc, bool isNV21 = false); status_t convertTIYUV420PackedSemiPlanar( const BitmapParams &src, const BitmapParams &dst); diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h index 9209038260..632dddb529 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h @@ -302,6 +302,10 @@ public: return mActiveClients; } + // Returns 0 if not all active clients have the same exclusive preferred device + // or the number of active clients with the same exclusive preferred device + size_t sameExclusivePreferredDevicesCount() const; + bool useHwGain() const { return !devices().isEmpty() ? 
devices().itemAt(0)->hasGainController() : false; diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp index fba3491e7a..76ccba6327 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp @@ -237,6 +237,27 @@ TrackClientVector AudioOutputDescriptor::clientsList(bool activeOnly, product_st return clients; } +size_t AudioOutputDescriptor::sameExclusivePreferredDevicesCount() const +{ + audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE; + size_t count = 0; + for (const auto &client : getClientIterable()) { + if (client->active()) { + if (!(client->hasPreferredDevice() && + client->isPreferredDeviceForExclusiveUse())) { + return 0; + } + if (deviceId == AUDIO_PORT_HANDLE_NONE) { + deviceId = client->preferredDeviceId(); + } else if (deviceId != client->preferredDeviceId()) { + return 0; + } + count++; + } + } + return count; +} + bool AudioOutputDescriptor::isAnyActive(VolumeSource volumeSourceToIgnore) const { return std::find_if(begin(mActiveClients), end(mActiveClients), diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h index 5378f645f4..f40ab1c865 100644 --- a/services/audiopolicy/engine/common/include/VolumeGroup.h +++ b/services/audiopolicy/engine/common/include/VolumeGroup.h @@ -39,7 +39,7 @@ public: VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; } void addSupportedAttributes(const audio_attributes_t &attr); - AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); } + AttributesVector getSupportedAttributes() const; void addSupportedStream(audio_stream_type_t stream); StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); } diff --git 
a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp index e1898070cb..f5ffbba6ca 100644 --- a/services/audiopolicy/engine/common/src/VolumeGroup.cpp +++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp @@ -37,6 +37,17 @@ VolumeGroup::VolumeGroup(const std::string &name, int indexMin, int indexMax) : { } +// Used for introspection, e.g. JAVA +AttributesVector VolumeGroup::getSupportedAttributes() const +{ + AttributesVector supportedAttributes = {}; + for (auto &aa : mGroupVolumeCurves.getAttributes()) { + aa.source = AUDIO_SOURCE_INVALID; + supportedAttributes.push_back(aa); + } + return supportedAttributes; +} + void VolumeGroup::dump(String8 *dst, int spaces) const { dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId); diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp index 8d45d0c544..9eba86b468 100644 --- a/services/audiopolicy/enginedefault/src/Engine.cpp +++ b/services/audiopolicy/enginedefault/src/Engine.cpp @@ -274,14 +274,19 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, break; case STRATEGY_PHONE: { - devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID); - if (!devices.isEmpty()) break; + // TODO(b/243670205): remove this logic that gives preference to last removable devices + // once a UX decision has been made if (getDpConnAndAllowedForVoice() && isInCall()) { devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_AUX_DIGITAL); if (!devices.isEmpty()) break; } devices = availableOutputDevices.getFirstDevicesFromTypes( - getLastRemovableMediaDevices(GROUP_NONE, {AUDIO_DEVICE_OUT_BLE_HEADSET})); + getLastRemovableMediaDevices(GROUP_NONE, { + // excluding HEARING_AID and BLE_HEADSET because Dialer uses + // setCommunicationDevice to select them explicitly + AUDIO_DEVICE_OUT_HEARING_AID, + AUDIO_DEVICE_OUT_BLE_HEADSET + })); if 
(!devices.isEmpty()) break; devices = availableOutputDevices.getFirstDevicesFromTypes({ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_EARPIECE}); @@ -296,7 +301,9 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, if ((strategy == STRATEGY_SONIFICATION) || (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) { - devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER); + // favor dock over speaker when available + devices = availableOutputDevices.getFirstDevicesFromTypes({ + AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_SPEAKER}); } // if SCO headset is connected and we are told to use it, play ringtone over diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp index 7c326c8229..eaa21b5c20 100644 --- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp +++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp @@ -2223,6 +2223,10 @@ status_t AudioPolicyManager::startOutput(audio_port_handle_t portId) outputDesc->stop(); return status; } + if (client->hasPreferredDevice()) { + // playback activity with preferred device impacts routing occurred, inform upper layers + mpClientInterface->onRoutingUpdated(); + } if (delayMs != 0) { usleep(delayMs * 1000); } @@ -2305,8 +2309,7 @@ status_t AudioPolicyManager::startSource(const sp<SwAudioOutputDescriptor>& outp outputDesc->setClientActive(client, true); if (client->hasPreferredDevice(true)) { - if (outputDesc->clientsList(true /*activeOnly*/).size() == 1 && - client->isPreferredDeviceForExclusiveUse()) { + if (outputDesc->sameExclusivePreferredDevicesCount() > 0) { // Preferred device may be exclusive, use only if no other active clients on this output devices = DeviceVector( mAvailableOutputDevices.getDeviceFromId(client->preferredDeviceId())); @@ -2468,6 +2471,11 @@ status_t AudioPolicyManager::stopOutput(audio_port_handle_t portId) } 
sp<TrackClientDescriptor> client = outputDesc->getClient(portId); + if (client->hasPreferredDevice(true)) { + // playback activity with preferred device impacts routing occurred, inform upper layers + mpClientInterface->onRoutingUpdated(); + } + ALOGV("stopOutput() output %d, stream %d, session %d", outputDesc->mIoHandle, client->stream(), client->session()); @@ -2505,7 +2513,8 @@ status_t AudioPolicyManager::stopSource(const sp<SwAudioOutputDescriptor>& outpu } } bool forceDeviceUpdate = false; - if (client->hasPreferredDevice(true)) { + if (client->hasPreferredDevice(true) && + outputDesc->sameExclusivePreferredDevicesCount() < 2) { checkStrategyRoute(client->strategy(), AUDIO_IO_HANDLE_NONE); forceDeviceUpdate = true; } diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp index b79bcdba95..c33dba8096 100644 --- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp +++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp @@ -302,7 +302,8 @@ Status AudioPolicyService::getOutput(AudioStreamType streamAidl, int32_t* _aidl_ audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS( aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl)); - if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) { + if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT + && stream != AUDIO_STREAM_ASSISTANT && stream != AUDIO_STREAM_CALL_ASSISTANT) { *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS( legacy2aidl_audio_io_handle_t_int32_t(AUDIO_IO_HANDLE_NONE)); return Status::ok(); diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp index c3f9caa0a3..9d2beb6bd1 100644 --- a/services/audiopolicy/service/Spatializer.cpp +++ b/services/audiopolicy/service/Spatializer.cpp @@ -20,6 +20,7 @@ //#define LOG_NDEBUG 0 #include <utils/Log.h> +#include <algorithm> #include <inttypes.h> #include <limits.h> #include <stdint.h> @@ -33,6 +34,7 @@ #include 
<media/stagefright/foundation/AHandler.h> #include <media/stagefright/foundation/AMessage.h> #include <media/MediaMetricsItem.h> +#include <media/QuaternionUtil.h> #include <media/ShmemCompat.h> #include <mediautils/SchedulingPolicyService.h> #include <mediautils/ServiceUtilities.h> @@ -75,6 +77,34 @@ static audio_channel_mask_t getMaxChannelMask( return maxMask; } +static std::vector<float> recordFromTranslationRotationVector( + const std::vector<float>& trVector) { + auto headToStageOpt = Pose3f::fromVector(trVector); + if (!headToStageOpt) return {}; + + const auto stageToHead = headToStageOpt.value().inverse(); + const auto stageToHeadTranslation = stageToHead.translation(); + constexpr float RAD_TO_DEGREE = 180.f / M_PI; + std::vector<float> record{ + stageToHeadTranslation[0], stageToHeadTranslation[1], stageToHeadTranslation[2], + 0.f, 0.f, 0.f}; + media::quaternionToAngles(stageToHead.rotation(), &record[3], &record[4], &record[5]); + record[3] *= RAD_TO_DEGREE; + record[4] *= RAD_TO_DEGREE; + record[5] *= RAD_TO_DEGREE; + return record; +} + +template<typename T> +static constexpr const T& safe_clamp(const T& value, const T& low, const T& high) { + if constexpr (std::is_floating_point_v<T>) { + return value != value /* constexpr isnan */ + ? low : std::clamp(value, low, high); + } else /* constexpr */ { + return std::clamp(value, low, high); + } +} + // --------------------------------------------------------------------------- class Spatializer::EngineCallbackHandler : public AHandler { @@ -185,41 +215,6 @@ const std::vector<const char *> Spatializer::sHeadPoseKeys = { }; // --------------------------------------------------------------------------- - -// Convert recorded sensor data to string with level indentation. 
-std::string Spatializer::HeadToStagePoseRecorder::toString(unsigned level) const { - std::string prefixSpace(level, ' '); - return mPoseRecordLog.dumpToString((prefixSpace + " ").c_str(), Spatializer::mMaxLocalLogLine); -} - -// Compute sensor data, record into local log when it is time. -void Spatializer::HeadToStagePoseRecorder::record(const std::vector<float>& headToStage) { - if (headToStage.size() != mPoseVectorSize) return; - - if (mNumOfSampleSinceLastRecord++ == 0) { - mFirstSampleTimestamp = std::chrono::steady_clock::now(); - } - // if it's time, do record and reset. - if (shouldRecordLog()) { - poseSumToAverage(); - mPoseRecordLog.log( - "mean: %s, min: %s, max %s, calculated %d samples in %0.4f second(s)", - Spatializer::toString<double>(mPoseRadianSum, true /* radianToDegree */).c_str(), - Spatializer::toString<float>(mMinPoseAngle, true /* radianToDegree */).c_str(), - Spatializer::toString<float>(mMaxPoseAngle, true /* radianToDegree */).c_str(), - mNumOfSampleSinceLastRecord, mNumOfSecondsSinceLastRecord.count()); - resetRecord(); - } - // update stream average. 
- for (int i = 0; i < mPoseVectorSize; i++) { - mPoseRadianSum[i] += headToStage[i]; - mMaxPoseAngle[i] = std::max(mMaxPoseAngle[i], headToStage[i]); - mMinPoseAngle[i] = std::min(mMinPoseAngle[i], headToStage[i]); - } - return; -} - -// --------------------------------------------------------------------------- sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) { sp<Spatializer> spatializer; @@ -590,7 +585,8 @@ Status Spatializer::setGlobalTransform(const std::vector<float>& screenToStage) } std::lock_guard lock(mLock); if (mPoseController != nullptr) { - mLocalLog.log("%s with screenToStage %s", __func__, toString<float>(screenToStage).c_str()); + mLocalLog.log("%s with screenToStage %s", __func__, + media::VectorRecorder::toString<float>(screenToStage).c_str()); mPoseController->setScreenToStagePose(maybePose.value()); } return Status::ok(); @@ -653,28 +649,48 @@ Status Spatializer::setScreenSensor(int sensorHandle) { Status Spatializer::setDisplayOrientation(float physicalToLogicalAngle) { ALOGV("%s physicalToLogicalAngle %f", __func__, physicalToLogicalAngle); - if (!mSupportsHeadTracking) { - return binderStatusFromStatusT(INVALID_OPERATION); - } - std::lock_guard lock(mLock); - mDisplayOrientation = physicalToLogicalAngle; mLocalLog.log("%s with %f", __func__, physicalToLogicalAngle); + const float angle = safe_clamp(physicalToLogicalAngle, 0.f, (float)(2. * M_PI)); + // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI. + ALOGI_IF(angle != physicalToLogicalAngle, + "%s: clamping %f to %f", __func__, physicalToLogicalAngle, angle); + std::lock_guard lock(mLock); + mDisplayOrientation = angle; if (mPoseController != nullptr) { - mPoseController->setDisplayOrientation(mDisplayOrientation); + // This turns on the rate-limiter. 
+ mPoseController->setDisplayOrientation(angle); } if (mEngine != nullptr) { setEffectParameter_l( - SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{physicalToLogicalAngle}); + SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{angle}); } return Status::ok(); } Status Spatializer::setHingeAngle(float hingeAngle) { - std::lock_guard lock(mLock); ALOGV("%s hingeAngle %f", __func__, hingeAngle); + mLocalLog.log("%s with %f", __func__, hingeAngle); + const float angle = safe_clamp(hingeAngle, 0.f, (float)(2. * M_PI)); + // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI. + ALOGI_IF(angle != hingeAngle, + "%s: clamping %f to %f", __func__, hingeAngle, angle); + std::lock_guard lock(mLock); + mHingeAngle = angle; + if (mEngine != nullptr) { + setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{angle}); + } + return Status::ok(); +} + +Status Spatializer::setFoldState(bool folded) { + ALOGV("%s foldState %d", __func__, (int)folded); + mLocalLog.log("%s with %d", __func__, (int)folded); + std::lock_guard lock(mLock); + mFoldedState = folded; if (mEngine != nullptr) { - mLocalLog.log("%s with %f", __func__, hingeAngle); - setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{hingeAngle}); + // we don't suppress multiple calls with the same folded state - that's + // done at the caller. 
+ setEffectParameter_l(SPATIALIZER_PARAM_FOLD_STATE, std::vector<uint8_t>{mFoldedState}); } return Status::ok(); } @@ -771,8 +787,9 @@ void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) { callback = mHeadTrackingCallback; if (mEngine != nullptr) { setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage); - mPoseRecorder.record(headToStage); - mPoseDurableRecorder.record(headToStage); + const auto record = recordFromTranslationRotationVector(headToStage); + mPoseRecorder.record(record); + mPoseDurableRecorder.record(record); } } @@ -823,8 +840,7 @@ void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) { } } callback = mHeadTrackingCallback; - mLocalLog.log("%s: %s, spatializerMode %s", __func__, media::toString(mode).c_str(), - media::toString(spatializerMode).c_str()); + mLocalLog.log("%s: updating mode to %s", __func__, media::toString(mode).c_str()); } if (callback != nullptr) { callback->onHeadTrackingModeChanged(spatializerMode); @@ -878,6 +894,14 @@ status_t Spatializer::attachOutput(audio_io_handle_t output, size_t numActiveTra checkSensorsState_l(); } callback = mSpatializerCallback; + + // Restore common effect state. + setEffectParameter_l(SPATIALIZER_PARAM_DISPLAY_ORIENTATION, + std::vector<float>{mDisplayOrientation}); + setEffectParameter_l(SPATIALIZER_PARAM_FOLD_STATE, + std::vector<uint8_t>{mFoldedState}); + setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, + std::vector<float>{mHingeAngle}); } if (outputChanged && callback != nullptr) { @@ -1049,8 +1073,7 @@ void Spatializer::postFramesProcessedMsg(int frames) { } std::string Spatializer::toString(unsigned level) const { - std::string prefixSpace; - prefixSpace.append(level, ' '); + std::string prefixSpace(level, ' '); std::string ss = prefixSpace + "Spatializer:\n"; bool needUnlock = false; @@ -1106,14 +1129,15 @@ std::string Spatializer::toString(unsigned level) const { // PostController dump. 
if (mPoseController != nullptr) { - ss += mPoseController->toString(level + 1); - ss.append(prefixSpace + - "Sensor data format - [rx, ry, rz, vx, vy, vz] (units-degree, " - "r-transform, v-angular velocity, x-pitch, y-roll, z-yaw):\n"); - ss.append(prefixSpace + " PerMinuteHistory:\n"); - ss += mPoseDurableRecorder.toString(level + 1); - ss.append(prefixSpace + " PerSecondHistory:\n"); - ss += mPoseRecorder.toString(level + 1); + ss.append(mPoseController->toString(level + 1)) + .append(prefixSpace) + .append("Pose (active stage-to-head) [tx, ty, tz : pitch, roll, yaw]:\n") + .append(prefixSpace) + .append(" PerMinuteHistory:\n") + .append(mPoseDurableRecorder.toString(level + 3)) + .append(prefixSpace) + .append(" PerSecondHistory:\n") + .append(mPoseRecorder.toString(level + 3)); } else { ss.append(prefixSpace).append("SpatializerPoseController not exist\n"); } diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h index 7415b1ef2e..b101ae6b7e 100644 --- a/services/audiopolicy/service/Spatializer.h +++ b/services/audiopolicy/service/Spatializer.h @@ -26,6 +26,7 @@ #include <audio_utils/SimpleLog.h> #include <math.h> #include <media/AudioEffect.h> +#include <media/VectorRecorder.h> #include <media/audiohal/EffectHalInterface.h> #include <media/stagefright/foundation/ALooper.h> #include <system/audio_effects/effect_spatializer.h> @@ -117,6 +118,7 @@ class Spatializer : public media::BnSpatializer, binder::Status setScreenSensor(int sensorHandle) override; binder::Status setDisplayOrientation(float physicalToLogicalAngle) override; binder::Status setHingeAngle(float hingeAngle) override; + binder::Status setFoldState(bool folded) override; binder::Status getSupportedModes(std::vector<media::SpatializationMode>* modes) override; binder::Status registerHeadTrackingCallback( const sp<media::ISpatializerHeadTrackingCallback>& callback) override; @@ -170,30 +172,6 @@ class Spatializer : public media::BnSpatializer, return 
result.has_value() ? media::toString(*result) : "unknown_latency_mode"; } - /** - * Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27]. - */ - template <typename T> - static std::string toString(const std::vector<T>& vec, bool radianToDegree = false) { - if (vec.size() == 0) { - return "[]"; - } - - std::string ss = "["; - for (auto f = vec.begin(); f != vec.end(); ++f) { - if (f != vec.begin()) { - ss .append(", "); - } - if (radianToDegree) { - base::StringAppendF(&ss, "%0.2f", HeadToStagePoseRecorder::getDegreeWithRadian(*f)); - } else { - base::StringAppendF(&ss, "%f", *f); - } - } - ss.append("]"); - return ss; - }; - // If the Spatializer is not created, we send the status for metrics purposes. // OK: Spatializer not expected to be created. // NO_INIT: Spatializer creation failed. @@ -397,8 +375,13 @@ private: int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR; /** Last display orientation received */ - static constexpr float kDisplayOrientationInvalid = 1000; - float mDisplayOrientation GUARDED_BY(mLock) = kDisplayOrientationInvalid; + float mDisplayOrientation GUARDED_BY(mLock) = 0.f; // aligned to natural up orientation. + + /** Last folded state */ + bool mFoldedState GUARDED_BY(mLock) = false; // foldable: true means folded. + + /** Last hinge angle */ + float mHingeAngle GUARDED_BY(mLock) = 0.f; // foldable: 0.f is closed, M_PI flat open. std::vector<media::SpatializationLevel> mLevels; std::vector<media::SpatializerHeadTrackingMode> mHeadTrackingModes; @@ -425,92 +408,12 @@ private: * @brief Calculate and record sensor data. * Dump to local log with max/average pose angle every mPoseRecordThreshold. 
*/ - class HeadToStagePoseRecorder { - public: - HeadToStagePoseRecorder(std::chrono::duration<double> threshold, int maxLogLine) - : mPoseRecordThreshold(threshold), mPoseRecordLog(maxLogLine) { - resetRecord(); - } - - /** Convert recorded sensor data to string with level indentation */ - std::string toString(unsigned level) const; - - /** - * @brief Calculate sensor data, record into local log when it is time. - * - * @param headToStage The vector from Pose3f::toVector(). - */ - void record(const std::vector<float>& headToStage); - - static constexpr float getDegreeWithRadian(const float radian) { - float radianToDegreeRatio = (180 / PI); - return (radian * radianToDegreeRatio); - } - - private: - static constexpr float PI = M_PI; - /** - * Pose recorder time threshold to record sensor data in local log. - * Sensor data will be recorded into log at least every mPoseRecordThreshold. - */ - std::chrono::duration<double> mPoseRecordThreshold; - // Number of seconds pass since last record. - std::chrono::duration<double> mNumOfSecondsSinceLastRecord; - /** - * According to frameworks/av/media/libheadtracking/include/media/Pose.h - * "The vector will have exactly 6 elements, where the first three are a translation vector - * and the last three are a rotation vector." - */ - static constexpr size_t mPoseVectorSize = 6; - /** - * Timestamp of last sensor data record in local log. - */ - std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp; - /** - * Number of sensor samples received since last record, sample rate is ~100Hz which produce - * ~6k samples/minute. - */ - uint32_t mNumOfSampleSinceLastRecord = 0; - /* The sum of pose angle represented by radian since last dump, div - * mNumOfSampleSinceLastRecord to get arithmetic mean. Largest possible value: 2PI * 100Hz * - * mPoseRecordThreshold. - */ - std::vector<double> mPoseRadianSum; - std::vector<float> mMaxPoseAngle; - std::vector<float> mMinPoseAngle; - // Local log for history sensor data. 
- SimpleLog mPoseRecordLog{mMaxLocalLogLine}; - - bool shouldRecordLog() { - mNumOfSecondsSinceLastRecord = std::chrono::duration_cast<std::chrono::seconds>( - std::chrono::steady_clock::now() - mFirstSampleTimestamp); - return mNumOfSecondsSinceLastRecord >= mPoseRecordThreshold; - } - - void resetRecord() { - mPoseRadianSum.assign(mPoseVectorSize, 0); - mMaxPoseAngle.assign(mPoseVectorSize, -PI); - mMinPoseAngle.assign(mPoseVectorSize, PI); - mNumOfSampleSinceLastRecord = 0; - mNumOfSecondsSinceLastRecord = std::chrono::seconds(0); - } - - // Add each sample to sum and only calculate when record. - void poseSumToAverage() { - if (mNumOfSampleSinceLastRecord == 0) return; - for (auto& p : mPoseRadianSum) { - const float reciprocal = 1.f / mNumOfSampleSinceLastRecord; - p *= reciprocal; - } - } - }; // HeadToStagePoseRecorder - // Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data. - HeadToStagePoseRecorder mPoseRecorder GUARDED_BY(mLock) = - HeadToStagePoseRecorder(std::chrono::seconds(1), mMaxLocalLogLine); + media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) { + 6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */}; // Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data. 
- HeadToStagePoseRecorder mPoseDurableRecorder GUARDED_BY(mLock) = - HeadToStagePoseRecorder(std::chrono::minutes(1), mMaxLocalLogLine); + media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mLock) { + 6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */}; }; // Spatializer }; // namespace android diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp index 72dba3ddef..874bde4e4d 100644 --- a/services/audiopolicy/service/SpatializerPoseController.cpp +++ b/services/audiopolicy/service/SpatializerPoseController.cpp @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + #include "SpatializerPoseController.h" #include <android-base/stringprintf.h> #include <chrono> @@ -21,8 +22,10 @@ #define LOG_TAG "SpatializerPoseController" //#define LOG_NDEBUG 0 +#include <cutils/properties.h> #include <sensor/Sensor.h> #include <media/MediaMetricsItem.h> +#include <media/QuaternionUtil.h> #include <utils/Log.h> #include <utils/SystemClock.h> @@ -45,11 +48,17 @@ constexpr float kMaxTranslationalVelocity = 2; // This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting. constexpr float kMaxRotationalVelocity = 0.8f; -// This is how far into the future we predict the head pose, using linear extrapolation based on -// twist (velocity). It should be set to a value that matches the characteristic durations of moving -// one's head. The higher we set this, the more latency we are able to reduce, but setting this too -// high will result in high prediction errors whenever the head accelerates (changes velocity). -constexpr auto kPredictionDuration = 50ms; +// This is how far into the future we predict the head pose. 
+// The prediction duration should be based on the actual latency from +// head-tracker to audio output, though setting the prediction duration too +// high may result in higher prediction errors when the head accelerates or +// decelerates (changes velocity). +// +// The head tracking predictor will do a best effort to achieve the requested +// prediction duration. If the duration is too far in the future based on +// current sensor variance, the predictor may internally restrict duration to what +// is achievable with reasonable confidence as the "best prediction". +constexpr auto kPredictionDuration = 120ms; // After not getting a pose sample for this long, we would treat the measurement as stale. // The max connection interval is 50ms, and HT sensor event interval can differ depending on the @@ -97,7 +106,15 @@ SpatializerPoseController::SpatializerPoseController(Listener* listener, .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond, .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond, .freshnessTimeout = Ticks(kFreshnessTimeout).count(), - .predictionDuration = Ticks(kPredictionDuration).count(), + .predictionDuration = []() -> float { + const int duration_ms = + property_get_int32("audio.spatializer.prediction_duration_ms", -1); + if (duration_ms >= 0) { + return duration_ms * 1'000'000LL; + } else { + return Ticks(kPredictionDuration).count(); + } + }(), .autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(), .autoRecenterTranslationalThreshold = kAutoRecenterTranslationThreshold, .autoRecenterRotationalThreshold = kAutoRecenterRotationThreshold, @@ -145,7 +162,14 @@ SpatializerPoseController::SpatializerPoseController(Listener* listener, mShouldCalculate = false; } } - }) {} + }) { + const media::PosePredictorType posePredictorType = + (media::PosePredictorType) + property_get_int32("audio.spatializer.pose_predictor_type", -1); + if (isValidPosePredictorType(posePredictorType)) { + 
mProcessor->setPosePredictorType(posePredictorType); + } + } SpatializerPoseController::~SpatializerPoseController() { { @@ -192,7 +216,7 @@ void SpatializerPoseController::setHeadSensor(int32_t sensor) { mHeadSensor = INVALID_SENSOR; } - mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */); + mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */, __func__); } void SpatializerPoseController::setScreenSensor(int32_t sensor) { @@ -229,7 +253,7 @@ void SpatializerPoseController::setScreenSensor(int32_t sensor) { mScreenSensor = INVALID_SENSOR; } - mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */); + mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */, __func__); } void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) { @@ -276,30 +300,66 @@ SpatializerPoseController::calculate_l() { void SpatializerPoseController::recenter() { std::lock_guard lock(mMutex); - mProcessor->recenter(); + mProcessor->recenter(true /* recenterHead */, true /* recenterScreen */, __func__); } void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose, const std::optional<Twist3f>& twist, bool isNewReference) { std::lock_guard lock(mMutex); + constexpr float NANOS_TO_MILLIS = 1e-6; + constexpr float RAD_TO_DEGREE = 180.f / M_PI; + + const float delayMs = (elapsedRealtimeNano() - timestamp) * NANOS_TO_MILLIS; // CLOCK_BOOTTIME + if (sensor == mHeadSensor) { + std::vector<float> pryprydt(8); // pitch, roll, yaw, d_pitch, d_roll, d_yaw, + // discontinuity, timestamp_delay + media::quaternionToAngles(pose.rotation(), &pryprydt[0], &pryprydt[1], &pryprydt[2]); + if (twist) { + const auto rotationalVelocity = twist->rotationalVelocity(); + // The rotational velocity is an intrinsic transform (i.e. based on the head + // coordinate system, not the world coordinate system). It is a 3 element vector: + // axis (d theta / dt). 
+ // + // We leave rotational velocity relative to the head coordinate system, + // as the initial head tracking sensor's world frame is arbitrary. + media::quaternionToAngles(media::rotationVectorToQuaternion(rotationalVelocity), + &pryprydt[3], &pryprydt[4], &pryprydt[5]); + } + pryprydt[6] = isNewReference; + pryprydt[7] = delayMs; + for (size_t i = 0; i < 6; ++i) { + // pitch, roll, yaw in degrees, referenced in degrees on the world frame. + // d_pitch, d_roll, d_yaw rotational velocity in degrees/s, based on the world frame. + pryprydt[i] *= RAD_TO_DEGREE; + } + mHeadSensorRecorder.record(pryprydt); + mHeadSensorDurableRecorder.record(pryprydt); + mProcessor->setWorldToHeadPose(timestamp, pose, twist.value_or(Twist3f()) / kTicksPerSecond); if (isNewReference) { - mProcessor->recenter(true, false); + mProcessor->recenter(true, false, __func__); } } if (sensor == mScreenSensor) { + std::vector<float> pryt{ 0.f, 0.f, 0.f, delayMs}; // pitch, roll, yaw, timestamp_delay + media::quaternionToAngles(pose.rotation(), &pryt[0], &pryt[1], &pryt[2]); + for (size_t i = 0; i < 3; ++i) { + pryt[i] *= RAD_TO_DEGREE; + } + mScreenSensorRecorder.record(pryt); + mScreenSensorDurableRecorder.record(pryt); + mProcessor->setWorldToScreenPose(timestamp, pose); if (isNewReference) { - mProcessor->recenter(false, true); + mProcessor->recenter(false, true, __func__); } } } std::string SpatializerPoseController::toString(unsigned level) const { - std::string prefixSpace; - prefixSpace.append(level, ' '); + std::string prefixSpace(level, ' '); std::string ss = prefixSpace + "SpatializerPoseController:\n"; bool needUnlock = false; @@ -315,14 +375,31 @@ std::string SpatializerPoseController::toString(unsigned level) const { if (mHeadSensor == INVALID_SENSOR) { ss += "HeadSensor: INVALID\n"; } else { - base::StringAppendF(&ss, "HeadSensor: 0x%08x\n", mHeadSensor); + base::StringAppendF(&ss, "HeadSensor: 0x%08x " + "(active world-to-head : head-relative velocity) " + "[ pitch, roll, yaw : 
d_pitch, d_roll, d_yaw : disc : delay ] " + "(degrees, degrees/s, bool, ms)\n", mHeadSensor); + ss.append(prefixSpace) + .append(" PerMinuteHistory:\n") + .append(mHeadSensorDurableRecorder.toString(level + 3)) + .append(prefixSpace) + .append(" PerSecondHistory:\n") + .append(mHeadSensorRecorder.toString(level + 3)); } ss += prefixSpace; if (mScreenSensor == INVALID_SENSOR) { ss += "ScreenSensor: INVALID\n"; } else { - base::StringAppendF(&ss, "ScreenSensor: 0x%08x\n", mScreenSensor); + base::StringAppendF(&ss, "ScreenSensor: 0x%08x (active world-to-screen) " + "[ pitch, roll, yaw : delay ] " + "(degrees, ms)\n", mScreenSensor); + ss.append(prefixSpace) + .append(" PerMinuteHistory:\n") + .append(mScreenSensorDurableRecorder.toString(level + 3)) + .append(prefixSpace) + .append(" PerSecondHistory:\n") + .append(mScreenSensorRecorder.toString(level + 3)); } ss += prefixSpace; diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h index 546eba00df..2e6fffda25 100644 --- a/services/audiopolicy/service/SpatializerPoseController.h +++ b/services/audiopolicy/service/SpatializerPoseController.h @@ -24,6 +24,7 @@ #include <media/HeadTrackingProcessor.h> #include <media/SensorPoseProvider.h> +#include <media/VectorRecorder.h> namespace android { @@ -143,6 +144,20 @@ class SpatializerPoseController : private media::SensorPoseProvider::Listener { bool mShouldExit = false; bool mCalculated = false; + media::VectorRecorder mHeadSensorRecorder{ + 8 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */, + { 3, 6, 7 } /* delimiterIdx */}; + media::VectorRecorder mHeadSensorDurableRecorder{ + 8 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */, + { 3, 6, 7 } /* delimiterIdx */}; + + media::VectorRecorder mScreenSensorRecorder{ + 4 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */, + { 3 } /* delimiterIdx */}; + media::VectorRecorder mScreenSensorDurableRecorder{ + 4 /* 
vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */, + { 3 } /* delimiterIdx */}; + // It's important that mThread is the last variable in this class // since we starts mThread in initializer list std::thread mThread; diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 66e34de998..0236c4fbde 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -954,7 +954,7 @@ Status CameraService::makeClient(const sp<CameraService>& cameraService, int api1CameraId, int facing, int sensorOrientation, int clientPid, uid_t clientUid, int servicePid, std::pair<int, IPCTransport> deviceVersionAndTransport, apiLevel effectiveApiLevel, bool overrideForPerfClass, bool overrideToPortrait, - /*out*/sp<BasicClient>* client) { + bool forceSlowJpegMode, /*out*/sp<BasicClient>* client) { // For HIDL devices if (deviceVersionAndTransport.second == IPCTransport::HIDL) { // Create CameraClient based on device version reported by the HAL. 
@@ -987,9 +987,9 @@ Status CameraService::makeClient(const sp<CameraService>& cameraService, sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get()); *client = new Camera2Client(cameraService, tmp, packageName, featureId, cameraId, api1CameraId, facing, sensorOrientation, clientPid, clientUid, - servicePid, overrideForPerfClass, overrideToPortrait); - ALOGI("%s: Camera1 API (legacy), override to portrait %d", __FUNCTION__, - overrideToPortrait); + servicePid, overrideForPerfClass, overrideToPortrait, forceSlowJpegMode); + ALOGI("%s: Camera1 API (legacy), override to portrait %d, forceSlowJpegMode %d", + __FUNCTION__, overrideToPortrait, forceSlowJpegMode); } else { // Camera2 API route sp<hardware::camera2::ICameraDeviceCallbacks> tmp = static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get()); @@ -1086,7 +1086,8 @@ Status CameraService::initializeShimMetadata(int cameraId) { sp<ICameraClient>{nullptr}, id, cameraId, internalPackageName, /*systemNativeClient*/ false, {}, uid, USE_CALLING_PID, API_1, /*shimUpdateOnly*/ true, /*oomScoreOffset*/ 0, - /*targetSdkVersion*/ __ANDROID_API_FUTURE__, /*overrideToPortrait*/ true, /*out*/ tmp) + /*targetSdkVersion*/ __ANDROID_API_FUTURE__, /*overrideToPortrait*/ true, + /*forceSlowJpegMode*/false, /*out*/ tmp) ).isOk()) { ALOGE("%s: Error initializing shim metadata: %s", __FUNCTION__, ret.toString8().string()); } @@ -1603,6 +1604,7 @@ Status CameraService::connect( int clientPid, int targetSdkVersion, bool overrideToPortrait, + bool forceSlowJpegMode, /*out*/ sp<ICamera>* device) { @@ -1614,7 +1616,7 @@ Status CameraService::connect( ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId, clientPackageName,/*systemNativeClient*/ false, {}, clientUid, clientPid, API_1, /*shimUpdateOnly*/ false, /*oomScoreOffset*/ 0, targetSdkVersion, - overrideToPortrait, /*out*/client); + overrideToPortrait, forceSlowJpegMode, /*out*/client); if(!ret.isOk()) { logRejected(id, 
CameraThreadState::getCallingPid(), String8(clientPackageName), @@ -1743,7 +1745,8 @@ Status CameraService::connectDevice( ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id, /*api1CameraId*/-1, clientPackageNameAdj, systemNativeClient,clientFeatureId, clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, oomScoreOffset, - targetSdkVersion, overrideToPortrait, /*out*/client); + targetSdkVersion, overrideToPortrait, /*forceSlowJpegMode*/false, + /*out*/client); if(!ret.isOk()) { logRejected(id, callingPid, String8(clientPackageNameAdj), ret.toString8()); @@ -1805,7 +1808,8 @@ Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient, const std::optional<String16>& clientFeatureId, int clientUid, int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly, int oomScoreOffset, int targetSdkVersion, - bool overrideToPortrait, /*out*/sp<CLIENT>& device) { + bool overrideToPortrait, bool forceSlowJpegMode, + /*out*/sp<CLIENT>& device) { binder::Status ret = binder::Status::ok(); bool isNonSystemNdk = false; @@ -1921,7 +1925,8 @@ Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& clientFeatureId, cameraId, api1CameraId, facing, orientation, clientPid, clientUid, getpid(), deviceVersionAndTransport, effectiveApiLevel, overrideForPerfClass, - overrideToPortrait, /*out*/&tmp)).isOk()) { + overrideToPortrait, forceSlowJpegMode, + /*out*/&tmp)).isOk()) { return ret; } client = static_cast<CLIENT*>(tmp.get()); @@ -2043,6 +2048,7 @@ Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& } client->setImageDumpMask(mImageDumpMask); + client->setStreamUseCaseOverrides(mStreamUseCaseOverrides); } // lock is destroyed, allow further connect calls // Important: release the mutex here so the client can call back into the service from its @@ -4420,6 +4426,13 @@ status_t 
CameraService::dump(int fd, const Vector<String16>& args) { String8 activeClientString = mActiveClientManager.toString(); dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string()); dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string()); + if (mStreamUseCaseOverrides.size() > 0) { + dprintf(fd, "Active stream use case overrides:"); + for (int64_t useCaseOverride : mStreamUseCaseOverrides) { + dprintf(fd, " %" PRId64, useCaseOverride); + } + dprintf(fd, "\n"); + } dumpEventLog(fd); @@ -4911,6 +4924,10 @@ status_t CameraService::shellCommand(int in, int out, int err, const Vector<Stri return handleGetImageDumpMask(out); } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) { return handleSetCameraMute(args); + } else if (args.size() >= 2 && args[0] == String16("set-stream-use-case-override")) { + return handleSetStreamUseCaseOverrides(args); + } else if (args.size() >= 1 && args[0] == String16("clear-stream-use-case-override")) { + return handleClearStreamUseCaseOverrides(); } else if (args.size() >= 2 && args[0] == String16("watch")) { return handleWatchCommand(args, in, out); } else if (args.size() >= 2 && args[0] == String16("set-watchdog")) { @@ -5083,6 +5100,43 @@ status_t CameraService::handleSetCameraMute(const Vector<String16>& args) { return OK; } +status_t CameraService::handleSetStreamUseCaseOverrides(const Vector<String16>& args) { + std::vector<int64_t> useCasesOverride; + for (size_t i = 1; i < args.size(); i++) { + int64_t useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT; + String8 arg8 = String8(args[i]); + if (arg8 == "DEFAULT") { + useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT; + } else if (arg8 == "PREVIEW") { + useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW; + } else if (arg8 == "STILL_CAPTURE") { + useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE; + } else if (arg8 == "VIDEO_RECORD") { + useCase = 
ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD; + } else if (arg8 == "PREVIEW_VIDEO_STILL") { + useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL; + } else if (arg8 == "VIDEO_CALL") { + useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_CALL; + } else { + ALOGE("%s: Invalid stream use case %s", __FUNCTION__, String8(args[i]).c_str()); + return BAD_VALUE; + } + useCasesOverride.push_back(useCase); + } + + Mutex::Autolock lock(mServiceLock); + mStreamUseCaseOverrides = std::move(useCasesOverride); + + return OK; +} + +status_t CameraService::handleClearStreamUseCaseOverrides() { + Mutex::Autolock lock(mServiceLock); + mStreamUseCaseOverrides.clear(); + + return OK; +} + status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) { if (args.size() >= 3 && args[1] == String16("start")) { return startWatchingTags(args, outFd); @@ -5437,6 +5491,15 @@ status_t CameraService::printHelp(int out) { " Valid values 0=OFF, 1=ON for JPEG\n" " get-image-dump-mask returns the current image-dump-mask value\n" " set-camera-mute <0/1> enable or disable camera muting\n" + " set-stream-use-case-override <usecase1> <usecase2> ... override stream use cases\n" + " Use cases applied in descending resolutions. So usecase1 is assigned to the\n" + " largest resolution, usecase2 is assigned to the 2nd largest resolution, and so\n" + " on. In case the number of usecases is smaller than the number of streams, the\n" + " last use case is assigned to all the remaining streams. 
In case of multiple\n" + " streams with the same resolution, the tie-breaker is (JPEG, RAW, YUV, and PRIV)\n" + " Valid values are (case sensitive): DEFAULT, PREVIEW, STILL_CAPTURE, VIDEO_RECORD,\n" + " PREVIEW_VIDEO_STILL, VIDEO_CALL\n" + " clear-stream-use-case-override clear the stream use case override\n" " watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n" " help print this message\n"); } diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h index 840e9b6412..70293f4c5b 100644 --- a/services/camera/libcameraservice/CameraService.h +++ b/services/camera/libcameraservice/CameraService.h @@ -141,7 +141,7 @@ public: virtual binder::Status connect(const sp<hardware::ICameraClient>& cameraClient, int32_t cameraId, const String16& clientPackageName, int32_t clientUid, int clientPid, int targetSdkVersion, - bool overrideToPortrait, + bool overrideToPortrait, bool forceSlowJpegMode, /*out*/ sp<hardware::ICamera>* device) override; @@ -348,6 +348,13 @@ public: // Set Camera service watchdog virtual status_t setCameraServiceWatchdog(bool enabled) = 0; + // Set stream use case overrides + virtual void setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides) = 0; + + // Clear stream use case overrides + virtual void clearStreamUseCaseOverrides() = 0; + // The injection camera session to replace the internal camera // session. 
virtual status_t injectCamera(const String8& injectedCamId, @@ -502,6 +509,7 @@ public: virtual bool canCastToApiClient(apiLevel level) const; void setImageDumpMask(int /*mask*/) { } + void setStreamUseCaseOverrides(const std::vector<int64_t>& /*usecaseOverrides*/) { } protected: // Initialized in constructor @@ -852,7 +860,8 @@ private: int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient, const std::optional<String16>& clientFeatureId, int clientUid, int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly, int scoreOffset, int targetSdkVersion, - bool overrideToPortrait, /*out*/sp<CLIENT>& device); + bool overrideToPortrait, bool forceSlowJpegMode, + /*out*/sp<CLIENT>& device); // Lock guarding camera service state Mutex mServiceLock; @@ -1216,6 +1225,12 @@ private: // Set the camera mute state status_t handleSetCameraMute(const Vector<String16>& args); + // Set the stream use case overrides + status_t handleSetStreamUseCaseOverrides(const Vector<String16>& args); + + // Clear the stream use case overrides + status_t handleClearStreamUseCaseOverrides(); + // Handle 'watch' command as passed through 'cmd' status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd); @@ -1267,7 +1282,8 @@ private: const String8& cameraId, int api1CameraId, int facing, int sensorOrientation, int clientPid, uid_t clientUid, int servicePid, std::pair<int, IPCTransport> deviceVersionAndIPCTransport, apiLevel effectiveApiLevel, - bool overrideForPerfClass, bool overrideToPortrait, /*out*/sp<BasicClient>* client); + bool overrideForPerfClass, bool overrideToPortrait, bool forceSlowJpegMode, + /*out*/sp<BasicClient>* client); status_t checkCameraAccess(const String16& opPackageName); @@ -1311,6 +1327,9 @@ private: // Camera Service watchdog flag bool mCameraServiceWatchdogEnabled = true; + // Current stream use case overrides + std::vector<int64_t> mStreamUseCaseOverrides; + /** * A listener class that implements the 
IBinder::DeathRecipient interface * for use to call back the error state injected by the external camera, and diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 0887cedba8..ac073767d1 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -62,7 +62,8 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService, uid_t clientUid, int servicePid, bool overrideForPerfClass, - bool overrideToPortrait): + bool overrideToPortrait, + bool forceSlowJpegMode): Camera2ClientBase(cameraService, cameraClient, clientPackageName, false/*systemNativeClient - since no ndk for api1*/, clientFeatureId, cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation, clientPid, @@ -78,6 +79,9 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService, SharedParameters::Lock l(mParameters); l.mParameters.state = Parameters::DISCONNECTED; + if (forceSlowJpegMode) { + l.mParameters.isSlowJpegModeForced = true; + } } status_t Camera2Client::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) { @@ -2347,6 +2351,15 @@ status_t Camera2Client::setCameraMute(bool enabled) { return mDevice->setCameraMute(enabled); } +void Camera2Client::setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides) { + mDevice->setStreamUseCaseOverrides(useCaseOverrides); +} + +void Camera2Client::clearStreamUseCaseOverrides() { + mDevice->clearStreamUseCaseOverrides(); +} + status_t Camera2Client::waitUntilCurrentRequestIdLocked() { int32_t activeRequestId = mStreamingProcessor->getActiveRequestId(); if (activeRequestId != 0) { diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h index 9c540a4173..bbad6d8ff8 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.h +++ 
b/services/camera/libcameraservice/api1/Camera2Client.h @@ -92,6 +92,10 @@ public: virtual status_t setCameraServiceWatchdog(bool enabled); + virtual void setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides); + virtual void clearStreamUseCaseOverrides(); + /** * Interface used by CameraService */ @@ -108,7 +112,8 @@ public: uid_t clientUid, int servicePid, bool overrideForPerfClass, - bool overrideToPortrait); + bool overrideToPortrait, + bool forceSlowJpegMode); virtual ~Camera2Client(); diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp index 123cd757e8..50f1a7a87a 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -984,9 +984,8 @@ status_t Parameters::initialize(CameraDeviceBase *device) { Size maxJpegSize = getMaxSize(getAvailableJpegSizes()); int64_t minFrameDurationNs = getJpegStreamMinFrameDurationNs(maxJpegSize); - slowJpegMode = false; - if (minFrameDurationNs > kSlowJpegModeThreshold) { - slowJpegMode = true; + slowJpegMode = isSlowJpegModeForced || minFrameDurationNs > kSlowJpegModeThreshold; + if (slowJpegMode) { // Slow jpeg devices does not support video snapshot without // slowing down preview. // TODO: support video size video snapshot only? 
@@ -2083,7 +2082,7 @@ status_t Parameters::set(const String8& paramString) { paramsFlattened = newParams.flatten(); params = newParams; - slowJpegMode = false; + slowJpegMode = isSlowJpegModeForced; Size pictureSize = { pictureWidth, pictureHeight }; bool zslFrameRateSupported = false; int64_t jpegMinFrameDurationNs = getJpegStreamMinFrameDurationNs(pictureSize); diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h index cbe62a7754..afad024c88 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.h +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -182,6 +182,8 @@ struct Parameters { bool isDeviceZslSupported; // Whether the device supports geometric distortion correction bool isDistortionCorrectionSupported; + // Whether slowJpegMode is forced regardless of jpeg stream FPS + bool isSlowJpegModeForced; // Overall camera state enum State { diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 371e53d8e0..39ebc386d9 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ -1769,6 +1769,15 @@ status_t CameraDeviceClient::setCameraMute(bool enabled) { return mDevice->setCameraMute(enabled); } +void CameraDeviceClient::setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides) { + mDevice->setStreamUseCaseOverrides(useCaseOverrides); +} + +void CameraDeviceClient::clearStreamUseCaseOverrides() { + mDevice->clearStreamUseCaseOverrides(); +} + binder::Status CameraDeviceClient::switchToOffline( const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, const std::vector<int>& offlineOutputIds, diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h index 06b9562e39..d1a2f001aa 100644 --- 
a/services/camera/libcameraservice/api2/CameraDeviceClient.h +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h @@ -210,6 +210,9 @@ public: virtual status_t setCameraServiceWatchdog(bool enabled); + virtual void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides); + virtual void clearStreamUseCaseOverrides() override; + /** * Device listener interface */ diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp index beb655bfdf..2810667cde 100644 --- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp +++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp @@ -85,6 +85,12 @@ status_t CameraOfflineSessionClient::setCameraMute(bool) { return INVALID_OPERATION; } +void CameraOfflineSessionClient::setStreamUseCaseOverrides( + const std::vector<int64_t>& /*useCaseOverrides*/) { +} + +void CameraOfflineSessionClient::clearStreamUseCaseOverrides() { +} status_t CameraOfflineSessionClient::dump(int fd, const Vector<String16>& args) { return BasicClient::dump(fd, args); diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h index 8edb64ad5b..23e1f3d053 100644 --- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h +++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h @@ -87,6 +87,11 @@ public: status_t setCameraServiceWatchdog(bool enabled) override; + void setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides) override; + + void clearStreamUseCaseOverrides() override; + // permissions management status_t startCameraOps() override; status_t finishCameraOps() override; diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp index dcb1d67b49..8f1485564f 100644 --- 
a/services/camera/libcameraservice/common/Camera2ClientBase.cpp +++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp @@ -335,6 +335,29 @@ void Camera2ClientBase<TClientBase>::notifyError( } template <typename TClientBase> +void Camera2ClientBase<TClientBase>::notifyPhysicalCameraChange(const std::string &physicalId) { + // We're only interested in this notification if overrideToPortrait is turned on. + if (!TClientBase::mOverrideToPortrait) { + return; + } + + String8 physicalId8(physicalId.c_str()); + auto physicalCameraMetadata = mDevice->infoPhysical(physicalId8); + auto orientationEntry = physicalCameraMetadata.find(ANDROID_SENSOR_ORIENTATION); + + if (orientationEntry.count == 1) { + int orientation = orientationEntry.data.i32[0]; + int rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_NONE; + + if (orientation == 0 || orientation == 180) { + rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_90; + } + + static_cast<TClientBase *>(this)->setRotateAndCropOverride(rotateAndCropMode); + } +} + +template <typename TClientBase> status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) { if (!mDeviceActive) { status_t res = TClientBase::startCameraStreamingOps(); diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h index d2dcdb1ab0..705fe69765 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.h +++ b/services/camera/libcameraservice/common/Camera2ClientBase.h @@ -75,6 +75,7 @@ public: virtual void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras); + virtual void notifyPhysicalCameraChange(const std::string &physicalId) override; // Returns errors on app ops permission failures virtual status_t notifyActive(float maxPreviewFps); virtual void notifyIdle(int64_t /*requestCount*/, int64_t /*resultErrorCount*/, diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h 
b/services/camera/libcameraservice/common/CameraDeviceBase.h index 69514f3ff1..8f7b16d657 100644 --- a/services/camera/libcameraservice/common/CameraDeviceBase.h +++ b/services/camera/libcameraservice/common/CameraDeviceBase.h @@ -464,6 +464,15 @@ class CameraDeviceBase : public virtual FrameProducer { void setImageDumpMask(int mask) { mImageDumpMask = mask; } /** + * Set stream use case overrides + */ + void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides) { + mStreamUseCaseOverrides = useCaseOverrides; + } + + void clearStreamUseCaseOverrides() {} + + /** * The injection camera session to replace the internal camera * session. */ @@ -477,6 +486,7 @@ class CameraDeviceBase : public virtual FrameProducer { protected: bool mImageDumpMask = 0; + std::vector<int64_t> mStreamUseCaseOverrides; }; }; // namespace android diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h index f39b92a48c..63abcf0015 100644 --- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h +++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h @@ -40,6 +40,10 @@ class NotificationListener : public virtual RefBase { // Required for API 1 and 2 virtual void notifyError(int32_t errorCode, const CaptureResultExtras &resultExtras) = 0; + + // Optional for API 1 and 2 + virtual void notifyPhysicalCameraChange(const std::string &/*physicalId*/) {} + // May return an error since it checks appops virtual status_t notifyActive(float maxPreviewFps) = 0; virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError, diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp index 3132787608..43f92a9927 100644 --- a/services/camera/libcameraservice/common/CameraProviderManager.cpp +++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp 
@@ -2029,7 +2029,7 @@ status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16 } CameraMetadata info2; res = device->getCameraCharacteristics(true /*overrideForPerfClass*/, &info2, - /*overrideToPortrait*/true); + /*overrideToPortrait*/false); if (res == INVALID_OPERATION) { dprintf(fd, " API2 not directly supported\n"); } else if (res != OK) { @@ -2380,8 +2380,8 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacterist if (overrideToPortrait) { const auto &lensFacingEntry = characteristics->find(ANDROID_LENS_FACING); const auto &sensorOrientationEntry = characteristics->find(ANDROID_SENSOR_ORIENTATION); + uint8_t lensFacing = lensFacingEntry.data.u8[0]; if (lensFacingEntry.count > 0 && sensorOrientationEntry.count > 0) { - uint8_t lensFacing = lensFacingEntry.data.u8[0]; int32_t sensorOrientation = sensorOrientationEntry.data.i32[0]; int32_t newSensorOrientation = sensorOrientation; @@ -2402,6 +2402,8 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacterist } if (characteristics->exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) { + ALOGV("%s: Erasing ANDROID_INFO_DEVICE_STATE_ORIENTATIONS for lens facing %d", + __FUNCTION__, lensFacing); characteristics->erase(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS); } } @@ -2437,8 +2439,8 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::filterSmallJpegSizes( for (size_t i = 0; i < streamConfigs.count; i += 4) { if ((streamConfigs.data.i32[i] == HAL_PIXEL_FORMAT_BLOB) && (streamConfigs.data.i32[i+3] == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) { - if (streamConfigs.data.i32[i+1] < thresholdW || - streamConfigs.data.i32[i+2] < thresholdH) { + if (streamConfigs.data.i32[i+1] * streamConfigs.data.i32[i+2] < + thresholdW * thresholdH) { continue; } else { largeJpegCount ++; @@ -2458,8 +2460,8 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::filterSmallJpegSizes( 
mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS); for (size_t i = 0; i < minDurations.count; i += 4) { if (minDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) { - if (minDurations.data.i64[i+1] < thresholdW || - minDurations.data.i64[i+2] < thresholdH) { + if ((int32_t)minDurations.data.i64[i+1] * (int32_t)minDurations.data.i64[i+2] < + thresholdW * thresholdH) { continue; } else { largeJpegCount++; @@ -2479,8 +2481,8 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::filterSmallJpegSizes( mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS); for (size_t i = 0; i < stallDurations.count; i += 4) { if (stallDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) { - if (stallDurations.data.i64[i+1] < thresholdW || - stallDurations.data.i64[i+2] < thresholdH) { + if ((int32_t)stallDurations.data.i64[i+1] * (int32_t)stallDurations.data.i64[i+2] < + thresholdW * thresholdH) { continue; } else { largeJpegCount++; diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp index 2c035deeef..67c4841a6c 100644 --- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp +++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp @@ -760,7 +760,7 @@ status_t AidlProviderInfo::convertToAidlHALStreamCombinationAndCameraIdsLocked( SessionConfigurationUtils::targetPerfClassPrimaryCamera( perfClassPrimaryCameraIds, cameraId, targetSdkVersion); res = mManager->getCameraCharacteristicsLocked(cameraId, overrideForPerfClass, &deviceInfo, - /*overrideToPortrait*/true); + /*overrideToPortrait*/false); if (res != OK) { return res; } @@ -768,7 +768,8 @@ status_t AidlProviderInfo::convertToAidlHALStreamCombinationAndCameraIdsLocked( [this](const String8 &id, bool overrideForPerfClass) { CameraMetadata physicalDeviceInfo; mManager->getCameraCharacteristicsLocked(id.string(), overrideForPerfClass, - &physicalDeviceInfo, 
/*overrideToPortrait*/true); + &physicalDeviceInfo, + /*overrideToPortrait*/false); return physicalDeviceInfo; }; std::vector<std::string> physicalCameraIds; diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp index 1df6ec4449..630090b4ac 100644 --- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp +++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp @@ -920,7 +920,7 @@ status_t HidlProviderInfo::convertToHALStreamCombinationAndCameraIdsLocked( SessionConfigurationUtils::targetPerfClassPrimaryCamera( perfClassPrimaryCameraIds, cameraId, targetSdkVersion); res = mManager->getCameraCharacteristicsLocked(cameraId, overrideForPerfClass, &deviceInfo, - /*overrideToPortrait*/true); + /*overrideToPortrait*/false); if (res != OK) { return res; } @@ -928,7 +928,7 @@ status_t HidlProviderInfo::convertToHALStreamCombinationAndCameraIdsLocked( [this](const String8 &id, bool overrideForPerfClass) { CameraMetadata physicalDeviceInfo; mManager->getCameraCharacteristicsLocked(id.string(), overrideForPerfClass, - &physicalDeviceInfo, /*overrideToPortrait*/true); + &physicalDeviceInfo, /*overrideToPortrait*/false); return physicalDeviceInfo; }; std::vector<std::string> physicalCameraIds; diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 67ae4040fc..89a16e1266 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -130,7 +130,10 @@ Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass, bool mLastTemplateId(-1), mNeedFixupMonochromeTags(false), mOverrideForPerfClass(overrideForPerfClass), - mOverrideToPortrait(overrideToPortrait) + mOverrideToPortrait(overrideToPortrait), + mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE), + mComposerOutput(false), + 
mActivePhysicalId("") { ATRACE_CALL(); ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string()); @@ -1405,12 +1408,34 @@ status_t Camera3Device::filterParamsAndConfigureLocked(const CameraMetadata& ses set_camera_metadata_vendor_id(meta, mVendorTagId); filteredParams.unlock(meta); if (availableSessionKeys.count > 0) { + bool rotateAndCropSessionKey = false; for (size_t i = 0; i < availableSessionKeys.count; i++) { camera_metadata_ro_entry entry = params.find( availableSessionKeys.data.i32[i]); if (entry.count > 0) { filteredParams.update(entry); } + if (ANDROID_SCALER_ROTATE_AND_CROP == availableSessionKeys.data.i32[i]) { + rotateAndCropSessionKey = true; + } + } + + if (rotateAndCropSessionKey) { + sp<CaptureRequest> request = new CaptureRequest(); + PhysicalCameraSettings settingsList; + settingsList.metadata = filteredParams; + request->mSettingsList.push_back(settingsList); + + auto rotateAndCropEntry = filteredParams.find(ANDROID_SCALER_ROTATE_AND_CROP); + if (rotateAndCropEntry.count > 0 && + rotateAndCropEntry.data.u8[0] == ANDROID_SCALER_ROTATE_AND_CROP_AUTO) { + request->mRotateAndCropAuto = true; + } else { + request->mRotateAndCropAuto = false; + } + + overrideAutoRotateAndCrop(request, mOverrideToPortrait, mRotateAndCropOverride); + filteredParams = request->mSettingsList.begin()->metadata; } } @@ -2399,6 +2424,9 @@ status_t Camera3Device::configureStreamsLocked(int operatingMode, tryRemoveFakeStreamLocked(); } + // Override stream use case based on "adb shell command" + overrideStreamUseCaseLocked(); + // Start configuring the streams ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string()); @@ -2428,7 +2456,7 @@ status_t Camera3Device::configureStreamsLocked(int operatingMode, } mGroupIdPhysicalCameraMap.clear(); - bool composerSurfacePresent = false; + mComposerOutput = false; for (size_t i = 0; i < mOutputStreams.size(); i++) { // Don't configure bidi streams twice, nor add them twice to the list @@ -2471,7 
+2499,7 @@ status_t Camera3Device::configureStreamsLocked(int operatingMode, } if (outputStream->usage & GraphicBuffer::USAGE_HW_COMPOSER) { - composerSurfacePresent = true; + mComposerOutput = true; } } @@ -2540,7 +2568,7 @@ status_t Camera3Device::configureStreamsLocked(int operatingMode, } } - mRequestThread->setComposerSurface(composerSurfacePresent); + mRequestThread->setComposerSurface(mComposerOutput); // Request thread needs to know to avoid using repeat-last-settings protocol // across configure_streams() calls @@ -3503,6 +3531,16 @@ bool Camera3Device::RequestThread::threadLoop() { latestRequestId = NAME_NOT_FOUND; } + for (size_t i = 0; i < mNextRequests.size(); i++) { + auto& nextRequest = mNextRequests.editItemAt(i); + sp<CaptureRequest> captureRequest = nextRequest.captureRequest; + // Do not override rotate&crop for stream configurations that include + // SurfaceViews(HW_COMPOSER) output, unless mOverrideToPortrait is set. + // The display rotation there will be compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY + captureRequest->mRotateAndCropChanged = (mComposerOutput && !mOverrideToPortrait) ? false : + overrideAutoRotateAndCrop(captureRequest); + } + // 'mNextRequests' will at this point contain either a set of HFR batched requests // or a single request from streaming or burst. In either case the first element // should contain the latest camera settings that we need to check for any session @@ -3648,18 +3686,13 @@ status_t Camera3Device::RequestThread::prepareHalRequests() { bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0); mPrevTriggers = triggerCount; - // Do not override rotate&crop for stream configurations that include - // SurfaceViews(HW_COMPOSER) output, unless mOverrideToPortrait is set. - // The display rotation there will be compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY - bool rotateAndCropChanged = (mComposerOutput && !mOverrideToPortrait) ? 
false : - overrideAutoRotateAndCrop(captureRequest); bool testPatternChanged = overrideTestPattern(captureRequest); // If the request is the same as last, or we had triggers now or last time or // changing overrides this time bool newRequest = (mPrevRequest != captureRequest || triggersMixedIn || - rotateAndCropChanged || testPatternChanged) && + captureRequest->mRotateAndCropChanged || testPatternChanged) && // Request settings are all the same within one batch, so only treat the first // request in a batch as new !(batchedRequest && i > 0); @@ -4122,9 +4155,6 @@ status_t Camera3Device::RequestThread::setRotateAndCropAutoBehavior( camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue) { ATRACE_CALL(); Mutex::Autolock l(mTriggerMutex); - if (rotateAndCropValue == ANDROID_SCALER_ROTATE_AND_CROP_AUTO) { - return BAD_VALUE; - } mRotateAndCropOverride = rotateAndCropValue; return OK; } @@ -4188,6 +4218,19 @@ status_t Camera3Device::setCameraServiceWatchdog(bool enabled) { return OK; } +void Camera3Device::setStreamUseCaseOverrides( + const std::vector<int64_t>& useCaseOverrides) { + Mutex::Autolock il(mInterfaceLock); + Mutex::Autolock l(mLock); + mStreamUseCaseOverrides = useCaseOverrides; +} + +void Camera3Device::clearStreamUseCaseOverrides() { + Mutex::Autolock il(mInterfaceLock); + Mutex::Autolock l(mLock); + mStreamUseCaseOverrides.clear(); +} + void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) { if (mNextRequests.empty()) { return; @@ -4688,13 +4731,20 @@ status_t Camera3Device::RequestThread::addFakeTriggerIds( return OK; } -bool Camera3Device::RequestThread::overrideAutoRotateAndCrop( - const sp<CaptureRequest> &request) { +bool Camera3Device::RequestThread::overrideAutoRotateAndCrop(const sp<CaptureRequest> &request) { + ATRACE_CALL(); + Mutex::Autolock l(mTriggerMutex); + return Camera3Device::overrideAutoRotateAndCrop(request, this->mOverrideToPortrait, + this->mRotateAndCropOverride); +} + +bool 
Camera3Device::overrideAutoRotateAndCrop(const sp<CaptureRequest> &request, + bool overrideToPortrait, + camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropOverride) { ATRACE_CALL(); - if (mOverrideToPortrait) { - Mutex::Autolock l(mTriggerMutex); - uint8_t rotateAndCrop_u8 = mRotateAndCropOverride; + if (overrideToPortrait) { + uint8_t rotateAndCrop_u8 = rotateAndCropOverride; CameraMetadata &metadata = request->mSettingsList.begin()->metadata; metadata.update(ANDROID_SCALER_ROTATE_AND_CROP, &rotateAndCrop_u8, 1); @@ -4702,24 +4752,23 @@ bool Camera3Device::RequestThread::overrideAutoRotateAndCrop( } if (request->mRotateAndCropAuto) { - Mutex::Autolock l(mTriggerMutex); CameraMetadata &metadata = request->mSettingsList.begin()->metadata; auto rotateAndCropEntry = metadata.find(ANDROID_SCALER_ROTATE_AND_CROP); if (rotateAndCropEntry.count > 0) { - if (rotateAndCropEntry.data.u8[0] == mRotateAndCropOverride) { + if (rotateAndCropEntry.data.u8[0] == rotateAndCropOverride) { return false; } else { - rotateAndCropEntry.data.u8[0] = mRotateAndCropOverride; + rotateAndCropEntry.data.u8[0] = rotateAndCropOverride; return true; } } else { - uint8_t rotateAndCrop_u8 = mRotateAndCropOverride; - metadata.update(ANDROID_SCALER_ROTATE_AND_CROP, - &rotateAndCrop_u8, 1); + uint8_t rotateAndCrop_u8 = rotateAndCropOverride; + metadata.update(ANDROID_SCALER_ROTATE_AND_CROP, &rotateAndCrop_u8, 1); return true; } } + return false; } @@ -5205,6 +5254,10 @@ status_t Camera3Device::setRotateAndCropAutoBehavior( if (mRequestThread == nullptr) { return INVALID_OPERATION; } + if (rotateAndCropValue == ANDROID_SCALER_ROTATE_AND_CROP_AUTO) { + return BAD_VALUE; + } + mRotateAndCropOverride = rotateAndCropValue; return mRequestThread->setRotateAndCropAutoBehavior(rotateAndCropValue); } @@ -5301,4 +5354,55 @@ status_t Camera3Device::stopInjection() { return mInjectionMethods->stopInjection(); } +void Camera3Device::overrideStreamUseCaseLocked() { + if 
(mStreamUseCaseOverrides.size() == 0) { + return; + } + + // Start from an array of indexes in mStreamUseCaseOverrides, and sort them + // based first on size, and second on formats of [JPEG, RAW, YUV, PRIV]. + std::vector<int> outputStreamsIndices(mOutputStreams.size()); + for (size_t i = 0; i < outputStreamsIndices.size(); i++) { + outputStreamsIndices[i] = i; + } + + std::sort(outputStreamsIndices.begin(), outputStreamsIndices.end(), + [&](int a, int b) -> bool { + + auto formatScore = [](int format) { + switch (format) { + case HAL_PIXEL_FORMAT_BLOB: + return 4; + case HAL_PIXEL_FORMAT_RAW16: + case HAL_PIXEL_FORMAT_RAW10: + case HAL_PIXEL_FORMAT_RAW12: + return 3; + case HAL_PIXEL_FORMAT_YCBCR_420_888: + return 2; + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + return 1; + default: + return 0; + } + }; + + int sizeA = mOutputStreams[a]->getWidth() * mOutputStreams[a]->getHeight(); + int sizeB = mOutputStreams[b]->getWidth() * mOutputStreams[b]->getHeight(); + int formatAScore = formatScore(mOutputStreams[a]->getFormat()); + int formatBScore = formatScore(mOutputStreams[b]->getFormat()); + if (sizeA > sizeB || + (sizeA == sizeB && formatAScore > formatBScore)) { + return true; + } else { + return false; + } + }); + + size_t overlapSize = std::min(mStreamUseCaseOverrides.size(), mOutputStreams.size()); + for (size_t i = 0; i < mOutputStreams.size(); i++) { + mOutputStreams[outputStreamsIndices[i]]->setStreamUseCase( + mStreamUseCaseOverrides[std::min(i, overlapSize-1)]); + } +} + }; // namespace android diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h index 1a21c12513..e6073a94d4 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.h +++ b/services/camera/libcameraservice/device3/Camera3Device.h @@ -287,6 +287,13 @@ class Camera3Device : */ status_t setCameraServiceWatchdog(bool enabled); + // Set stream use case overrides + void setStreamUseCaseOverrides( + const 
std::vector<int64_t>& useCaseOverrides); + + // Clear stream use case overrides + void clearStreamUseCaseOverrides(); + // Get the status trackeer for the camera device wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; } @@ -574,6 +581,9 @@ class Camera3Device : // overriding of ROTATE_AND_CROP value and adjustment of coordinates // in several other controls in both the request and the result bool mRotateAndCropAuto; + // Indicates that the ROTATE_AND_CROP value within 'mSettingsList' was modified + // irrespective of the original value. + bool mRotateAndCropChanged = false; // Whether this capture request has its zoom ratio set to 1.0x before // the framework overrides it for camera HAL consumption. @@ -760,6 +770,11 @@ class Camera3Device : */ static nsecs_t getMonoToBoottimeOffset(); + // Override rotate_and_crop control if needed + static bool overrideAutoRotateAndCrop(const sp<CaptureRequest> &request /*out*/, + bool overrideToPortrait, + camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropOverride); + struct RequestTrigger { // Metadata tag number, e.g. android.control.aePrecaptureTrigger uint32_t metadataTag; @@ -910,7 +925,7 @@ class Camera3Device : status_t addFakeTriggerIds(const sp<CaptureRequest> &request); // Override rotate_and_crop control if needed; returns true if the current value was changed - bool overrideAutoRotateAndCrop(const sp<CaptureRequest> &request); + bool overrideAutoRotateAndCrop(const sp<CaptureRequest> &request /*out*/); // Override test_pattern control if needed for camera mute; returns true // if the current value was changed @@ -1349,6 +1364,11 @@ class Camera3Device : // Whether the camera framework overrides the device characteristics for // app compatibility reasons. 
bool mOverrideToPortrait; + camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride; + bool mComposerOutput; + + // Current active physical id of the logical multi-camera, if any + std::string mActivePhysicalId; // The current minimum expected frame duration based on AE_TARGET_FPS_RANGE nsecs_t mMinExpectedDuration = 0; @@ -1438,6 +1458,8 @@ class Camera3Device : sp<Camera3DeviceInjectionMethods> mInjectionMethods; + void overrideStreamUseCaseLocked(); + }; // class Camera3Device }; // namespace android diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h index a93d1da759..1e9f4782fb 100644 --- a/services/camera/libcameraservice/device3/Camera3FakeStream.h +++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h @@ -101,6 +101,8 @@ class Camera3FakeStream : virtual status_t setBatchSize(size_t batchSize) override; virtual void onMinDurationChanged(nsecs_t /*duration*/, bool /*fixedFps*/) {} + + virtual void setStreamUseCase(int64_t /*streamUseCase*/) {} protected: /** diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp index b841d8ebc8..5c37da9264 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp @@ -1388,6 +1388,11 @@ void Camera3OutputStream::onMinDurationChanged(nsecs_t duration, bool fixedFps) mFixedFps = fixedFps; } +void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) { + Mutex::Autolock l(mLock); + camera_stream::use_case = streamUseCase; +} + void Camera3OutputStream::returnPrefetchedBuffersLocked() { std::vector<Surface::BatchBuffer> batchedBuffers; diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h index db988a0114..a719d6be52 100644 --- 
a/services/camera/libcameraservice/device3/Camera3OutputStream.h +++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h @@ -253,6 +253,11 @@ class Camera3OutputStream : virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) override; /** + * Modify stream use case + */ + virtual void setStreamUseCase(int64_t streamUseCase) override; + + /** * Apply ZSL related consumer usage quirk. */ static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/); diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h index dbc6fe1514..4baa7e8298 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h +++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h @@ -117,6 +117,11 @@ class Camera3OutputStreamInterface : public virtual Camera3StreamInterface { * AE_TARGET_FPS_RANGE in the capture request. */ virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) = 0; + + /** + * Modify the stream use case for this output. 
+ */ + virtual void setStreamUseCase(int64_t streamUseCase) = 0; }; // Helper class to organize a synchronized mapping of stream IDs to stream instances diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp index 65693956ae..5021f297c8 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp +++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp @@ -521,27 +521,50 @@ void processCaptureResult(CaptureOutputStates& states, const camera_capture_resu if (result->partial_result != 0) request.resultExtras.partialResultCount = result->partial_result; - if ((result->result != nullptr) && !states.legacyClient && !states.overrideToPortrait) { + if (result->result != nullptr) { camera_metadata_ro_entry entry; auto ret = find_camera_metadata_ro_entry(result->result, ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry); if ((ret == OK) && (entry.count > 0)) { std::string physicalId(reinterpret_cast<const char *>(entry.data.u8)); - auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId); - if (deviceInfo != states.physicalDeviceInfoMap.end()) { - auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION); - if (orientation.count > 0) { - ret = CameraUtils::getRotationTransform(deviceInfo->second, - OutputConfiguration::MIRROR_MODE_AUTO, &request.transform); - if (ret != OK) { - ALOGE("%s: Failed to calculate current stream transformation: %s (%d)", - __FUNCTION__, strerror(-ret), ret); + if (!states.activePhysicalId.empty() && physicalId != states.activePhysicalId) { + states.listener->notifyPhysicalCameraChange(physicalId); + } + states.activePhysicalId = physicalId; + + if (!states.legacyClient && !states.overrideToPortrait) { + auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId); + if (deviceInfo != states.physicalDeviceInfoMap.end()) { + auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION); + if 
(orientation.count > 0) { + int32_t transform; + ret = CameraUtils::getRotationTransform(deviceInfo->second, + OutputConfiguration::MIRROR_MODE_AUTO, &transform); + if (ret == OK) { + // It is possible for camera providers to return the capture + // results after the processed frames. In such scenario, we will + // not be able to set the output transformation before the frames + // return back to the consumer for the current capture request + // but we could still try and configure it for any future requests + // that are still in flight. The assumption is that the physical + // device id remains the same for the duration of the pending queue. + for (size_t i = 0; i < states.inflightMap.size(); i++) { + auto &r = states.inflightMap.editValueAt(i); + if (r.requestTimeNs >= request.requestTimeNs) { + r.transform = transform; + } + } + } else { + ALOGE("%s: Failed to calculate current stream transformation: %s " + "(%d)", __FUNCTION__, strerror(-ret), ret); + } + } else { + ALOGE("%s: Physical device orientation absent!", __FUNCTION__); } } else { - ALOGE("%s: Physical device orientation absent!", __FUNCTION__); + ALOGE("%s: Physical device not found in device info map found!", + __FUNCTION__); } - } else { - ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__); } } } diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h index 019c8a8c11..d5328c5ad4 100644 --- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h +++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h @@ -108,6 +108,7 @@ namespace camera3 { nsecs_t& minFrameDuration; bool& isFixedFps; bool overrideToPortrait; + std::string &activePhysicalId; }; void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result); diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp 
b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp index 1e103f2522..1bc9ef2490 100644 --- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp +++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp @@ -210,7 +210,7 @@ status_t AidlCamera3Device::initialize(sp<CameraProviderManager> manager, // Do not override characteristics for physical cameras res = manager->getCameraCharacteristics( physicalId, /*overrideForPerfClass*/false, &mPhysicalDeviceInfoMap[physicalId], - /*overrideToPortrait*/true); + mOverrideToPortrait); if (res != OK) { SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)", physicalId.c_str(), strerror(-res), res); @@ -376,7 +376,7 @@ status_t AidlCamera3Device::initialize(sp<CameraProviderManager> manager, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps, - mOverrideToPortrait}, mResultMetadataQueue + mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue }; for (const auto& result : results) { @@ -418,7 +418,7 @@ status_t AidlCamera3Device::initialize(sp<CameraProviderManager> manager, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps, - mOverrideToPortrait}, mResultMetadataQueue + mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue }; for (const auto& msg : msgs) { camera3::notify(states, msg); diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp index 9ce0622fdc..4b1fb1d801 100644 --- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp +++ 
b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp @@ -111,6 +111,7 @@ status_t AidlCamera3OfflineSession::initialize(wp<NotificationListener> listener listener = mListener.promote(); } + std::string activePhysicalId(""); // Unused AidlCaptureOutputStates states { {mId, mOfflineReqsLock, mLastCompletedRegularFrameNumber, @@ -125,7 +126,7 @@ status_t AidlCamera3OfflineSession::initialize(wp<NotificationListener> listener mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps, - /*overrideToPortrait*/false}, mResultMetadataQueue + /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue }; std::lock_guard<std::mutex> lock(mProcessCaptureResultLock); @@ -157,6 +158,7 @@ status_t AidlCamera3OfflineSession::initialize(wp<NotificationListener> listener listener = mListener.promote(); } + std::string activePhysicalId(""); // Unused AidlCaptureOutputStates states { {mId, mOfflineReqsLock, mLastCompletedRegularFrameNumber, @@ -171,7 +173,7 @@ status_t AidlCamera3OfflineSession::initialize(wp<NotificationListener> listener mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps, - /*overrideToPortrait*/false}, mResultMetadataQueue + /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue }; for (const auto& msg : msgs) { camera3::notify(states, msg); diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp index 44c60cfe69..c675c6349e 100644 --- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp +++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp @@ -163,7 
+163,7 @@ status_t HidlCamera3Device::initialize(sp<CameraProviderManager> manager, } res = manager->getCameraCharacteristics(mId.string(), mOverrideForPerfClass, &mDeviceInfo, - mOverrideToPortrait); + /*overrideToPortrait*/false); if (res != OK) { SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res); session->close(); @@ -178,7 +178,7 @@ status_t HidlCamera3Device::initialize(sp<CameraProviderManager> manager, // Do not override characteristics for physical cameras res = manager->getCameraCharacteristics( physicalId, /*overrideForPerfClass*/false, &mPhysicalDeviceInfoMap[physicalId], - /*overrideToPortrait*/true); + /*overrideToPortrait*/false); if (res != OK) { SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)", physicalId.c_str(), strerror(-res), res); @@ -365,8 +365,8 @@ hardware::Return<void> HidlCamera3Device::processCaptureResult_3_4( mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, - *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait}, - mResultMetadataQueue + *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait, + mActivePhysicalId}, mResultMetadataQueue }; //HidlCaptureOutputStates hidlStates { @@ -428,8 +428,8 @@ hardware::Return<void> HidlCamera3Device::processCaptureResult( mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, - *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait}, - mResultMetadataQueue + *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait, + mActivePhysicalId}, mResultMetadataQueue }; for (const auto& result : results) { @@ -476,8 
+476,8 @@ hardware::Return<void> HidlCamera3Device::notifyHelper( mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, - *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait}, - mResultMetadataQueue + *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait, + mActivePhysicalId}, mResultMetadataQueue }; for (const auto& msg : msgs) { camera3::notify(states, msg); diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp index c7f8fa1497..0a6a6f73b0 100644 --- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp +++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp @@ -92,6 +92,7 @@ hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult_3_4( listener = mListener.promote(); } + std::string activePhysicalId(""); HidlCaptureOutputStates states { {mId, mOfflineReqsLock, mLastCompletedRegularFrameNumber, @@ -106,7 +107,7 @@ hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult_3_4( mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps, - /*overrideToPortrait*/false}, mResultMetadataQueue + /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue }; std::lock_guard<std::mutex> lock(mProcessCaptureResultLock); @@ -133,6 +134,7 @@ hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult( hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata; + std::string activePhysicalId(""); HidlCaptureOutputStates states { {mId, mOfflineReqsLock, 
mLastCompletedRegularFrameNumber, @@ -147,7 +149,7 @@ hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult( mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps, - /*overrideToPortrait*/false}, mResultMetadataQueue + /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue }; std::lock_guard<std::mutex> lock(mProcessCaptureResultLock); @@ -169,6 +171,7 @@ hardware::Return<void> HidlCamera3OfflineSession::notify( listener = mListener.promote(); } + std::string activePhysicalId(""); HidlCaptureOutputStates states { {mId, mOfflineReqsLock, mLastCompletedRegularFrameNumber, @@ -183,7 +186,7 @@ hardware::Return<void> HidlCamera3OfflineSession::notify( mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers, mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps, - /*overrideToPortrait*/false}, mResultMetadataQueue + /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue }; for (const auto& msg : msgs) { camera3::notify(states, msg); diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp index 259e8a5666..fc063aba66 100644 --- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp +++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp @@ -65,7 +65,7 @@ HidlCameraService::getCameraCharacteristics(const hidl_string& cameraId, HStatus status = HStatus::NO_ERROR; binder::Status serviceRet = mAidlICameraService->getCameraCharacteristics(String16(cameraId.c_str()), - /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/true, + /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/false, &cameraMetadata); HCameraMetadata hidlMetadata; 
if (!serviceRet.isOk()) { @@ -117,7 +117,7 @@ Return<void> HidlCameraService::connectDevice(const sp<HCameraDeviceCallback>& h binder::Status serviceRet = mAidlICameraService->connectDevice( callbacks, String16(cameraId.c_str()), String16(""), {}, hardware::ICameraService::USE_CALLING_UID, 0/*oomScoreOffset*/, - /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/true, + /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/false, /*out*/&deviceRemote); HStatus status = HStatus::NO_ERROR; if (!serviceRet.isOk()) { diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp index 09f8eb601a..120d43de82 100644 --- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp +++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp @@ -321,6 +321,7 @@ void CameraFuzzer::invokeCameraAPIs() { rc = mCameraService->connect(this, cameraId, String16(), android::CameraService::USE_CALLING_UID, android::CameraService::USE_CALLING_PID, /*targetSdkVersion*/__ANDROID_API_FUTURE__, /*overrideToPortrait*/true, + /*forceSlowJpegMode*/false, &cameraDevice); if (!rc.isOk()) { // camera not connected |