60 files changed, 1510 insertions, 154 deletions
diff --git a/camera/Android.bp b/camera/Android.bp
index fa36bb31b1..21ef9eed70 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -14,6 +14,7 @@ cc_library_shared {
     name: "libcamera_client",
+    defaults: ["camera_parameter_library_defaults"],
     aidl: {
         export_aidl_headers: true,
@@ -32,7 +33,6 @@ cc_library_shared {
         // Source for camera interface parcelables, and manually-written interfaces
         "Camera.cpp",
         "CameraMetadata.cpp",
-        "CameraParameters.cpp",
         "CaptureResult.cpp",
         "CameraParameters2.cpp",
         "ICamera.cpp",
@@ -77,6 +77,16 @@ cc_library_shared {
 }
+cc_library_static {
+    name: "libcamera_parameters",
+
+    export_include_dirs: [
+        "include",
+    ],
+    srcs: ["CameraParameters.cpp"],
+
+}
+
 // AIDL interface between camera clients and the camera service.
 filegroup {
     name: "libcamera_client_aidl",
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index 68969cf649..de8ac2ffce 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -237,6 +237,9 @@ void CameraParameters::unflatten(const String8 &params)
 void CameraParameters::set(const char *key, const char *value)
 {
+    if (key == NULL || value == NULL)
+        return;
+
     // XXX i think i can do this with strspn()
     if (strchr(key, '=') || strchr(key, ';')) {
         //XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index bef2ea0acd..be82ff4ec6 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -51,7 +51,11 @@ public:
         data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
         data.writeInt32(msgType);
         data.writeInt32(ext1);
-        data.writeInt32(ext2);
+        if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
+            data.writeFileDescriptor(ext2);
+        } else {
+            data.writeInt32(ext2);
+        }
         remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
     }
@@ -129,8 +133,13 @@ status_t BnCameraClient::onTransact(
             ALOGV("NOTIFY_CALLBACK");
             CHECK_INTERFACE(ICameraClient, data, reply);
             int32_t msgType = data.readInt32();
-            int32_t ext1 = data.readInt32();
-            int32_t ext2 = data.readInt32();
+            int32_t ext1 = data.readInt32();
+            int32_t ext2 = 0;
+            if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
+                ext2 = data.readFileDescriptor();
+            } else {
+                ext2 = data.readInt32();
+            }
             notifyCallback(msgType, ext1, ext2);
             return NO_ERROR;
         } break;
diff --git a/camera/include/camera/CameraParameters2.h b/camera/include/camera/CameraParameters2.h
index f691cd6ea0..5fae079376 100644
--- a/camera/include/camera/CameraParameters2.h
+++ b/camera/include/camera/CameraParameters2.h
@@ -19,7 +19,7 @@
 #include <utils/Vector.h>
 #include <utils/String8.h>
-#include "CameraParameters.h"
+#include <camera/CameraParameters.h>
 namespace android {
diff --git a/include/media/AudioSession.h b/include/media/AudioSession.h
new file mode 120000
index 0000000000..005e48e7ed
--- /dev/null
+++ b/include/media/AudioSession.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioSession.h
\ No newline at end of file
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index c7588e9a51..dd1f4858a5 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -25,6 +25,7 @@
 #include <C2AllocatorGralloc.h>
 #include <C2BlockInternal.h>
 #include <C2Component.h>
+#include <C2Config.h>
 #include <C2PlatformSupport.h>
 #include <OMX_Component.h>
@@ -44,6 +45,8 @@ namespace android {
 namespace {
+constexpr OMX_U32 kPortIndexInput = 0;
+
 class Buffer2D : public C2Buffer {
 public:
     explicit Buffer2D(C2ConstGraphicBlock block) : C2Buffer({ block }) {}
@@ -200,11 +203,27 @@ status_t C2OMXNode::getParameter(OMX_INDEXTYPE index, void *params, size_t size)
                 return BAD_VALUE;
             }
             OMX_PARAM_PORTDEFINITIONTYPE *pDef = (OMX_PARAM_PORTDEFINITIONTYPE *)params;
-            // TODO: read these from intf()
+            if (pDef->nPortIndex != kPortIndexInput) {
+                break;
+            }
+            pDef->nBufferCountActual = 16;
+
+            std::shared_ptr<Codec2Client::Component> comp = mComp.lock();
+            C2PortActualDelayTuning::input inputDelay(0);
+            C2ActualPipelineDelayTuning pipelineDelay(0);
+            c2_status_t c2err = comp->query(
+                    {&inputDelay, &pipelineDelay}, {}, C2_DONT_BLOCK, nullptr);
+            if (c2err == C2_OK || c2err == C2_BAD_INDEX) {
+                pDef->nBufferCountActual = 4;
+                pDef->nBufferCountActual += (inputDelay ? inputDelay.value : 0u);
+                pDef->nBufferCountActual += (pipelineDelay ? pipelineDelay.value : 0u);
+            }
+
             pDef->eDomain = OMX_PortDomainVideo;
             pDef->format.video.nFrameWidth = mWidth;
             pDef->format.video.nFrameHeight = mHeight;
+            pDef->format.video.eColorFormat = OMX_COLOR_FormatAndroidOpaque;
             err = OK;
             break;
         }
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 54107bd852..55ff18f4a2 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -246,8 +246,24 @@ public:
         if (source == nullptr) {
             return NO_INIT;
         }
-        constexpr size_t kNumSlots = 16;
-        for (size_t i = 0; i < kNumSlots; ++i) {
+
+        size_t numSlots = 16;
+        // WORKAROUND: having more slots improve performance while consuming
+        // more memory. This is a temporary workaround to reduce memory for
+        // larger-than-4K scenario.
+        if (mWidth * mHeight > 4096 * 2340) {
+            constexpr OMX_U32 kPortIndexInput = 0;
+
+            OMX_PARAM_PORTDEFINITIONTYPE param;
+            param.nPortIndex = kPortIndexInput;
+            status_t err = mNode->getParameter(OMX_IndexParamPortDefinition,
+                                               &param, sizeof(param));
+            if (err == OK) {
+                numSlots = param.nBufferCountActual;
+            }
+        }
+
+        for (size_t i = 0; i < numSlots; ++i) {
             source->onInputBufferAdded(i);
         }
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index bddaa9f22b..566a18fbee 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -91,9 +91,14 @@ void CCodecBuffers::handleImageData(const sp<Codec2Buffer> &buffer) {
     newFormat->setInt32(KEY_STRIDE, stride);
     ALOGD("[%s] updating stride = %d", mName, stride);
     if (img->mNumPlanes > 1 && stride > 0) {
-        int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+        int64_t offsetDelta =
+            (int64_t)img->mPlane[1].mOffset - (int64_t)img->mPlane[0].mOffset;
+        int32_t vstride = int32_t(offsetDelta / stride);
         newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
         ALOGD("[%s] updating vstride = %d", mName, vstride);
+        buffer->setRange(
+                img->mPlane[0].mOffset,
+                buffer->size() - img->mPlane[0].mOffset);
     }
 }
 setFormat(newFormat);
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 4772ab53eb..c383a7ce1a 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -33,8 +33,8 @@ class MemoryDealer;
 class SkipCutBuffer;
 constexpr size_t kLinearBufferSize = 1048576;
-// This can fit 4K RGBA frame, and most likely client won't need more than this.
-constexpr size_t kMaxLinearBufferSize = 4096 * 2304 * 4;
+// This can fit an 8K frame.
+constexpr size_t kMaxLinearBufferSize = 7680 * 4320 * 2;
 /**
  * Base class for representation of buffers at one port.
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 25e7da9206..19414a0a0c 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -276,20 +276,22 @@ public:
         int32_t planeSize = 0;
         for (uint32_t i = 0; i < layout.numPlanes; ++i) {
             const C2PlaneInfo &plane = layout.planes[i];
-            ssize_t minOffset = plane.minOffset(mWidth, mHeight);
-            ssize_t maxOffset = plane.maxOffset(mWidth, mHeight);
+            int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
+            ssize_t minOffset = plane.minOffset(
+                    mWidth / plane.colSampling, mHeight / plane.rowSampling);
+            ssize_t maxOffset = plane.maxOffset(
+                    mWidth / plane.colSampling, mHeight / plane.rowSampling);
             if (minPtr > mView.data()[i] + minOffset) {
                 minPtr = mView.data()[i] + minOffset;
             }
             if (maxPtr < mView.data()[i] + maxOffset) {
                 maxPtr = mView.data()[i] + maxOffset;
             }
-            planeSize += std::abs(plane.rowInc) * align(mHeight, 64)
-                    / plane.rowSampling / plane.colSampling
-                    * divUp(mAllocatedDepth, 8u);
+            planeSize += planeStride * divUp(mAllocatedDepth, 8u)
+                    * align(mHeight, 64) / plane.rowSampling;
         }
-        if ((maxPtr - minPtr + 1) <= planeSize) {
+        if (minPtr == mView.data()[0] && (maxPtr - minPtr + 1) <= planeSize) {
             // FIXME: this is risky as reading/writing data out of bound results
             //        in an undefined behavior, but gralloc does assume a
             //        contiguous mapping
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
index bb35763f41..479acb109b 100644
--- a/media/codec2/sfplugin/InputSurfaceWrapper.h
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -61,24 +61,24 @@ public:
     /// Input Surface configuration
     struct Config {
         // IN PARAMS (GBS)
-        float mMinFps;            // minimum fps (repeat frame to achieve this)
-        float mMaxFps;            // max fps (via frame drop)
-        float mCaptureFps;        // capture fps
-        float mCodedFps;          // coded fps
-        bool mSuspended;          // suspended
-        int64_t mTimeOffsetUs;    // time offset (input => codec)
-        int64_t mSuspendAtUs;     // suspend/resume time
-        int64_t mStartAtUs;       // start time
-        bool mStopped;            // stopped
-        int64_t mStopAtUs;        // stop time
+        float mMinFps = 0.0;          // minimum fps (repeat frame to achieve this)
+        float mMaxFps = 0.0;          // max fps (via frame drop)
+        float mCaptureFps = 0.0;      // capture fps
+        float mCodedFps = 0.0;        // coded fps
+        bool mSuspended = false;      // suspended
+        int64_t mTimeOffsetUs = 0;    // time offset (input => codec)
+        int64_t mSuspendAtUs = 0;     // suspend/resume time
+        int64_t mStartAtUs = 0;       // start time
+        bool mStopped = false;        // stopped
+        int64_t mStopAtUs = 0;        // stop time
         // OUT PARAMS (GBS)
-        int64_t mInputDelayUs;    // delay between encoder input and surface input
+        int64_t mInputDelayUs = 0;    // delay between encoder input and surface input
         // IN PARAMS (CODEC WRAPPER)
-        float mFixedAdjustedFps;  // fixed fps via PTS manipulation
-        float mMinAdjustedFps;    // minimum fps via PTS manipulation
-        uint64_t mUsage;          // consumer usage
+        float mFixedAdjustedFps = 0.0;  // fixed fps via PTS manipulation
+        float mMinAdjustedFps = 0.0;    // minimum fps via PTS manipulation
+        uint64_t mUsage = 0;            // consumer usage
     };
 /**
diff --git a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
index 5bee605276..ad8f6e555b 100644
--- a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
@@ -18,22 +18,31 @@
 #include <gtest/gtest.h>
+#include <media/stagefright/foundation/AString.h>
+#include
<media/stagefright/MediaCodecConstants.h> +#include <C2BlockInternal.h> #include <C2PlatformSupport.h> namespace android { +static std::shared_ptr<RawGraphicOutputBuffers> GetRawGraphicOutputBuffers( + int32_t width, int32_t height) { + std::shared_ptr<RawGraphicOutputBuffers> buffers = + std::make_shared<RawGraphicOutputBuffers>("test"); + sp<AMessage> format{new AMessage}; + format->setInt32(KEY_WIDTH, width); + format->setInt32(KEY_HEIGHT, height); + buffers->setFormat(format); + return buffers; +} + TEST(RawGraphicOutputBuffersTest, ChangeNumSlots) { constexpr int32_t kWidth = 3840; constexpr int32_t kHeight = 2160; std::shared_ptr<RawGraphicOutputBuffers> buffers = - std::make_shared<RawGraphicOutputBuffers>("test"); - sp<AMessage> format{new AMessage}; - format->setInt32("width", kWidth); - format->setInt32("height", kHeight); - buffers->setFormat(format); + GetRawGraphicOutputBuffers(kWidth, kHeight); std::shared_ptr<C2BlockPool> pool; ASSERT_EQ(OK, GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, nullptr, &pool)); @@ -96,4 +105,435 @@ TEST(RawGraphicOutputBuffersTest, ChangeNumSlots) { } } +class TestGraphicAllocation : public C2GraphicAllocation { +public: + TestGraphicAllocation( + uint32_t width, + uint32_t height, + const C2PlanarLayout &layout, + size_t capacity, + std::vector<size_t> offsets) + : C2GraphicAllocation(width, height), + mLayout(layout), + mMemory(capacity, 0xAA), + mOffsets(offsets) { + } + + c2_status_t map( + C2Rect rect, C2MemoryUsage usage, C2Fence *fence, + C2PlanarLayout *layout, uint8_t **addr) override { + (void)rect; + (void)usage; + (void)fence; + *layout = mLayout; + for (size_t i = 0; i < mLayout.numPlanes; ++i) { + addr[i] = mMemory.data() + mOffsets[i]; + } + return C2_OK; + } + + c2_status_t unmap(uint8_t **, C2Rect, C2Fence *) override { return C2_OK; } + + C2Allocator::id_t getAllocatorId() const override { return -1; } + + const C2Handle *handle() const override { return nullptr; } + + bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) const override { + return other.get() == this; + } + +private: + C2PlanarLayout mLayout; + std::vector<uint8_t> mMemory; + std::vector<uint8_t *> mAddr; + std::vector<size_t> mOffsets; +}; + +class LayoutTest : public ::testing::TestWithParam<std::tuple<bool, std::string, bool, int32_t>> { +private: + static C2PlanarLayout YUVPlanarLayout(int32_t stride) { + C2PlanarLayout layout = { + C2PlanarLayout::TYPE_YUV, + 3, /* numPlanes */ + 3, /* rootPlanes */ + {}, /* planes --- to be filled below */ + }; + layout.planes[C2PlanarLayout::PLANE_Y] = { + C2PlaneInfo::CHANNEL_Y, + 1, /* colInc */ + stride, /* rowInc */ + 1, /* colSampling */ + 1, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_Y, /* rootIx */ + 0, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_U] = { + C2PlaneInfo::CHANNEL_CB, + 1, /* colInc */ + stride / 2, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_U, /* rootIx */ + 0, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_V] = { + C2PlaneInfo::CHANNEL_CR, + 1, /* colInc */ + stride / 2, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_V, /* rootIx */ + 0, /* offset */ + }; + return layout; + } + + static C2PlanarLayout 
YUVSemiPlanarLayout(int32_t stride) { + C2PlanarLayout layout = { + C2PlanarLayout::TYPE_YUV, + 3, /* numPlanes */ + 2, /* rootPlanes */ + {}, /* planes --- to be filled below */ + }; + layout.planes[C2PlanarLayout::PLANE_Y] = { + C2PlaneInfo::CHANNEL_Y, + 1, /* colInc */ + stride, /* rowInc */ + 1, /* colSampling */ + 1, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_Y, /* rootIx */ + 0, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_U] = { + C2PlaneInfo::CHANNEL_CB, + 2, /* colInc */ + stride, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_U, /* rootIx */ + 0, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_V] = { + C2PlaneInfo::CHANNEL_CR, + 2, /* colInc */ + stride, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_U, /* rootIx */ + 1, /* offset */ + }; + return layout; + } + + static C2PlanarLayout YVUSemiPlanarLayout(int32_t stride) { + C2PlanarLayout layout = { + C2PlanarLayout::TYPE_YUV, + 3, /* numPlanes */ + 2, /* rootPlanes */ + {}, /* planes --- to be filled below */ + }; + layout.planes[C2PlanarLayout::PLANE_Y] = { + C2PlaneInfo::CHANNEL_Y, + 1, /* colInc */ + stride, /* rowInc */ + 1, /* colSampling */ + 1, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_Y, /* rootIx */ + 0, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_U] = { + C2PlaneInfo::CHANNEL_CB, + 2, /* colInc */ + stride, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_V, /* rootIx */ + 1, /* offset */ + }; + layout.planes[C2PlanarLayout::PLANE_V] = { + C2PlaneInfo::CHANNEL_CR, + 2, /* colInc */ + stride, /* rowInc */ + 2, /* colSampling */ + 2, /* rowSampling */ + 8, /* allocatedDepth */ + 8, /* bitDepth */ + 0, /* rightShift */ + C2PlaneInfo::NATIVE, + C2PlanarLayout::PLANE_V, /* rootIx */ + 0, /* offset */ + }; + return layout; + } + + static std::shared_ptr<C2GraphicBlock> CreateGraphicBlock( + uint32_t width, + uint32_t height, + const C2PlanarLayout &layout, + size_t capacity, + std::vector<size_t> offsets) { + std::shared_ptr<C2GraphicAllocation> alloc = std::make_shared<TestGraphicAllocation>( + width, + height, + layout, + capacity, + offsets); + + return _C2BlockFactory::CreateGraphicBlock(alloc); + } + + static constexpr uint8_t GetPixelValue(uint8_t value, uint32_t row, uint32_t col) { + return (uint32_t(value) * row + col) & 0xFF; + } + + static void FillPlane(C2GraphicView &view, size_t index, uint8_t value) { + C2PlanarLayout layout = view.layout(); + + uint8_t *rowPtr = view.data()[index]; + C2PlaneInfo plane = layout.planes[index]; + for (uint32_t row = 0; row < view.height() / plane.rowSampling; ++row) { + uint8_t *colPtr = rowPtr; + for (uint32_t col = 0; col < view.width() / plane.colSampling; ++col) { + *colPtr = GetPixelValue(value, row, col); + colPtr += plane.colInc; + } + rowPtr += plane.rowInc; + } + } + + static void FillBlock(const std::shared_ptr<C2GraphicBlock> &block) { + C2GraphicView view = block->map().get(); + + FillPlane(view, C2PlanarLayout::PLANE_Y, 'Y'); + FillPlane(view, C2PlanarLayout::PLANE_U, 'U'); + 
FillPlane(view, C2PlanarLayout::PLANE_V, 'V'); + } + + static bool VerifyPlane( + const MediaImage2 *mediaImage, + const uint8_t *base, + uint32_t index, + uint8_t value, + std::string *errorMsg) { + *errorMsg = ""; + MediaImage2::PlaneInfo plane = mediaImage->mPlane[index]; + const uint8_t *rowPtr = base + plane.mOffset; + for (uint32_t row = 0; row < mediaImage->mHeight / plane.mVertSubsampling; ++row) { + const uint8_t *colPtr = rowPtr; + for (uint32_t col = 0; col < mediaImage->mWidth / plane.mHorizSubsampling; ++col) { + if (GetPixelValue(value, row, col) != *colPtr) { + *errorMsg = AStringPrintf("row=%u col=%u expected=%02x actual=%02x", + row, col, GetPixelValue(value, row, col), *colPtr).c_str(); + return false; + } + colPtr += plane.mColInc; + } + rowPtr += plane.mRowInc; + } + return true; + } + +public: + static constexpr int32_t kWidth = 320; + static constexpr int32_t kHeight = 240; + static constexpr int32_t kGapLength = kWidth * kHeight * 10; + + static std::shared_ptr<C2Buffer> CreateAndFillBufferFromParam(const ParamType ¶m) { + bool contiguous = std::get<0>(param); + std::string planeOrderStr = std::get<1>(param); + bool planar = std::get<2>(param); + int32_t stride = std::get<3>(param); + + C2PlanarLayout::plane_index_t planeOrder[3]; + C2PlanarLayout layout; + + if (planeOrderStr.size() != 3) { + return nullptr; + } + for (size_t i = 0; i < 3; ++i) { + C2PlanarLayout::plane_index_t planeIndex; + switch (planeOrderStr[i]) { + case 'Y': planeIndex = C2PlanarLayout::PLANE_Y; break; + case 'U': planeIndex = C2PlanarLayout::PLANE_U; break; + case 'V': planeIndex = C2PlanarLayout::PLANE_V; break; + default: return nullptr; + } + planeOrder[i] = planeIndex; + } + + if (planar) { + layout = YUVPlanarLayout(stride); + } else { // semi-planar + for (size_t i = 0; i < 3; ++i) { + if (planeOrder[i] == C2PlanarLayout::PLANE_U) { + layout = YUVSemiPlanarLayout(stride); + break; + } + if (planeOrder[i] == C2PlanarLayout::PLANE_V) { + layout = YVUSemiPlanarLayout(stride); + break; + } + } + } + + size_t yPlaneSize = stride * kHeight; + size_t uvPlaneSize = stride * kHeight / 4; + size_t capacity = yPlaneSize + uvPlaneSize * 2; + std::vector<size_t> offsets(3); + + if (!contiguous) { + if (planar) { + capacity += kGapLength * 2; + } else { // semi-planar + capacity += kGapLength; + } + } + + offsets[planeOrder[0]] = 0; + size_t planeSize = (planeOrder[0] == C2PlanarLayout::PLANE_Y) ? yPlaneSize : uvPlaneSize; + for (size_t i = 1; i < 3; ++i) { + offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + planeSize; + if (!contiguous) { + offsets[planeOrder[i]] += kGapLength; + } + planeSize = (planeOrder[i] == C2PlanarLayout::PLANE_Y) ? 
yPlaneSize : uvPlaneSize; + if (!planar // semi-planar + && planeOrder[i - 1] != C2PlanarLayout::PLANE_Y + && planeOrder[i] != C2PlanarLayout::PLANE_Y) { + offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + 1; + planeSize = uvPlaneSize * 2 - 1; + } + } + + std::shared_ptr<C2GraphicBlock> block = CreateGraphicBlock( + kWidth, + kHeight, + layout, + capacity, + offsets); + FillBlock(block); + return C2Buffer::CreateGraphicBuffer( + block->share(block->crop(), C2Fence())); + } + + static bool VerifyClientBuffer( + const sp<MediaCodecBuffer> &buffer, std::string *errorMsg) { + *errorMsg = ""; + sp<ABuffer> imageData; + if (!buffer->format()->findBuffer("image-data", &imageData)) { + *errorMsg = "Missing image data"; + return false; + } + MediaImage2 *mediaImage = (MediaImage2 *)imageData->data(); + if (mediaImage->mType != MediaImage2::MEDIA_IMAGE_TYPE_YUV) { + *errorMsg = AStringPrintf("Unexpected type: %d", mediaImage->mType).c_str(); + return false; + } + std::string planeErrorMsg; + if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::Y, 'Y', &planeErrorMsg)) { + *errorMsg = "Y plane does not match: " + planeErrorMsg; + return false; + } + if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::U, 'U', &planeErrorMsg)) { + *errorMsg = "U plane does not match: " + planeErrorMsg; + return false; + } + if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::V, 'V', &planeErrorMsg)) { + *errorMsg = "V plane does not match: " + planeErrorMsg; + return false; + } + + int32_t width, height, stride; + buffer->format()->findInt32(KEY_WIDTH, &width); + buffer->format()->findInt32(KEY_HEIGHT, &height); + buffer->format()->findInt32(KEY_STRIDE, &stride); + + MediaImage2 legacyYLayout = { + MediaImage2::MEDIA_IMAGE_TYPE_Y, + 1, // mNumPlanes + uint32_t(width), + uint32_t(height), + 8, + 8, + {}, // mPlane + }; + legacyYLayout.mPlane[MediaImage2::Y] = { + 0, // mOffset + 1, // mColInc + stride, // mRowInc + 1, // mHorizSubsampling + 1, // mVertSubsampling + }; + if (!VerifyPlane(&legacyYLayout, buffer->data(), MediaImage2::Y, 'Y', &planeErrorMsg)) { + *errorMsg = "Y plane by legacy layout does not match: " + planeErrorMsg; + return false; + } + return true; + } + +}; + +TEST_P(LayoutTest, VerifyLayout) { + std::shared_ptr<RawGraphicOutputBuffers> buffers = + GetRawGraphicOutputBuffers(kWidth, kHeight); + + std::shared_ptr<C2Buffer> c2Buffer = CreateAndFillBufferFromParam(GetParam()); + ASSERT_NE(nullptr, c2Buffer); + sp<MediaCodecBuffer> clientBuffer; + size_t index; + ASSERT_EQ(OK, buffers->registerBuffer(c2Buffer, &index, &clientBuffer)); + ASSERT_NE(nullptr, clientBuffer); + std::string errorMsg; + ASSERT_TRUE(VerifyClientBuffer(clientBuffer, &errorMsg)) << errorMsg; +} + +INSTANTIATE_TEST_SUITE_P( + RawGraphicOutputBuffersTest, + LayoutTest, + ::testing::Combine( + ::testing::Bool(), /* contiguous */ + ::testing::Values("YUV", "YVU", "UVY", "VUY"), + ::testing::Bool(), /* planar */ + ::testing::Values(320, 512)), + [](const ::testing::TestParamInfo<LayoutTest::ParamType> &info) { + std::string contiguous = std::get<0>(info.param) ? "Contiguous" : "Noncontiguous"; + std::string planar = std::get<2>(info.param) ? 
"Planar" : "SemiPlanar"; + return contiguous + + std::get<1>(info.param) + + planar + + std::to_string(std::get<3>(info.param)); + }); + } // namespace android diff --git a/media/codec2/vndk/C2AllocatorBlob.cpp b/media/codec2/vndk/C2AllocatorBlob.cpp index 50c9e59af8..aa054998f8 100644 --- a/media/codec2/vndk/C2AllocatorBlob.cpp +++ b/media/codec2/vndk/C2AllocatorBlob.cpp @@ -17,6 +17,8 @@ // #define LOG_NDEBUG 0 #define LOG_TAG "C2AllocatorBlob" +#include <set> + #include <C2AllocatorBlob.h> #include <C2PlatformSupport.h> @@ -67,6 +69,10 @@ public: private: const std::shared_ptr<C2GraphicAllocation> mGraphicAllocation; const C2Allocator::id_t mAllocatorId; + + std::mutex mMapLock; + std::multiset<std::pair<size_t, size_t>> mMappedOffsetSize; + uint8_t *mMappedAddr; }; C2AllocationBlob::C2AllocationBlob( @@ -74,20 +80,74 @@ C2AllocationBlob::C2AllocationBlob( C2Allocator::id_t allocatorId) : C2LinearAllocation(capacity), mGraphicAllocation(std::move(graphicAllocation)), - mAllocatorId(allocatorId) {} + mAllocatorId(allocatorId), + mMappedAddr(nullptr) {} -C2AllocationBlob::~C2AllocationBlob() {} +C2AllocationBlob::~C2AllocationBlob() { + if (mMappedAddr) { + C2Rect rect(capacity(), kLinearBufferHeight); + mGraphicAllocation->unmap(&mMappedAddr, rect, nullptr); + } +} c2_status_t C2AllocationBlob::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence, void** addr /* nonnull */) { + *addr = nullptr; + if (size > capacity() || offset > capacity() || offset > capacity() - size) { + ALOGV("C2AllocationBlob: map: bad offset / size: offset=%zu size=%zu capacity=%u", + offset, size, capacity()); + return C2_BAD_VALUE; + } + std::unique_lock<std::mutex> lock(mMapLock); + if (mMappedAddr) { + *addr = mMappedAddr + offset; + mMappedOffsetSize.insert({offset, size}); + ALOGV("C2AllocationBlob: mapped from existing mapping: offset=%zu size=%zu capacity=%u", + offset, size, capacity()); + return C2_OK; + } C2PlanarLayout layout; - C2Rect rect = C2Rect(size, kLinearBufferHeight).at(offset, 0u); - return mGraphicAllocation->map(rect, usage, fence, &layout, reinterpret_cast<uint8_t**>(addr)); + C2Rect rect = C2Rect(capacity(), kLinearBufferHeight); + c2_status_t err = mGraphicAllocation->map(rect, usage, fence, &layout, &mMappedAddr); + if (err != C2_OK) { + ALOGV("C2AllocationBlob: map failed: offset=%zu size=%zu capacity=%u err=%d", + offset, size, capacity(), err); + mMappedAddr = nullptr; + return err; + } + *addr = mMappedAddr + offset; + mMappedOffsetSize.insert({offset, size}); + ALOGV("C2AllocationBlob: new map succeeded: offset=%zu size=%zu capacity=%u", + offset, size, capacity()); + return C2_OK; } c2_status_t C2AllocationBlob::unmap(void* addr, size_t size, C2Fence* fenceFd) { - C2Rect rect(size, kLinearBufferHeight); - return mGraphicAllocation->unmap(reinterpret_cast<uint8_t**>(&addr), rect, fenceFd); + std::unique_lock<std::mutex> lock(mMapLock); + uint8_t *u8Addr = static_cast<uint8_t *>(addr); + if (u8Addr < mMappedAddr || mMappedAddr + capacity() < u8Addr + size) { + ALOGV("C2AllocationBlob: unmap: Bad addr / size: addr=%p size=%zu capacity=%u", + addr, size, capacity()); + return C2_BAD_VALUE; + } + auto it = mMappedOffsetSize.find(std::make_pair(u8Addr - mMappedAddr, size)); + if (it == mMappedOffsetSize.end()) { + ALOGV("C2AllocationBlob: unrecognized map: addr=%p size=%zu capacity=%u", + addr, size, capacity()); + return C2_BAD_VALUE; + } + mMappedOffsetSize.erase(it); + if (!mMappedOffsetSize.empty()) { + ALOGV("C2AllocationBlob: still maintain mapping: addr=%p size=%zu 
capacity=%u", + addr, size, capacity()); + return C2_OK; + } + C2Rect rect(capacity(), kLinearBufferHeight); + c2_status_t err = mGraphicAllocation->unmap(&mMappedAddr, rect, fenceFd); + ALOGV("C2AllocationBlob: last unmap: addr=%p size=%zu capacity=%u err=%d", + addr, size, capacity(), err); + mMappedAddr = nullptr; + return err; } /* ====================================== BLOB ALLOCATOR ====================================== */ diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp index e26a83160a..bf98822b53 100644 --- a/media/libaudioclient/AudioSystem.cpp +++ b/media/libaudioclient/AudioSystem.cpp @@ -43,6 +43,7 @@ Mutex AudioSystem::gLockAPS; sp<IAudioFlinger> AudioSystem::gAudioFlinger; sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient; std::set<audio_error_callback> AudioSystem::gAudioErrorCallbacks; +audio_session_callback AudioSystem::gAudioSessionCallback = NULL; dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL; record_config_callback AudioSystem::gRecordConfigCallback = NULL; @@ -753,6 +754,17 @@ status_t AudioSystem::AudioFlingerClient::removeAudioDeviceCallback( gRecordConfigCallback = cb; } +/*static*/ status_t AudioSystem::setAudioSessionCallback(audio_session_callback cb) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + + Mutex::Autolock _l(gLock); + gAudioSessionCallback = cb; + + return NO_ERROR; +} + // client singleton for AudioPolicyService binder interface // protected by gLockAPS sp<IAudioPolicyService> AudioSystem::gAudioPolicyService; @@ -1812,6 +1824,32 @@ void AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate( } } +// --------------------------------------------------------------------------- + +status_t AudioSystem::listAudioSessions(audio_stream_type_t stream, + Vector< sp<AudioSessionInfo>> &sessions) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + return aps->listAudioSessions(stream, sessions); +} + +void AudioSystem::AudioPolicyServiceClient::onOutputSessionEffectsUpdate( + sp<AudioSessionInfo>& info, bool added) +{ + ALOGV("AudioPolicyServiceClient::onOutputSessionEffectsUpdate(%d, %d, %d)", + info->mStream, info->mSessionId, added); + audio_session_callback cb = NULL; + { + Mutex::Autolock _l(AudioSystem::gLock); + cb = gAudioSessionCallback; + } + + if (cb != NULL) { + cb(AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE, info, added); + } +} + void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused) { { diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp index 43a5369220..223ebbbfa4 100644 --- a/media/libaudioclient/IAudioPolicyService.cpp +++ b/media/libaudioclient/IAudioPolicyService.cpp @@ -120,6 +120,7 @@ enum { AUDIO_MODULES_UPDATED, // oneway SET_CURRENT_IME_UID, REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER, + LIST_AUDIO_SESSIONS, }; #define MAX_ITEMS_PER_LIST 1024 @@ -1496,6 +1497,29 @@ public: if (status != NO_ERROR) return status; return NO_ERROR; } + + virtual status_t listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.writeInt32(streams); + status_t status = remote()->transact(LIST_AUDIO_SESSIONS, data, &reply); + if (status != NO_ERROR) { + return status; + } + + status = 
reply.readInt32(); + if (status == NO_ERROR) { + size_t size = (size_t)reply.readUint32(); + for (size_t i = 0; i < size && reply.dataAvail() > 0; i++) { + sp<AudioSessionInfo> info = new AudioSessionInfo(); + info->readFromParcel(reply); + sessions.push_back(info); + } + } + return status; + } }; IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService"); @@ -1570,7 +1594,8 @@ status_t BnAudioPolicyService::onTransact( case SET_ALLOWED_CAPTURE_POLICY: case AUDIO_MODULES_UPDATED: case SET_CURRENT_IME_UID: - case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER: { + case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER: + case LIST_AUDIO_SESSIONS: { if (!isServiceUid(IPCThreadState::self()->getCallingUid())) { ALOGW("%s: transaction %d received from PID %d unauthorized UID %d", __func__, code, IPCThreadState::self()->getCallingPid(), @@ -2231,6 +2256,23 @@ status_t BnAudioPolicyService::onTransact( return NO_ERROR; } break; + case LIST_AUDIO_SESSIONS: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + audio_stream_type_t streams = (audio_stream_type_t)data.readInt32(); + + Vector< sp<AudioSessionInfo>> sessions; + status_t status = listAudioSessions(streams, sessions); + + reply->writeInt32(status); + if (status == NO_ERROR) { + reply->writeUint32(static_cast<uint32_t>(sessions.size())); + for (size_t i = 0; i < sessions.size(); i++) { + sessions[i]->writeToParcel(reply); + } + } + return NO_ERROR; + } + case ACQUIRE_SOUNDTRIGGER_SESSION: { CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_session_t session = AUDIO_SESSION_NONE; diff --git a/media/libaudioclient/IAudioPolicyServiceClient.cpp b/media/libaudioclient/IAudioPolicyServiceClient.cpp index 0f9580c3ff..eeefff9551 100644 --- a/media/libaudioclient/IAudioPolicyServiceClient.cpp +++ b/media/libaudioclient/IAudioPolicyServiceClient.cpp @@ -33,6 +33,7 @@ enum { MIX_STATE_UPDATE, RECORDING_CONFIGURATION_UPDATE, VOLUME_GROUP_CHANGED, + OUTPUT_SESSION_EFFECTS_UPDATE, }; // ---------------------------------------------------------------------- @@ -149,6 +150,19 @@ public: data.writeInt32((int32_t) source); remote()->transact(RECORDING_CONFIGURATION_UPDATE, data, &reply, IBinder::FLAG_ONEWAY); } + + void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor()); + data.writeInt32(info->mStream); + data.writeInt32(info->mSessionId); + data.writeInt32(info->mFlags); + data.writeInt32(info->mChannelMask); + data.writeInt32(info->mUid); + data.writeInt32(added ? 
1 : 0); + remote()->transact(OUTPUT_SESSION_EFFECTS_UPDATE, data, &reply, IBinder::FLAG_ONEWAY); + } }; IMPLEMENT_META_INTERFACE(AudioPolicyServiceClient, "android.media.IAudioPolicyServiceClient"); @@ -202,6 +216,20 @@ status_t BnAudioPolicyServiceClient::onTransact( &deviceConfig, effects, patchHandle, source); return NO_ERROR; } break; + case OUTPUT_SESSION_EFFECTS_UPDATE: { + CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply); + audio_stream_type_t stream = static_cast<audio_stream_type_t>(data.readInt32()); + audio_session_t sessionId = static_cast<audio_session_t>(data.readInt32()); + audio_output_flags_t flags = static_cast<audio_output_flags_t>(data.readInt32()); + audio_channel_mask_t channelMask = static_cast<audio_channel_mask_t>(data.readInt32()); + uid_t uid = static_cast<uid_t>(data.readInt32()); + bool added = data.readInt32() > 0; + + sp<AudioSessionInfo> info = new AudioSessionInfo( + sessionId, stream, flags, channelMask, uid); + onOutputSessionEffectsUpdate(info, added); + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h index 00fe278951..b7474fbf0e 100644 --- a/media/libaudioclient/include/media/AudioPolicy.h +++ b/media/libaudioclient/include/media/AudioPolicy.h @@ -25,6 +25,7 @@ #include <utils/String8.h> #include <utils/Vector.h> #include <cutils/multiuser.h> +#include <media/AudioSession.h> namespace android { @@ -48,6 +49,7 @@ namespace android { // AudioSystem's implementation of the AudioPolicyClient interface // keep in sync with AudioSystem.java #define DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE 0 +#define AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE 10 #define MIX_STATE_DISABLED (-1) #define MIX_STATE_IDLE 0 diff --git a/media/libaudioclient/include/media/AudioSession.h b/media/libaudioclient/include/media/AudioSession.h new file mode 100644 index 0000000000..2bae5212cf --- /dev/null +++ b/media/libaudioclient/include/media/AudioSession.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2016 The CyanogenMod Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIOSESSION_H +#define ANDROID_AUDIOSESSION_H + +#include <stdint.h> +#include <sys/types.h> + +#include <system/audio.h> + +#include <utils/RefBase.h> +#include <utils/Errors.h> +#include <binder/Parcel.h> + +namespace android { + +// class to store streaminfo +class AudioSessionInfo : public RefBase { +public: + AudioSessionInfo(audio_session_t session, audio_stream_type_t stream, audio_output_flags_t flags, + audio_channel_mask_t channelMask, uid_t uid) : + mSessionId(session), mStream(stream), mFlags(flags), mChannelMask(channelMask), + mUid(uid), mRefCount(0) {} + + AudioSessionInfo() : mSessionId((audio_session_t) 0), mStream(AUDIO_STREAM_DEFAULT), mFlags(AUDIO_OUTPUT_FLAG_NONE), mChannelMask(AUDIO_CHANNEL_NONE), mUid(0) {} + + /*virtual*/ ~AudioSessionInfo() {} + + audio_session_t mSessionId; + audio_stream_type_t mStream; + audio_output_flags_t mFlags; + audio_channel_mask_t mChannelMask; + uid_t mUid; + + // AudioPolicyManager keeps mLock, no need for lock on reference count here + int mRefCount; + + void readFromParcel(const Parcel &parcel) { + mSessionId = (audio_session_t) parcel.readInt32(); + mStream = static_cast<audio_stream_type_t>(parcel.readInt32()); + mFlags = static_cast<audio_output_flags_t>(parcel.readInt32()); + mChannelMask = static_cast<audio_channel_mask_t>(parcel.readInt32()); + mUid = static_cast<uid_t>(parcel.readInt32()); + } + + void writeToParcel(Parcel *parcel) const { + parcel->writeInt32(mSessionId); + parcel->writeInt32(mStream); + parcel->writeInt32(mFlags); + parcel->writeInt32(mChannelMask); + parcel->writeInt32(mUid); + } +}; + +}; // namespace android + +#endif // ANDROID_AUDIOSESSION_H diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h index 19c2cbd165..dfd8b45a25 100644 --- a/media/libaudioclient/include/media/AudioSystem.h +++ b/media/libaudioclient/include/media/AudioSystem.h @@ -47,6 +47,8 @@ typedef void (*record_config_callback)(int event, std::vector<effect_descriptor_t> effects, audio_patch_handle_t patchHandle, audio_source_t source); +typedef void (*audio_session_callback)(int event, + sp<AudioSessionInfo>& session, bool added); class IAudioFlinger; class IAudioPolicyService; @@ -120,6 +122,7 @@ public: static void setDynPolicyCallback(dynamic_policy_callback cb); static void setRecordConfigCallback(record_config_callback); + static status_t setAudioSessionCallback(audio_session_callback cb); // helper function to obtain AudioFlinger service handle static const sp<IAudioFlinger> get_audio_flinger(); @@ -460,6 +463,9 @@ public: static status_t registerSoundTriggerCaptureStateListener( const sp<CaptureStateListener>& listener); + static status_t listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions); + // ---------------------------------------------------------------------------- class AudioVolumeGroupCallback : public RefBase @@ -593,6 +599,7 @@ private: std::vector<effect_descriptor_t> effects, audio_patch_handle_t patchHandle, audio_source_t source); + virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added); private: Mutex mLock; @@ -619,6 +626,7 @@ private: static std::set<audio_error_callback> gAudioErrorCallbacks; static dynamic_policy_callback gDynPolicyCallback; static record_config_callback gRecordConfigCallback; + static audio_session_callback gAudioSessionCallback; static size_t gInBuffSize; // previous parameters for recording buffer size queries diff --git 
a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h index 376c6eb6e6..b1001a89a2 100644 --- a/media/libaudioclient/include/media/IAudioPolicyService.h +++ b/media/libaudioclient/include/media/IAudioPolicyService.h @@ -254,6 +254,9 @@ public: virtual status_t registerSoundTriggerCaptureStateListener( const sp<media::ICaptureStateListener>& listener, bool* result) = 0; + + virtual status_t listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions) = 0; }; diff --git a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h index 47b31eedc3..bc36f749d3 100644 --- a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h +++ b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h @@ -25,6 +25,7 @@ #include <system/audio_effect.h> #include <media/AudioPolicy.h> #include <media/AudioVolumeGroup.h> +#include <media/AudioSession.h> namespace android { @@ -65,6 +66,8 @@ public: std::vector<effect_descriptor_t> effects, audio_patch_handle_t patchHandle, audio_source_t source) = 0; + // Notifies when a default effect set is attached to a session/stream + virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added) = 0; }; diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp index 637322f60a..8be961c9fe 100644 --- a/media/libmedia/MediaProfiles.cpp +++ b/media/libmedia/MediaProfiles.cpp @@ -240,7 +240,10 @@ MediaProfiles::createVideoCodec(const char **atts, MediaProfiles *profiles) const size_t nMappings = sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]); const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createVideoCodec failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::VideoCodec *videoCodec = new MediaProfiles::VideoCodec(static_cast<video_encoder>(codec), @@ -262,7 +265,10 @@ MediaProfiles::createAudioCodec(const char **atts, MediaProfiles *profiles) !strcmp("channels", atts[6])); const size_t nMappings = sizeof(sAudioEncoderNameMap)/sizeof(sAudioEncoderNameMap[0]); const int codec = findTagForName(sAudioEncoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createAudioCodec failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::AudioCodec *audioCodec = new MediaProfiles::AudioCodec(static_cast<audio_encoder>(codec), @@ -282,7 +288,10 @@ MediaProfiles::createAudioDecoderCap(const char **atts) const size_t nMappings = sizeof(sAudioDecoderNameMap)/sizeof(sAudioDecoderNameMap[0]); const int codec = findTagForName(sAudioDecoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createAudioDecoderCap failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::AudioDecoderCap *cap = new MediaProfiles::AudioDecoderCap(static_cast<audio_decoder>(codec)); @@ -298,7 +307,10 @@ MediaProfiles::createVideoDecoderCap(const char **atts) const size_t nMappings = sizeof(sVideoDecoderNameMap)/sizeof(sVideoDecoderNameMap[0]); const int codec = findTagForName(sVideoDecoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createVideoDecoderCap failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::VideoDecoderCap *cap = new 
MediaProfiles::VideoDecoderCap(static_cast<video_decoder>(codec)); @@ -322,7 +334,10 @@ MediaProfiles::createVideoEncoderCap(const char **atts) const size_t nMappings = sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]); const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createVideoEncoderCap failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::VideoEncoderCap *cap = new MediaProfiles::VideoEncoderCap(static_cast<video_encoder>(codec), @@ -346,7 +361,10 @@ MediaProfiles::createAudioEncoderCap(const char **atts) const size_t nMappings = sizeof(sAudioEncoderNameMap)/sizeof(sAudioEncoderNameMap[0]); const int codec = findTagForName(sAudioEncoderNameMap, nMappings, atts[1]); - CHECK(codec != -1); + if (codec == -1) { + ALOGE("MediaProfiles::createAudioEncoderCap failed to locate codec %s", atts[1]); + return nullptr; + } MediaProfiles::AudioEncoderCap *cap = new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]), @@ -386,11 +404,17 @@ MediaProfiles::createCamcorderProfile(int cameraId, const char **atts, Vector<in const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/ sizeof(sCamcorderQualityNameMap[0]); const int quality = findTagForName(sCamcorderQualityNameMap, nProfileMappings, atts[1]); - CHECK(quality != -1); + if (quality == -1) { + ALOGE("MediaProfiles::createCamcorderProfile failed to locate quality %s", atts[1]); + return nullptr; + } const size_t nFormatMappings = sizeof(sFileFormatMap)/sizeof(sFileFormatMap[0]); const int fileFormat = findTagForName(sFileFormatMap, nFormatMappings, atts[3]); - CHECK(fileFormat != -1); + if (fileFormat == -1) { + ALOGE("MediaProfiles::createCamcorderProfile failed to locate file format %s", atts[1]); + return nullptr; + } MediaProfiles::CamcorderProfile *profile = new MediaProfiles::CamcorderProfile; profile->mCameraId = cameraId; @@ -462,24 +486,39 @@ MediaProfiles::startElementHandler(void *userData, const char *name, const char createAudioCodec(atts, profiles); } else if (strcmp("VideoEncoderCap", name) == 0 && strcmp("true", atts[3]) == 0) { - profiles->mVideoEncoders.add(createVideoEncoderCap(atts)); + MediaProfiles::VideoEncoderCap* cap = createVideoEncoderCap(atts); + if (cap != nullptr) { + profiles->mVideoEncoders.add(cap); + } } else if (strcmp("AudioEncoderCap", name) == 0 && strcmp("true", atts[3]) == 0) { - profiles->mAudioEncoders.add(createAudioEncoderCap(atts)); + MediaProfiles::AudioEncoderCap* cap = createAudioEncoderCap(atts); + if (cap != nullptr) { + profiles->mAudioEncoders.add(cap); + } } else if (strcmp("VideoDecoderCap", name) == 0 && strcmp("true", atts[3]) == 0) { - profiles->mVideoDecoders.add(createVideoDecoderCap(atts)); + MediaProfiles::VideoDecoderCap* cap = createVideoDecoderCap(atts); + if (cap != nullptr) { + profiles->mVideoDecoders.add(cap); + } } else if (strcmp("AudioDecoderCap", name) == 0 && strcmp("true", atts[3]) == 0) { - profiles->mAudioDecoders.add(createAudioDecoderCap(atts)); + MediaProfiles::AudioDecoderCap* cap = createAudioDecoderCap(atts); + if (cap != nullptr) { + profiles->mAudioDecoders.add(cap); + } } else if (strcmp("EncoderOutputFileFormat", name) == 0) { profiles->mEncoderOutputFileFormats.add(createEncoderOutputFileFormat(atts)); } else if (strcmp("CamcorderProfiles", name) == 0) { profiles->mCurrentCameraId = getCameraId(atts); profiles->addStartTimeOffset(profiles->mCurrentCameraId, atts); } else if (strcmp("EncoderProfile", 
name) == 0) { - profiles->mCamcorderProfiles.add( - createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds)); + MediaProfiles::CamcorderProfile* profile = createCamcorderProfile( + profiles->mCurrentCameraId, atts, profiles->mCameraIds); + if (profile != nullptr) { + profiles->mCamcorderProfiles.add(profile); + } } else if (strcmp("ImageEncoding", name) == 0) { profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts); } diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index 7897959137..a7344de402 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -1663,7 +1663,7 @@ status_t StagefrightRecorder::setupCameraSource( Size videoSize; videoSize.width = mVideoWidth; videoSize.height = mVideoHeight; - if (mCaptureFpsEnable) { + if (mCaptureFpsEnable && mCaptureFps != mFrameRate) { if (!(mCaptureFps > 0.)) { ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps); return BAD_VALUE; @@ -1811,6 +1811,7 @@ status_t StagefrightRecorder::setupVideoEncoder( preferBFrames = false; tsLayers = 2; // use at least two layers as resulting video will likely be sped up } else if (mCaptureFps > maxPlaybackFps) { // slow-mo + format->setInt32("high-frame-rate", 1); maxPlaybackFps = mCaptureFps; // assume video will be played back at full capture speed preferBFrames = false; } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp index c1c4b55400..8a81ef118b 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp @@ -2350,9 +2350,6 @@ void NuPlayer::performDecoderFlush(FlushCommand audio, FlushCommand video) { void NuPlayer::performReset() { ALOGV("performReset"); - CHECK(mAudioDecoder == NULL); - CHECK(mVideoDecoder == NULL); - updatePlaybackTimer(true /* stopping */, "performReset"); updateRebufferingTimer(true /* stopping */, true /* exiting */); diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp index c30f048c2d..7e8fe45121 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp @@ -922,6 +922,11 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { firstEntry = false; int64_t mediaTimeUs; CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); + if (mediaTimeUs < 0) { + ALOGD("fillAudioBuffer: reset negative media time %.2f secs to zero", + mediaTimeUs / 1E6); + mediaTimeUs = 0; + } ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6); setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); } diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp index 3163316403..acf96fd9b8 100644 --- a/media/libstagefright/ACodec.cpp +++ b/media/libstagefright/ACodec.cpp @@ -279,6 +279,13 @@ protected: void postFillThisBuffer(BufferInfo *info); + void maybePostExtraOutputMetadataBufferRequest() { + if (!mPendingExtraOutputMetadataBufferRequest) { + (new AMessage(kWhatSubmitExtraOutputMetadataBuffer, mCodec))->post(); + mPendingExtraOutputMetadataBufferRequest = true; + } + } + private: // Handles an OMX message. Returns true iff message was handled. 
bool onOMXMessage(const sp<AMessage> &msg); @@ -302,6 +309,8 @@ private: void getMoreInputDataIfPossible(); + bool mPendingExtraOutputMetadataBufferRequest; + DISALLOW_EVIL_CONSTRUCTORS(BaseState); }; @@ -555,6 +564,7 @@ ACodec::ACodec() mShutdownInProgress(false), mExplicitShutdown(false), mIsLegacyVP9Decoder(false), + mIsLowLatency(false), mEncoderDelay(0), mEncoderPadding(0), mRotationDegrees(0), @@ -2237,6 +2247,12 @@ status_t ACodec::configureCodec( } err = setupG711Codec(encoder, sampleRate, numChannels); } + } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_OPUS)) { + int32_t numChannels = 1, sampleRate = 48000; + if (msg->findInt32("channel-count", &numChannels) && + msg->findInt32("sample-rate", &sampleRate)) { + err = setupOpusCodec(encoder, sampleRate, numChannels); + } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) { // numChannels needs to be set to properly communicate PCM values. int32_t numChannels = 2, sampleRate = 44100, compressionLevel = -1; @@ -2409,6 +2425,7 @@ status_t ACodec::setLowLatency(int32_t lowLatency) { if (err != OK) { ALOGE("decoder can not set low-latency to %d (err %d)", lowLatency, err); } + mIsLowLatency = (lowLatency && err == OK); return err; } @@ -3110,6 +3127,26 @@ status_t ACodec::setupG711Codec(bool encoder, int32_t sampleRate, int32_t numCha kPortIndexInput, sampleRate, numChannels); } +status_t ACodec::setupOpusCodec(bool encoder, int32_t sampleRate, int32_t numChannels) { + if (encoder) { + return INVALID_OPERATION; + } + OMX_AUDIO_PARAM_ANDROID_OPUSTYPE def; + InitOMXParams(&def); + def.nPortIndex = kPortIndexInput; + status_t err = mOMXNode->getParameter( + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus, &def, sizeof(def)); + if (err != OK) { + ALOGE("setupOpusCodec(): Error %d getting OMX_IndexParamAudioAndroidOpus parameter", err); + return err; + } + def.nSampleRate = sampleRate; + def.nChannels = numChannels; + err = mOMXNode->setParameter( + (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus, &def, sizeof(def)); + return err; +} + status_t ACodec::setupFlacCodec( bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel, AudioEncoding encoding) { @@ -5750,7 +5787,8 @@ status_t ACodec::requestIDRFrame() { ACodec::BaseState::BaseState(ACodec *codec, const sp<AState> &parentState) : AState(parentState), - mCodec(codec) { + mCodec(codec), + mPendingExtraOutputMetadataBufferRequest(false) { } ACodec::BaseState::PortMode ACodec::BaseState::getPortMode( @@ -5851,6 +5889,21 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatSubmitExtraOutputMetadataBuffer: { + mPendingExtraOutputMetadataBufferRequest = false; + if (getPortMode(kPortIndexOutput) == RESUBMIT_BUFFERS && mCodec->mIsLowLatency) { + // Decoders often need more than one output buffer to be + // submitted before processing a single input buffer. + // For low latency codecs, we don't want to wait for more input + // to be queued to get those output buffers submitted. + if (mCodec->submitOutputMetadataBuffer() == OK + && mCodec->mMetadataBuffersToSubmit > 0) { + maybePostExtraOutputMetadataBufferRequest(); + } + } + break; + } + default: return false; } @@ -6207,7 +6260,12 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { (outputMode == FREE_BUFFERS ? "FREE" : outputMode == KEEP_BUFFERS ? 
"KEEP" : "RESUBMIT")); if (outputMode == RESUBMIT_BUFFERS) { - mCodec->submitOutputMetadataBuffer(); + status_t err = mCodec->submitOutputMetadataBuffer(); + if (mCodec->mIsLowLatency + && err == OK + && mCodec->mMetadataBuffersToSubmit > 0) { + maybePostExtraOutputMetadataBufferRequest(); + } } } info->checkReadFence("onInputBufferFilled"); @@ -7353,6 +7411,9 @@ void ACodec::ExecutingState::submitOutputMetaBuffers() { break; } } + if (mCodec->mIsLowLatency) { + maybePostExtraOutputMetadataBufferRequest(); + } // *** NOTE: THE FOLLOWING WORKAROUND WILL BE REMOVED *** mCodec->signalSubmitOutputMetadataBufferIfEOS_workaround(); @@ -7996,6 +8057,7 @@ status_t ACodec::setVendorParameters(const sp<AMessage> ¶ms) { // don't bother component if we don't have vendor extensions as they may not have implemented // the android vendor extension support, which will lead to unnecessary OMX failure logs. if (vendorKeys.empty()) { + mVendorExtensionsStatus = kExtensionsNone; return OK; } @@ -8287,13 +8349,34 @@ bool ACodec::OutputPortSettingsChangedState::onMessageReceived( FALLTHROUGH_INTENDED; } case kWhatResume: + { + ALOGV("[%s] Deferring resume", mCodec->mComponentName.c_str()); + + mCodec->deferMessage(msg); + handled = true; + break; + } + case kWhatSetParameters: { - if (msg->what() == kWhatResume) { - ALOGV("[%s] Deferring resume", mCodec->mComponentName.c_str()); + sp<AMessage> params; + CHECK(msg->findMessage("params", ¶ms)); + + sp<ABuffer> hdr10PlusInfo; + if (params->findBuffer("hdr10-plus-info", &hdr10PlusInfo)) { + if (hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) { + (void)mCodec->setHdr10PlusInfo(hdr10PlusInfo); + } + params->removeEntryAt(params->findEntryByName("hdr10-plus-info")); + + if (params->countEntries() == 0) { + msg->removeEntryAt(msg->findEntryByName("params")); + } } - mCodec->deferMessage(msg); + if (msg->countEntries() > 0) { + mCodec->deferMessage(msg); + } handled = true; break; } @@ -8408,6 +8491,15 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent( return false; } + case OMX_EventConfigUpdate: + { + CHECK_EQ(data1, (OMX_U32)kPortIndexOutput); + + mCodec->onConfigUpdate((OMX_INDEXTYPE)data2); + + return true; + } + default: return BaseState::onOMXEvent(event, data1, data2); } diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index 9b3f4200d5..be3fde8a0c 100644 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -162,6 +162,10 @@ static int32_t getColorFormat(const char* colorFormat) { return OMX_COLOR_FormatAndroidOpaque; } + if (!strcmp(colorFormat, "YVU420SemiPlanar")) { + return OMX_QCOM_COLOR_FormatYVU420SemiPlanar; + } + ALOGE("Uknown color format (%s), please add it to " "CameraSource::getColorFormat", colorFormat); @@ -338,6 +342,12 @@ status_t CameraSource::isCameraColorFormatSupported( return OK; } +static int32_t getHighSpeedFrameRate(const CameraParameters& params) { + const char* hsr = params.get("video-hsr"); + int32_t rate = (hsr != NULL && strncmp(hsr, "off", 3)) ? strtol(hsr, NULL, 10) : 0; + return std::min(rate, 240); +} + /* * Configure the camera to use the requested video size * (width and height) and/or frame rate. 
If both width and @@ -385,11 +395,15 @@ status_t CameraSource::configureCamera( } if (frameRate != -1) { - CHECK(frameRate > 0 && frameRate <= 120); + CHECK(frameRate > 0 && frameRate <= 240); const char* supportedFrameRates = params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES); CHECK(supportedFrameRates != NULL); ALOGV("Supported frame rates: %s", supportedFrameRates); + if (getHighSpeedFrameRate(*params)) { + ALOGI("Use default 30fps for HighSpeed %dfps", frameRate); + frameRate = 30; + } char buf[4]; snprintf(buf, 4, "%d", frameRate); if (strstr(supportedFrameRates, buf) == NULL) { @@ -491,6 +505,8 @@ status_t CameraSource::checkFrameRate( ALOGE("Failed to retrieve preview frame rate (%d)", frameRateActual); return UNKNOWN_ERROR; } + int32_t highSpeedRate = getHighSpeedFrameRate(params); + frameRateActual = highSpeedRate ? highSpeedRate : frameRateActual; // Check the actual video frame rate against the target/requested // video frame rate. diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp index e0a6eb3a57..a00a1786ae 100644 --- a/media/libstagefright/CameraSourceTimeLapse.cpp +++ b/media/libstagefright/CameraSourceTimeLapse.cpp @@ -298,7 +298,8 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) { // The first 2 output frames from the encoder are: decoder specific info and // the compressed video frame data for the first input video frame. if (mNumFramesEncoded >= 1 && *timestampUs < - (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs)) { + (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs) && + (mTimeBetweenFrameCaptureUs > mTimeBetweenTimeLapseVideoFramesUs + 1)) { // Skip all frames from last encoded frame until // sufficient time (mTimeBetweenFrameCaptureUs) has passed. // Tell the camera to release its recording frame and return. @@ -313,6 +314,12 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) { mLastTimeLapseFrameRealTimestampUs = *timestampUs; *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs; + // Update start-time once the captured-time reaches the expected start-time. 
+ // Not doing so will result in CameraSource always dropping frames since + // updated-timestamp will never intersect start-timestamp + if ((mNumFramesReceived == 0 && mLastTimeLapseFrameRealTimestampUs >= mStartTimeUs)) { + mStartTimeUs = *timestampUs; + } return false; } return false; diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp index c284ef7d06..1b3816d3b5 100644 --- a/media/libstagefright/SurfaceUtils.cpp +++ b/media/libstagefright/SurfaceUtils.cpp @@ -111,8 +111,9 @@ status_t setNativeWindowSizeFormatAndUsage( } } - int finalUsage = usage | consumerUsage; - ALOGV("gralloc usage: %#x(producer) + %#x(consumer) = %#x", usage, consumerUsage, finalUsage); + uint64_t finalUsage = (usage | consumerUsage) & 0xffffffffLL; + ALOGV("gralloc usage: %#x(producer) + %#x(consumer) = %#" PRIx64, + usage, consumerUsage, finalUsage); err = native_window_set_usage(nativeWindow, finalUsage); if (err != NO_ERROR) { ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err); @@ -126,7 +127,7 @@ status_t setNativeWindowSizeFormatAndUsage( return err; } - ALOGD("set up nativeWindow %p for %dx%d, color %#x, rotation %d, usage %#x", + ALOGD("set up nativeWindow %p for %dx%d, color %#x, rotation %d, usage %#" PRIx64, nativeWindow, width, height, format, rotation, finalUsage); return NO_ERROR; } diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp index 8698d33f9b..9e2a339391 100644 --- a/media/libstagefright/bqhelper/Android.bp +++ b/media/libstagefright/bqhelper/Android.bp @@ -1,5 +1,6 @@ cc_defaults { name: "libstagefright_bufferqueue-defaults", + defaults: ["stagefright_qcom_legacy_defaults"], double_loadable: true, srcs: [ diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp index cff14ac185..d031e91f5a 100644 --- a/media/libstagefright/bqhelper/GraphicBufferSource.cpp +++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp @@ -891,11 +891,13 @@ status_t GraphicBufferSource::submitBuffer_l(const VideoBuffer &item) { return UNKNOWN_ERROR; } +#ifndef QCOM_BSP_LEGACY if ((android_dataspace)item.mDataspace != mLastDataspace) { onDataspaceChanged_l( item.mDataspace, (android_pixel_format)item.mBuffer->getGraphicBuffer()->format); } +#endif std::shared_ptr<AcquiredBuffer> buffer = item.mBuffer; // use a GraphicBuffer for now as component is using GraphicBuffers to hold references diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp index 4f61aa8be2..5bb1879da1 100644 --- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp +++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp @@ -58,6 +58,8 @@ SoftOpus::SoftOpus( mInputBufferCount(0), mDecoder(NULL), mHeader(NULL), + mNumChannels(1), + mSamplingRate(kRate), mCodecDelay(0), mSeekPreRoll(0), mAnchorTimeUs(0), @@ -169,11 +171,11 @@ OMX_ERRORTYPE SoftOpus::internalGetParameter( } opusParams->nAudioBandWidth = 0; - opusParams->nSampleRate = kRate; + opusParams->nSampleRate = mSamplingRate; opusParams->nBitRate = 0; if (!isConfigured()) { - opusParams->nChannels = 1; + opusParams->nChannels = mNumChannels; } else { opusParams->nChannels = mHeader->channels; } @@ -274,7 +276,8 @@ OMX_ERRORTYPE SoftOpus::internalSetParameter( if (opusParams->nPortIndex != 0) { return OMX_ErrorUndefined; } - + mNumChannels = opusParams->nChannels; + mSamplingRate = opusParams->nSampleRate; return OMX_ErrorNone; } @@ -496,6 +499,8 @@ void 
SoftOpus::onQueueFilled(OMX_U32 /* portIndex */) { *(reinterpret_cast<int64_t*>(inHeader->pBuffer + inHeader->nOffset)), kRate); + mSamplingRate = kRate; + mNumChannels = mHeader->channels; notify(OMX_EventPortSettingsChanged, 1, 0, NULL); mOutputPortSettingsChange = AWAITING_DISABLED; } diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.h b/media/libstagefright/codecs/opus/dec/SoftOpus.h index 91cafa14c7..00058c8212 100644 --- a/media/libstagefright/codecs/opus/dec/SoftOpus.h +++ b/media/libstagefright/codecs/opus/dec/SoftOpus.h @@ -70,6 +70,8 @@ private: OpusMSDecoder *mDecoder; OpusHeader *mHeader; + int32_t mNumChannels; + int32_t mSamplingRate; int64_t mCodecDelay; int64_t mSeekPreRoll; int64_t mSamplesToDiscard; diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h index 83e92b9f43..8ef92783ec 100644 --- a/media/libstagefright/include/media/stagefright/ACodec.h +++ b/media/libstagefright/include/media/stagefright/ACodec.h @@ -147,6 +147,7 @@ private: kWhatReleaseCodecInstance = 'relC', kWhatForceStateTransition = 'fstt', kWhatCheckIfStuck = 'Cstk', + kWhatSubmitExtraOutputMetadataBuffer = 'sbxo', }; enum { @@ -272,6 +273,7 @@ private: bool mShutdownInProgress; bool mExplicitShutdown; bool mIsLegacyVP9Decoder; + bool mIsLowLatency; // If "mKeepComponentAllocated" we only transition back to Loaded state // and do not release the component instance. @@ -499,6 +501,7 @@ private: status_t setupAMRCodec(bool encoder, bool isWAMR, int32_t bitRate); status_t setupG711Codec(bool encoder, int32_t sampleRate, int32_t numChannels); + status_t setupOpusCodec(bool encoder, int32_t sampleRate, int32_t numChannels); status_t setupFlacCodec( bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel, AudioEncoding encoding); diff --git a/media/libstagefright/omx/1.0/OmxStore.cpp b/media/libstagefright/omx/1.0/OmxStore.cpp index 67f478ead0..b5c116656f 100644 --- a/media/libstagefright/omx/1.0/OmxStore.cpp +++ b/media/libstagefright/omx/1.0/OmxStore.cpp @@ -54,6 +54,24 @@ OmxStore::OmxStore( }); } + if (!nodes.empty()) { + auto anyNode = nodes.cbegin(); + std::string::const_iterator first = anyNode->cbegin(); + std::string::const_iterator last = anyNode->cend(); + for (const std::string &name : nodes) { + std::string::const_iterator it1 = first; + for (std::string::const_iterator it2 = name.cbegin(); + it1 != last && it2 != name.cend() && tolower(*it1) == tolower(*it2); + ++it1, ++it2) { + } + last = it1; + } + mPrefix = std::string(first, last); + LOG(INFO) << "omx common prefix: '" << mPrefix.c_str() << "'"; + } else { + LOG(INFO) << "omx common prefix: no nodes"; + } + MediaCodecsXmlParser parser; parser.parseXmlFilesInSearchDirs(xmlNames, searchDirs); if (profilingResultsXmlPath != nullptr) { @@ -112,8 +130,6 @@ OmxStore::OmxStore( mRoleList[i] = std::move(role); ++i; } - - mPrefix = parser.getCommonPrefix(); } OmxStore::~OmxStore() { diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp index 78b4f192d0..a049fc57ec 100644 --- a/media/libstagefright/omx/Android.bp +++ b/media/libstagefright/omx/Android.bp @@ -1,5 +1,6 @@ cc_library_shared { name: "libstagefright_omx", + defaults: ["stagefright_qcom_legacy_defaults"], vendor_available: true, vndk: { enabled: true, diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp index ac42373bf0..d996e1a085 100644 --- a/media/libstagefright/omx/OMXNodeInstance.cpp +++ 
b/media/libstagefright/omx/OMXNodeInstance.cpp @@ -453,7 +453,11 @@ OMXNodeInstance::OMXNodeInstance( mGraphicBufferEnabled[0] = false; mGraphicBufferEnabled[1] = false; mIsSecure = AString(name).endsWith(".secure"); +#ifdef QCOM_BSP_LEGACY + mLegacyAdaptiveExperiment = true; +#else mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled("legacy-adaptive"); +#endif } OMXNodeInstance::~OMXNodeInstance() { @@ -580,6 +584,10 @@ status_t OMXNodeInstance::freeNode() { break; } + if (mActiveBuffers.size() > 0) { + freeActiveBuffers(); + } + Mutex::Autolock _l(mLock); status_t err = mOwner->freeNode(this); diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp index afca7c4f71..1759ff66f6 100644 --- a/media/mediaserver/Android.bp +++ b/media/mediaserver/Android.bp @@ -11,6 +11,9 @@ cc_library_static { cc_binary { name: "mediaserver", + defaults: [ + "camera_in_mediaserver_defaults", + ], srcs: ["main_mediaserver.cpp"], diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp index 316732bdc3..162153394d 100644 --- a/media/mediaserver/main_mediaserver.cpp +++ b/media/mediaserver/main_mediaserver.cpp @@ -27,6 +27,10 @@ #include "RegisterExtensions.h" // from LOCAL_C_INCLUDES +#ifdef NO_CAMERA_SERVER +#include "CameraService.h" +#include <hidl/HidlTransportSupport.h> +#endif #include "MediaPlayerService.h" #include "ResourceManagerService.h" @@ -36,12 +40,20 @@ int main(int argc __unused, char **argv __unused) { signal(SIGPIPE, SIG_IGN); +#ifdef NO_CAMERA_SERVER + // Set 3 threads for HIDL calls + hardware::configureRpcThreadpool(3, /*willjoin*/ false); +#endif + sp<ProcessState> proc(ProcessState::self()); sp<IServiceManager> sm(defaultServiceManager()); ALOGI("ServiceManager: %p", sm.get()); AIcu_initializeIcuOrDie(); MediaPlayerService::instantiate(); ResourceManagerService::instantiate(); +#ifdef NO_CAMERA_SERVER + CameraService::instantiate(); +#endif registerExtensions(); ::android::hardware::configureRpcThreadpool(16, false); ProcessState::self()->startThreadPool(); diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp index 3dfeb83a4c..ff1f9ac136 100644 --- a/services/audioflinger/Effects.cpp +++ b/services/audioflinger/Effects.cpp @@ -2017,6 +2017,10 @@ void AudioFlinger::EffectChain::clearInputBuffer() { Mutex::Autolock _l(mLock); clearInputBuffer_l(); + + for (size_t i = 0; i < mEffects.size(); i++) { + mEffects[i]->reset_l(); + } } // Must be called with EffectChain::mLock locked diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h index 8d0e5dbed7..a350f6cc22 100644 --- a/services/audiopolicy/AudioPolicyInterface.h +++ b/services/audiopolicy/AudioPolicyInterface.h @@ -421,6 +421,8 @@ public: // sessions to be preempted on modules that do not support sound trigger // recognition concurrently with audio capture. 
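Editor's note on the AudioFlinger EffectChain::clearInputBuffer() hunk a little above: clearing the chain's input buffer now also resets every effect module in the chain, so stale internal state (delay lines, reverb tails) does not leak into the next stream. A condensed standalone sketch of that idea with deliberately simplified stand-in types (Effect, EffectChain and the float buffer below are illustrations, not the AudioFlinger classes):

#include <algorithm>
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

// Stand-in for an effect module: only the state-reset hook matters here.
struct Effect {
    virtual ~Effect() = default;
    virtual void reset() = 0;   // drop delay lines / tails, keep configuration
};

class EffectChain {
public:
    explicit EffectChain(size_t frames) : mInBuffer(frames, 0.0f) {}
    void addEffect(std::shared_ptr<Effect> e) { mEffects.push_back(std::move(e)); }

    // Mirrors the patch: zero the shared input buffer, then reset each
    // effect in the chain so the next stream starts from a clean state.
    void clearInputBuffer() {
        std::lock_guard<std::mutex> lock(mLock);
        std::fill(mInBuffer.begin(), mInBuffer.end(), 0.0f);
        for (auto& effect : mEffects) {
            effect->reset();
        }
    }

private:
    std::mutex mLock;
    std::vector<float> mInBuffer;
    std::vector<std::shared_ptr<Effect>> mEffects;
};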
virtual void setSoundTriggerCaptureState(bool active) = 0; + + virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& streamInfo, bool added) = 0; }; extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface); diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h index 0537365ae2..4047d76c0d 100644 --- a/services/audiopolicy/common/include/policy.h +++ b/services/audiopolicy/common/include/policy.h @@ -35,7 +35,7 @@ static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000; // For mixed output and inputs, the policy will use max mixer sampling rates. // Do not limit sampling rate otherwise -#define SAMPLE_RATE_HZ_MAX 192000 +#define SAMPLE_RATE_HZ_MAX 384000 // Used when a client opens a capture stream, without specifying a desired sample rate. #define SAMPLE_RATE_HZ_DEFAULT 48000 diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h index 39d11401be..daced38b93 100644 --- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h +++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h @@ -305,6 +305,7 @@ public: DeviceVector mDevices; /**< current devices this output is routed to */ wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy + audio_io_handle_t mIoHandle; // output handle protected: const sp<PolicyAudioPort> mPolicyAudioPort; @@ -404,7 +405,6 @@ public: DeviceVector filterSupportedDevices(const DeviceVector &devices) const; const sp<IOProfile> mProfile; // I/O profile this output derives from - audio_io_handle_t mIoHandle; // output handle uint32_t mLatency; // audio_output_flags_t mFlags; // sp<SwAudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp index d6d472b82e..e61af6fc60 100644 --- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp +++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp @@ -40,7 +40,7 @@ static const DeviceTypeSet& getAllOutRemoteDevices() { AudioOutputDescriptor::AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort, AudioPolicyClientInterface *clientInterface) - : mPolicyAudioPort(policyAudioPort), mClientInterface(clientInterface) + : mIoHandle(AUDIO_IO_HANDLE_NONE), mPolicyAudioPort(policyAudioPort), mClientInterface(clientInterface) { if (mPolicyAudioPort.get() != nullptr) { mPolicyAudioPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat); @@ -278,7 +278,7 @@ void AudioOutputDescriptor::log(const char* indent) SwAudioOutputDescriptor::SwAudioOutputDescriptor(const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface) : AudioOutputDescriptor(profile, clientInterface), - mProfile(profile), mIoHandle(AUDIO_IO_HANDLE_NONE), mLatency(0), + mProfile(profile), mLatency(0), mFlags((audio_output_flags_t)0), mOutput1(0), mOutput2(0), mDirectOpenCount(0), mDirectClientSession(AUDIO_SESSION_NONE) diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h index 7f339dcfe4..136013a6d7 100755 --- a/services/audiopolicy/engine/common/include/EngineBase.h +++ b/services/audiopolicy/engine/common/include/EngineBase.h @@ 
-39,6 +39,11 @@ public: audio_mode_t getPhoneState() const override { return mPhoneState; } + void setDpConnAndAllowedForVoice(bool connAndAllowed) override + { + mDpConnAndAllowedForVoice = connAndAllowed; + } + status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) override { mForceUse[usage] = config; @@ -118,6 +123,11 @@ public: return is_state_in_call(getPhoneState()); } + inline bool getDpConnAndAllowedForVoice() const + { + return mDpConnAndAllowedForVoice; + } + VolumeSource toVolumeSource(audio_stream_type_t stream) const { return static_cast<VolumeSource>(getVolumeGroupForStreamType(stream)); @@ -135,6 +145,8 @@ private: VolumeGroupMap mVolumeGroups; LastRemovableMediaDevices mLastRemovableMediaDevices; audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */ + /* if display-port is connected and can be used for voip/voice */ + bool mDpConnAndAllowedForVoice; /** current forced use configuration. */ audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT] = {}; diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h index dfb20b5274..650c15f618 100644 --- a/services/audiopolicy/engine/interface/EngineInterface.h +++ b/services/audiopolicy/engine/interface/EngineInterface.h @@ -73,6 +73,14 @@ public: virtual audio_mode_t getPhoneState() const = 0; /** + * Set whether display-port is connected and is allowed to be used + * for voice usecases + * + * @param[in] connAndAllowed: if display-port is connected and can be used + */ + virtual void setDpConnAndAllowedForVoice(bool connAndAllowed) = 0; + + /** * Set Force Use config for a given usage. * * @param[in] usage for which a configuration shall be forced. diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp index b14d2bbb0b..37f1a9815f 100755 --- a/services/audiopolicy/enginedefault/src/Engine.cpp +++ b/services/audiopolicy/enginedefault/src/Engine.cpp @@ -250,6 +250,10 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES}); if (!devices.isEmpty()) break; } + if (getDpConnAndAllowedForVoice() && isInCall()) { + devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_AUX_DIGITAL); + if (!devices.isEmpty()) break; + } devices = availableOutputDevices.getFirstDevicesFromTypes({ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, AUDIO_DEVICE_OUT_WIRED_HEADSET, AUDIO_DEVICE_OUT_LINE, AUDIO_DEVICE_OUT_USB_HEADSET, @@ -340,6 +344,16 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, } } } + // if display-port is connected and being used in voice usecase, + // play ringtone over speaker and display-port + if ((strategy == STRATEGY_SONIFICATION) && getDpConnAndAllowedForVoice()) { + DeviceVector devices2 = availableOutputDevices.getDevicesFromType( + AUDIO_DEVICE_OUT_AUX_DIGITAL); + if (!devices2.isEmpty()) { + devices.add(devices2); + break; + } + } // The second device used for sonification is the same as the device used by media strategy FALLTHROUGH_INTENDED; @@ -371,6 +385,13 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, // FIXME: STRATEGY_REROUTING follow STRATEGY_MEDIA for now case STRATEGY_REROUTING: case STRATEGY_MEDIA: { + if (isInCall() && devices.isEmpty()) { + // when in call, get the device for Phone strategy + devices = getDevicesForStrategyInt( + STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs); + break; + 
} + DeviceVector devices2; if (strategy != STRATEGY_SONIFICATION) { // no sonification on remote submix (e.g. WFD) @@ -407,7 +428,8 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, getLastRemovableMediaDevices(GROUP_WIRED)); } } - if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION)) { + if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION) && + (devices.isEmpty())) { // no sonification on aux digital (e.g. HDMI) devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_AUX_DIGITAL); } @@ -416,6 +438,12 @@ DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy, devices2 = availableOutputDevices.getDevicesFromType( AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET); } + if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION) && + (devices.isEmpty())) { + // no sonification on WFD sink + devices2 = availableOutputDevices.getDevicesFromType( + AUDIO_DEVICE_OUT_PROXY); + } if (devices2.isEmpty()) { devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER); } diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h index b588f898d4..b2a670e6a4 100644 --- a/services/audiopolicy/managerdefault/AudioPolicyManager.h +++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h @@ -506,7 +506,7 @@ protected: // when a device is disconnected, checks if an output is not used any more and // returns its handle if any. // transfers the audio tracks and effects from one output thread to another accordingly. - status_t checkOutputsForDevice(const sp<DeviceDescriptor>& device, + virtual status_t checkOutputsForDevice(const sp<DeviceDescriptor>& device, audio_policy_dev_state_t state, SortedVector<audio_io_handle_t>& outputs); @@ -539,7 +539,7 @@ protected: * Must be called before updateDevicesAndOutputs() * @param attr to be considered */ - void checkOutputForAttributes(const audio_attributes_t &attr); + virtual void checkOutputForAttributes(const audio_attributes_t &attr); bool followsSameRouting(const audio_attributes_t &lAttr, const audio_attributes_t &rAttr) const; @@ -685,10 +685,10 @@ protected: uint32_t delayMs); bool isDeviceOfModule(const sp<DeviceDescriptor>& devDesc, const char *moduleId) const; - status_t startSource(const sp<SwAudioOutputDescriptor>& outputDesc, + virtual status_t startSource(const sp<SwAudioOutputDescriptor>& outputDesc, const sp<TrackClientDescriptor>& client, uint32_t *delayMs); - status_t stopSource(const sp<SwAudioOutputDescriptor>& outputDesc, + virtual status_t stopSource(const sp<SwAudioOutputDescriptor>& outputDesc, const sp<TrackClientDescriptor>& client); void clearAudioPatches(uid_t uid); @@ -787,7 +787,7 @@ protected: std::unordered_set<audio_format_t> mManualSurroundFormats; std::unordered_map<uid_t, audio_flags_mask_t> mAllowedCapturePolicies; -private: +protected: void onNewAudioModulesAvailableInt(DeviceVector *newDevices); // Add or remove AC3 DTS encodings based on user preferences. @@ -840,7 +840,7 @@ private: std::vector<sp<AudioPolicyMix>> *secondaryMixes, output_type_t *outputType); // internal method to return the output handle for the given device and format - audio_io_handle_t getOutputForDevices( + virtual audio_io_handle_t getOutputForDevices( const DeviceVector &devices, audio_session_t session, audio_stream_type_t stream, @@ -885,7 +885,7 @@ private: bool isValidAttributes(const audio_attributes_t *paa); // Called by setDeviceConnectionState(). 
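Editor's note on the Engine.cpp hunks above: the default routing engine changes in two ways, media played during a call now reuses whatever the phone strategy selected, and a display-port sink flagged as usable for voice is preferred for call audio and added for ringtones. A much-reduced sketch of that decision order, using a plain enum set instead of DeviceVector (the types and helpers below are stand-ins for illustration only):

#include <cstdio>
#include <set>

enum class Device { WiredHeadset, UsbHeadset, AuxDigital, Proxy, Speaker };
using DeviceSet = std::set<Device>;

// Stand-in for the phone strategy: prefer display-port when it is connected
// and allowed for voice, otherwise fall back to a headset, then the speaker.
static DeviceSet pickPhoneDevices(const DeviceSet& avail, bool dpAllowedForVoice) {
    if (dpAllowedForVoice && avail.count(Device::AuxDigital)) return {Device::AuxDigital};
    if (avail.count(Device::WiredHeadset)) return {Device::WiredHeadset};
    if (avail.count(Device::UsbHeadset))   return {Device::UsbHeadset};
    return {Device::Speaker};
}

// Media strategy, mirroring the patch: while in a call, follow the phone
// strategy instead of picking independently; otherwise walk the usual
// wired -> aux-digital -> proxy -> speaker fallback chain.
static DeviceSet pickMediaDevices(const DeviceSet& avail, bool inCall, bool dpAllowedForVoice) {
    if (inCall) return pickPhoneDevices(avail, dpAllowedForVoice);
    if (avail.count(Device::WiredHeadset)) return {Device::WiredHeadset};
    if (avail.count(Device::AuxDigital))   return {Device::AuxDigital};
    if (avail.count(Device::Proxy))        return {Device::Proxy};
    return {Device::Speaker};
}

int main() {
    DeviceSet avail{Device::AuxDigital, Device::Speaker};
    DeviceSet out = pickMediaDevices(avail, /*inCall=*/true, /*dpAllowedForVoice=*/true);
    printf("routed to aux-digital: %d\n", out.count(Device::AuxDigital) ? 1 : 0);
    return 0;
}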
- status_t setDeviceConnectionStateInt(audio_devices_t deviceType, + virtual status_t setDeviceConnectionStateInt(audio_devices_t deviceType, audio_policy_dev_state_t state, const char *device_address, const char *device_name, diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp index 9fa7a5348b..177c3cbe78 100644 --- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp +++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp @@ -236,6 +236,13 @@ void AudioPolicyService::AudioPolicyClient::onAudioVolumeGroupChanged(volume_gro mAudioPolicyService->onAudioVolumeGroupChanged(group, flags); } +void AudioPolicyService::AudioPolicyClient::onOutputSessionEffectsUpdate( + sp<AudioSessionInfo>& info, bool added) +{ + mAudioPolicyService->onOutputSessionEffectsUpdate(info, added); +} + + audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use) { return AudioSystem::newAudioUniqueId(use); diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp index b738633924..a5e5bb84e3 100644 --- a/services/audiopolicy/service/AudioPolicyEffects.cpp +++ b/services/audiopolicy/service/AudioPolicyEffects.cpp @@ -31,6 +31,7 @@ #include <utils/SortedVector.h> #include <cutils/config_utils.h> #include <binder/IPCThreadState.h> +#include "AudioPolicyService.h" #include "AudioPolicyEffects.h" namespace android { @@ -39,7 +40,8 @@ namespace android { // AudioPolicyEffects Implementation // ---------------------------------------------------------------------------- -AudioPolicyEffects::AudioPolicyEffects() +AudioPolicyEffects::AudioPolicyEffects(AudioPolicyService *audioPolicyService) : + mAudioPolicyService(audioPolicyService) { status_t loadResult = loadAudioEffectXmlConfig(); if (loadResult == NO_ERROR) { @@ -238,6 +240,8 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output, { status_t status = NO_ERROR; + ALOGV("addOutputSessionEffects %d", audioSession); + Mutex::Autolock _l(mLock); // create audio processors according to stream // FIXME: should we have specific post processing settings for internal streams? 
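Editor's note on the plumbing being introduced here: AudioPolicyEffects is now constructed with a back-pointer to its owning AudioPolicyService, and session add/remove events are reported through it so the service can fan them out to registered clients. A minimal sketch of that ownership and callback arrangement, with hypothetical stand-in names (SessionInfo, Service, EffectsHelper are illustrations, not the real classes):

#include <cstdio>
#include <functional>
#include <vector>

struct SessionInfo { int sessionId; int stream; };

// Stand-in for AudioPolicyService: owns the helper and fans events out to
// whoever registered a listener (the real code notifies binder clients).
class Service {
public:
    void addListener(std::function<void(const SessionInfo&, bool)> l) {
        mListeners.push_back(std::move(l));
    }
    void onOutputSessionEffectsUpdate(const SessionInfo& info, bool added) {
        for (auto& l : mListeners) l(info, added);
    }

private:
    std::vector<std::function<void(const SessionInfo&, bool)>> mListeners;
};

// Stand-in for AudioPolicyEffects: constructed with a back-pointer to the
// service so it can report when a session gains or loses its effects.
class EffectsHelper {
public:
    explicit EffectsHelper(Service* service) : mService(service) {}
    void sessionOpened(const SessionInfo& info) { mService->onOutputSessionEffectsUpdate(info, true); }
    void sessionClosed(const SessionInfo& info) { mService->onOutputSessionEffectsUpdate(info, false); }

private:
    Service* mService; // non-owning: the service outlives the helper
};

int main() {
    Service service;
    service.addListener([](const SessionInfo& s, bool added) {
        printf("session %d %s\n", s.sessionId, added ? "added" : "removed");
    });
    EffectsHelper effects(&service);
    effects.sessionOpened({42, 3});
    effects.sessionClosed({42, 3});
    return 0;
}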
@@ -245,6 +249,22 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output, if (stream >= AUDIO_STREAM_PUBLIC_CNT) { stream = AUDIO_STREAM_MUSIC; } + + // send the streaminfo notification only once + ssize_t sidx = mOutputAudioSessionInfo.indexOfKey(audioSession); + if (sidx >= 0) { + // AudioSessionInfo is existing and we just need to increase ref count + sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(sidx); + info->mRefCount++; + + if (info->mRefCount == 1) { + mAudioPolicyService->onOutputSessionEffectsUpdate(info, true); + } + ALOGV("addOutputSessionEffects(): session info %d refCount=%d", audioSession, info->mRefCount); + } else { + ALOGV("addOutputSessionEffects(): no output stream info found for stream"); + } + ssize_t index = mOutputStreams.indexOfKey(stream); if (index < 0) { ALOGV("addOutputSessionEffects(): no output processing needed for this stream"); @@ -290,6 +310,86 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output, return status; } +status_t AudioPolicyEffects::releaseOutputAudioSessionInfo(audio_io_handle_t /* output */, + audio_stream_type_t stream, + audio_session_t session) +{ + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + + Mutex::Autolock _l(mLock); + + ssize_t idx = mOutputAudioSessionInfo.indexOfKey(session); + if (idx >= 0) { + sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(idx); + if (info->mRefCount == 0) { + mOutputAudioSessionInfo.removeItemsAt(idx); + } + ALOGV("releaseOutputAudioSessionInfo() sessionId=%d refcount=%d", + session, info->mRefCount); + } else { + ALOGV("releaseOutputAudioSessionInfo() no session info found"); + } + return NO_ERROR; +} + +status_t AudioPolicyEffects::updateOutputAudioSessionInfo(audio_io_handle_t /* output */, + audio_stream_type_t stream, + audio_session_t session, + audio_output_flags_t flags, + const audio_config_t *config, uid_t uid) +{ + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + + Mutex::Autolock _l(mLock); + + // TODO: Handle other stream types based on client registration + if (stream != AUDIO_STREAM_MUSIC) { + return NO_ERROR; + } + + // update AudioSessionInfo. This is used in the stream open/close path + // to notify userspace applications about session creation and + // teardown, allowing the app to make decisions about effects for + // a particular stream. This is independent of the current + // output_session_processing feature which forcibly attaches a + // static list of effects to a stream. 
+ ssize_t idx = mOutputAudioSessionInfo.indexOfKey(session); + sp<AudioSessionInfo> info; + if (idx < 0) { + info = new AudioSessionInfo(session, stream, flags, config->channel_mask, uid); + mOutputAudioSessionInfo.add(session, info); + } else { + // the streaminfo may actually change + info = mOutputAudioSessionInfo.valueAt(idx); + info->mFlags = flags; + info->mChannelMask = config->channel_mask; + } + + ALOGV("updateOutputAudioSessionInfo() sessionId=%d, flags=0x%x, channel_mask=0x%x uid=%d refCount=%d", + info->mSessionId, info->mFlags, info->mChannelMask, info->mUid, info->mRefCount); + + return NO_ERROR; +} + +status_t AudioPolicyEffects::listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions) +{ + ALOGV("listAudioSessions() streams %d", streams); + + for (unsigned int i = 0; i < mOutputAudioSessionInfo.size(); i++) { + sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(i); + if (streams == -1 || info->mStream == streams) { + sessions.push_back(info); + } + } + + return NO_ERROR; +} + status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t output, audio_stream_type_t stream, audio_session_t audioSession) @@ -299,7 +399,19 @@ status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t outpu (void) stream; // argument not used for now Mutex::Autolock _l(mLock); - ssize_t index = mOutputSessions.indexOfKey(audioSession); + ssize_t index = mOutputAudioSessionInfo.indexOfKey(audioSession); + if (index >= 0) { + sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(index); + info->mRefCount--; + if (info->mRefCount == 0) { + mAudioPolicyService->onOutputSessionEffectsUpdate(info, false); + } + ALOGV("releaseOutputSessionEffects(): session=%d refCount=%d", info->mSessionId, info->mRefCount); + } else { + ALOGV("releaseOutputSessionEffects: no stream info was attached to this stream"); + } + + index = mOutputSessions.indexOfKey(audioSession); if (index < 0) { ALOGV("releaseOutputSessionEffects: no output processing was attached to this stream"); return NO_ERROR; diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h index 81c728df8d..137e37f768 100644 --- a/services/audiopolicy/service/AudioPolicyEffects.h +++ b/services/audiopolicy/service/AudioPolicyEffects.h @@ -31,6 +31,8 @@ namespace android { +class AudioPolicyService; + // ---------------------------------------------------------------------------- // AudioPolicyEffects class @@ -44,7 +46,7 @@ public: // The constructor will parse audio_effects.conf // First it will look whether vendor specific file exists, // otherwise it will parse the system default file. - AudioPolicyEffects(); + AudioPolicyEffects(AudioPolicyService *audioPolicyService); virtual ~AudioPolicyEffects(); // NOTE: methods on AudioPolicyEffects should never be called with the AudioPolicyService @@ -106,6 +108,19 @@ public: // Remove the default stream effect from wherever it's attached. 
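Editor's note, pulling the AudioPolicyEffects.cpp hunks above together: the patch keeps a per-session record keyed by audio_session_t. updateOutputAudioSessionInfo() creates or refreshes it when a stream opens, addOutputSessionEffects() bumps a reference count and announces the session on the first reference, releaseOutputSessionEffects() announces removal when the count returns to zero, and releaseOutputAudioSessionInfo() finally drops the zero-count entry. A compact standalone sketch of that lifecycle, using std::map and a callback in place of KeyedVector and the service pointer (stand-ins, not the patch's types; locking is omitted for brevity, whereas the real code holds mLock):

#include <cstdio>
#include <functional>
#include <map>
#include <memory>

struct SessionInfo {
    int sessionId = 0;
    int flags = 0;
    int channelMask = 0;
    int refCount = 0;
};

class SessionRegistry {
public:
    using Callback = std::function<void(const SessionInfo&, bool added)>;
    explicit SessionRegistry(Callback cb) : mNotify(std::move(cb)) {}

    // Stream opened: create the record or refresh its mutable fields.
    void update(int session, int flags, int channelMask) {
        auto& info = mSessions[session];              // null on first use
        if (info == nullptr) info = std::make_shared<SessionInfo>();
        info->sessionId = session;
        info->flags = flags;
        info->channelMask = channelMask;
    }

    // Effects attached: the first reference announces the session.
    void addRef(int session) {
        auto it = mSessions.find(session);
        if (it == mSessions.end()) return;
        if (++it->second->refCount == 1) mNotify(*it->second, /*added=*/true);
    }

    // Effects detached: the last reference announces removal.
    void release(int session) {
        auto it = mSessions.find(session);
        if (it == mSessions.end()) return;
        if (--it->second->refCount == 0) mNotify(*it->second, /*added=*/false);
    }

    // Stream closed: drop the record once nothing references it anymore.
    void remove(int session) {
        auto it = mSessions.find(session);
        if (it != mSessions.end() && it->second->refCount == 0) mSessions.erase(it);
    }

private:
    std::map<int, std::shared_ptr<SessionInfo>> mSessions;
    Callback mNotify;
};

int main() {
    SessionRegistry reg([](const SessionInfo& s, bool added) {
        printf("session %d %s\n", s.sessionId, added ? "started" : "ended");
    });
    reg.update(7, /*flags=*/0, /*channelMask=*/3);
    reg.addRef(7);    // prints "session 7 started"
    reg.release(7);   // prints "session 7 ended"
    reg.remove(7);
    return 0;
}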
status_t removeStreamDefaultEffect(audio_unique_id_t id); + status_t updateOutputAudioSessionInfo(audio_io_handle_t output, + audio_stream_type_t stream, + audio_session_t audioSession, + audio_output_flags_t flags, + const audio_config_t *config, uid_t uid); + + status_t releaseOutputAudioSessionInfo(audio_io_handle_t output, + audio_stream_type_t stream, + audio_session_t audioSession); + + status_t listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions); + private: void initDefaultDeviceEffects(); @@ -276,6 +291,11 @@ private: * We must store the reference of the furture garantee real asynchronous operation. */ std::future<void> mDefaultDeviceEffectFuture; + + // Stream info for session events + KeyedVector< audio_session_t, sp<AudioSessionInfo> > mOutputAudioSessionInfo; + + AudioPolicyService *mAudioPolicyService; }; } // namespace android diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp index df27f6e519..0b15713c66 100644 --- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp +++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp @@ -235,61 +235,72 @@ status_t AudioPolicyService::getOutputForAttr(audio_attributes_t *attr, } ALOGV("%s()", __func__); - Mutex::Autolock _l(mLock); + sp<AudioPolicyEffects> audioPolicyEffects; + { + Mutex::Autolock _l(mLock); - const uid_t callingUid = IPCThreadState::self()->getCallingUid(); - if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) { - ALOGW_IF(uid != (uid_t)-1 && uid != callingUid, - "%s uid %d tried to pass itself off as %d", __func__, callingUid, uid); - uid = callingUid; - } - if (!mPackageManager.allowPlaybackCapture(uid)) { - attr->flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION; - } - if (((attr->flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0) - && !bypassInterruptionPolicyAllowed(pid, uid)) { - attr->flags &= ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE); - } - AutoCallerClear acc; - AudioPolicyInterface::output_type_t outputType; - result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, - config, - &flags, selectedDeviceId, portId, - secondaryOutputs, - &outputType); - - // FIXME: Introduce a way to check for the the telephony device before opening the output - if (result == NO_ERROR) { - // enforce permission (if any) required for each type of input - switch (outputType) { - case AudioPolicyInterface::API_OUTPUT_LEGACY: - break; - case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX: - if (!modifyPhoneStateAllowed(pid, uid)) { - ALOGE("%s() permission denied: modify phone state not allowed for uid %d", - __func__, uid); - result = PERMISSION_DENIED; - } - break; - case AudioPolicyInterface::API_OUT_MIX_PLAYBACK: - if (!modifyAudioRoutingAllowed(pid, uid)) { - ALOGE("%s() permission denied: modify audio routing not allowed for uid %d", - __func__, uid); - result = PERMISSION_DENIED; + const uid_t callingUid = IPCThreadState::self()->getCallingUid(); + if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) { + ALOGW_IF(uid != (uid_t)-1 && uid != callingUid, + "%s uid %d tried to pass itself off as %d", __func__, callingUid, uid); + uid = callingUid; + } + if (!mPackageManager.allowPlaybackCapture(uid)) { + attr->flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION; + } + if (((attr->flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0) + && !bypassInterruptionPolicyAllowed(pid, uid)) { + attr->flags 
&= ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE); + } + AutoCallerClear acc; + AudioPolicyInterface::output_type_t outputType; + result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, + config, + &flags, selectedDeviceId, portId, + secondaryOutputs, + &outputType); + + // FIXME: Introduce a way to check for the the telephony device before opening the output + if (result == NO_ERROR) { + // enforce permission (if any) required for each type of input + switch (outputType) { + case AudioPolicyInterface::API_OUTPUT_LEGACY: + break; + case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX: + if (!modifyPhoneStateAllowed(pid, uid)) { + ALOGE("%s() permission denied: modify phone state not allowed for uid %d", + __func__, uid); + result = PERMISSION_DENIED; + } + break; + case AudioPolicyInterface::API_OUT_MIX_PLAYBACK: + if (!modifyAudioRoutingAllowed(pid, uid)) { + ALOGE("%s() permission denied: modify audio routing not allowed for uid %d", + __func__, uid); + result = PERMISSION_DENIED; + } + break; + case AudioPolicyInterface::API_OUTPUT_INVALID: + default: + LOG_ALWAYS_FATAL("%s() encountered an invalid output type %d", + __func__, (int)outputType); } - break; - case AudioPolicyInterface::API_OUTPUT_INVALID: - default: - LOG_ALWAYS_FATAL("%s() encountered an invalid output type %d", - __func__, (int)outputType); } + + if (result == NO_ERROR) { + sp <AudioPlaybackClient> client = + new AudioPlaybackClient(*attr, *output, uid, pid, session, *portId, *selectedDeviceId, *stream); + mAudioPlaybackClients.add(*portId, client); + } + + audioPolicyEffects = mAudioPolicyEffects; } - if (result == NO_ERROR) { - sp <AudioPlaybackClient> client = - new AudioPlaybackClient(*attr, *output, uid, pid, session, *portId, *selectedDeviceId, *stream); - mAudioPlaybackClients.add(*portId, client); + if (result == NO_ERROR && audioPolicyEffects != 0) { + audioPolicyEffects->updateOutputAudioSessionInfo(*output, *stream, + session, flags, config, uid); } + return result; } @@ -393,11 +404,20 @@ void AudioPolicyService::doReleaseOutput(audio_port_handle_t portId) audioPolicyEffects->releaseOutputSessionEffects( client->io, client->stream, client->session); } - Mutex::Autolock _l(mLock); - mAudioPlaybackClients.removeItem(portId); + { + Mutex::Autolock _l(mLock); + mAudioPlaybackClients.removeItem(portId); - // called from internal thread: no need to clear caller identity - mAudioPolicyManager->releaseOutput(portId); + audioPolicyEffects = mAudioPolicyEffects; + + // called from internal thread: no need to clear caller identity + mAudioPolicyManager->releaseOutput(portId); + } + + if (audioPolicyEffects != 0) { + audioPolicyEffects->releaseOutputAudioSessionInfo(client->io, + client->stream, client->session); + } } status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr, @@ -1534,4 +1554,24 @@ status_t AudioPolicyService::registerSoundTriggerCaptureStateListener( return NO_ERROR; } +status_t AudioPolicyService::listAudioSessions(audio_stream_type_t streams, + Vector< sp<AudioSessionInfo>> &sessions) +{ + sp<AudioPolicyEffects> audioPolicyEffects; + { + Mutex::Autolock _l(mLock); + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + audioPolicyEffects = mAudioPolicyEffects; + } + + if (audioPolicyEffects != 0) { + return audioPolicyEffects->listAudioSessions(streams, sessions); + } + + // no errors here if effects are not available + return NO_ERROR; +} + } // namespace android diff --git a/services/audiopolicy/service/AudioPolicyService.cpp 
b/services/audiopolicy/service/AudioPolicyService.cpp index a6e8989722..fbd7614c08 100644 --- a/services/audiopolicy/service/AudioPolicyService.cpp +++ b/services/audiopolicy/service/AudioPolicyService.cpp @@ -78,7 +78,7 @@ void AudioPolicyService::onFirstRef() mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient); } // load audio processing modules - sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects(); + sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects(this); sp<UidPolicy> uidPolicy = new UidPolicy(this); sp<SensorPrivacyPolicy> sensorPrivacyPolicy = new SensorPrivacyPolicy(this); { @@ -293,6 +293,21 @@ status_t AudioPolicyService::clientSetAudioPortConfig(const struct audio_port_co return mAudioCommandThread->setAudioPortConfigCommand(config, delayMs); } +void AudioPolicyService::onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added) +{ + ALOGV("AudioPolicyService::onOutputSessionEffectsUpdate(%d, %d, %d)", + info->mStream, info->mSessionId, added); + mOutputCommandThread->effectSessionUpdateCommand(info, added); +} + +void AudioPolicyService::doOnOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added) +{ + Mutex::Autolock _l(mNotificationClientsLock); + for (size_t i = 0; i < mNotificationClients.size(); i++) { + mNotificationClients.valueAt(i)->onOutputSessionEffectsUpdate(info, added); + } +} + AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service, const sp<IAudioPolicyServiceClient>& client, uid_t uid, @@ -337,6 +352,13 @@ void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_gr } } +void AudioPolicyService::NotificationClient::onOutputSessionEffectsUpdate( + sp<AudioSessionInfo>& info, bool added) +{ + if (mAudioPolicyServiceClient != 0) { + mAudioPolicyServiceClient->onOutputSessionEffectsUpdate(info, added); + } +} void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate( const String8& regId, int32_t state) @@ -1366,6 +1388,20 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() svc->doOnNewAudioModulesAvailable(); mLock.lock(); } break; + case EFFECT_SESSION_UPDATE: { + EffectSessionUpdateData *data = + (EffectSessionUpdateData *)command->mParam.get(); + ALOGV("AudioCommandThread() processing effect session update %d %d %d", + data->mAudioSessionInfo->mStream, data->mAudioSessionInfo->mSessionId, + data->mAdded); + svc = mService.promote(); + if (svc == 0) { + break; + } + mLock.unlock(); + svc->doOnOutputSessionEffectsUpdate(data->mAudioSessionInfo, data->mAdded); + mLock.lock(); + } break; default: ALOGW("AudioCommandThread() unknown command %d", command->mCommand); @@ -1662,6 +1698,20 @@ void AudioPolicyService::AudioCommandThread::audioModulesUpdateCommand() sendCommand(command); } +void AudioPolicyService::AudioCommandThread::effectSessionUpdateCommand( + sp<AudioSessionInfo>& streamInfo, bool added) +{ + sp<AudioCommand> command = new AudioCommand(); + command->mCommand = EFFECT_SESSION_UPDATE; + EffectSessionUpdateData *data = new EffectSessionUpdateData(); + data->mAudioSessionInfo = streamInfo; + data->mAdded = added; + command->mParam = data; + ALOGV("AudioCommandThread() sending effect session update (id=%d) for stream %d (added=%d)", + streamInfo->mStream, streamInfo->mSessionId, added); + sendCommand(command); +} + status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs) { { diff --git a/services/audiopolicy/service/AudioPolicyService.h 
b/services/audiopolicy/service/AudioPolicyService.h index 869a963d05..fc61cce52c 100644 --- a/services/audiopolicy/service/AudioPolicyService.h +++ b/services/audiopolicy/service/AudioPolicyService.h @@ -288,6 +288,9 @@ public: status_t doStopOutput(audio_port_handle_t portId); void doReleaseOutput(audio_port_handle_t portId); + virtual status_t listAudioSessions(audio_stream_type_t stream, + Vector< sp<AudioSessionInfo>>& sessions); + status_t clientCreateAudioPatch(const struct audio_patch *patch, audio_patch_handle_t *handle, int delayMs); @@ -327,6 +330,9 @@ public: audio_session_t sessionId, bool suspended); + void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added); + void doOnOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added); + private: AudioPolicyService() ANDROID_API; virtual ~AudioPolicyService(); @@ -475,6 +481,7 @@ private: RECORDING_CONFIGURATION_UPDATE, SET_EFFECT_SUSPENDED, AUDIO_MODULES_UPDATE, + EFFECT_SESSION_UPDATE, }; AudioCommandThread (String8 name, const wp<AudioPolicyService>& service); @@ -522,6 +529,8 @@ private: bool suspended); void audioModulesUpdateCommand(); void insertCommand_l(AudioCommand *command, int delayMs = 0); + void effectSessionUpdateCommand(sp<AudioSessionInfo>& info, bool added); + private: class AudioCommandData; @@ -625,6 +634,12 @@ private: bool mSuspended; }; + class EffectSessionUpdateData : public AudioCommandData { + public: + sp<AudioSessionInfo> mAudioSessionInfo; + bool mAdded; + }; + Mutex mLock; Condition mWaitWorkCV; Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands @@ -743,6 +758,8 @@ private: void setSoundTriggerCaptureState(bool active) override; + virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added); + private: AudioPolicyService *mAudioPolicyService; }; @@ -771,6 +788,8 @@ private: audio_source_t source); void setAudioPortCallbacksEnabled(bool enabled); void setAudioVolumeGroupCallbacksEnabled(bool enabled); + void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, + bool added); uid_t uid() { return mUid; diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp index 501d92226f..53fc1be7eb 100644 --- a/services/camera/libcameraservice/Android.bp +++ b/services/camera/libcameraservice/Android.bp @@ -18,6 +18,10 @@ cc_library_shared { name: "libcameraservice", + defaults: [ + "no_cameraserver_defaults", + "qti_camera_device_defaults", + ], // Camera service source diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 24288d6baf..5f71b80fe1 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -951,7 +951,11 @@ int32_t CameraService::mapToInterface(StatusInternal status) { Status CameraService::initializeShimMetadata(int cameraId) { int uid = CameraThreadState::getCallingUid(); +#ifdef NO_CAMERA_SERVER + String16 internalPackageName("media"); +#else String16 internalPackageName("cameraserver"); +#endif String8 id = String8::format("%d", cameraId); Status ret = Status::ok(); sp<Client> tmp = nullptr; @@ -1032,7 +1036,9 @@ Status CameraService::getLegacyParametersLazy(int cameraId, static bool isTrustedCallingUid(uid_t uid) { switch (uid) { case AID_MEDIA: // mediaserver +#ifndef NO_CAMERA_SERVER case AID_CAMERASERVER: // cameraserver +#endif case AID_RADIO: // telephony return true; default: @@ -1165,6 +1171,7 @@ Status 
CameraService::validateClientPermissionsLocked(const String8& cameraId, clientName8.string(), clientUid, clientPid, cameraId.string()); } +#ifndef NO_CAMERA_SERVER // Make sure the UID is in an active state to use the camera if (!mUidPolicy->isUidActive(callingUid, String16(clientName8))) { int32_t procState = mUidPolicy->getProcState(callingUid); @@ -1176,6 +1183,7 @@ Status CameraService::validateClientPermissionsLocked(const String8& cameraId, clientName8.string(), clientUid, clientPid, cameraId.string(), callingUid, procState); } +#endif // If sensor privacy is enabled then prevent access to the camera if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) { @@ -2412,7 +2420,8 @@ bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) { ret = true; } } - + //clear the evicted client list before acquring service lock again. + evicted.clear(); // Reacquire mServiceLock mServiceLock.lock(); diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp index 892996c3af..43da23eabc 100644 --- a/services/camera/libcameraservice/api1/CameraClient.cpp +++ b/services/camera/libcameraservice/api1/CameraClient.cpp @@ -55,6 +55,9 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService, mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP; mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT); mPlayShutterSound = true; + + mLongshotEnabled = false; + mBurstCnt = 0; LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId); } @@ -672,6 +675,10 @@ status_t CameraClient::takePicture(int msgType) { CAMERA_MSG_COMPRESSED_IMAGE); enableMsgType(picMsgType); + mBurstCnt = mHardware->getParameters().getInt("num-snaps-per-shutter"); + if(mBurstCnt <= 0) + mBurstCnt = 1; + LOG1("mBurstCnt = %d", mBurstCnt); return mHardware->takePicture(); } @@ -755,6 +762,20 @@ status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) { } else if (cmd == CAMERA_CMD_PING) { // If mHardware is 0, checkPidAndHardware will return error. 
return OK; + } else if (cmd == CAMERA_CMD_HISTOGRAM_ON) { + enableMsgType(CAMERA_MSG_STATS_DATA); + } else if (cmd == CAMERA_CMD_HISTOGRAM_OFF) { + disableMsgType(CAMERA_MSG_STATS_DATA); + } else if (cmd == CAMERA_CMD_METADATA_ON) { + enableMsgType(CAMERA_MSG_META_DATA); + } else if (cmd == CAMERA_CMD_METADATA_OFF) { + disableMsgType(CAMERA_MSG_META_DATA); + } else if ( cmd == CAMERA_CMD_LONGSHOT_ON ) { + mLongshotEnabled = true; + } else if ( cmd == CAMERA_CMD_LONGSHOT_OFF ) { + mLongshotEnabled = false; + disableMsgType(CAMERA_MSG_SHUTTER); + disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE); } return mHardware->sendCommand(cmd, arg1, arg2); @@ -954,7 +975,9 @@ void CameraClient::handleShutter(void) { c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0); if (!lockIfMessageWanted(CAMERA_MSG_SHUTTER)) return; } - disableMsgType(CAMERA_MSG_SHUTTER); + if ( !mLongshotEnabled ) { + disableMsgType(CAMERA_MSG_SHUTTER); + } // Shutters only happen in response to takePicture, so mark device as // idle now, until preview is restarted @@ -1040,7 +1063,13 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) { // picture callback - compressed picture ready void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) { - disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE); + if (mBurstCnt) + mBurstCnt--; + + if (!mBurstCnt && !mLongshotEnabled) { + LOG1("handleCompressedPicture mBurstCnt = %d", mBurstCnt); + disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE); + } sp<hardware::ICameraClient> c = mRemoteCallback; mLock.unlock(); diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h index a7eb960ca7..12d6ad5a01 100644 --- a/services/camera/libcameraservice/api1/CameraClient.h +++ b/services/camera/libcameraservice/api1/CameraClient.h @@ -183,6 +183,9 @@ private: // This function keeps trying to grab mLock, or give up if the message // is found to be disabled. It returns true if mLock is grabbed. 
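Editor's note on the CameraClient changes above: the shutter and compressed-image callbacks now stay enabled across multi-frame captures. takePicture() seeds a burst counter from the vendor "num-snaps-per-shutter" parameter, and handleCompressedPicture() only disables the compressed-image message once the whole burst has been delivered, and never while long-shot mode is active. A simplified sketch of that bookkeeping, with the message mask and parameter lookup reduced to plain flags and a std::map (stand-ins for the real CameraClient state):

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

class BurstTracker {
public:
    // takePicture(): read "num-snaps-per-shutter", default to a single shot.
    void startCapture(const std::map<std::string, std::string>& params) {
        int n = 1;
        auto it = params.find("num-snaps-per-shutter");
        if (it != params.end()) n = atoi(it->second.c_str());
        mBurstCnt = (n <= 0) ? 1 : n;
        mCompressedMsgEnabled = true;
    }

    void setLongshot(bool enabled) { mLongshotEnabled = enabled; }

    // handleCompressedPicture(): keep the callback armed until the burst
    // completes, and indefinitely while long-shot is active.
    void onCompressedFrame() {
        if (mBurstCnt > 0) --mBurstCnt;
        if (mBurstCnt == 0 && !mLongshotEnabled) {
            mCompressedMsgEnabled = false;  // mirrors disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE)
        }
    }

    bool compressedMsgEnabled() const { return mCompressedMsgEnabled; }

private:
    int mBurstCnt = 0;
    bool mLongshotEnabled = false;
    bool mCompressedMsgEnabled = false;
};

int main() {
    BurstTracker t;
    t.startCapture({{"num-snaps-per-shutter", "3"}});
    for (int i = 0; i < 3; ++i) t.onCompressedFrame();
    printf("callback still enabled: %d\n", t.compressedMsgEnabled() ? 1 : 0);  // 0: burst done
    return 0;
}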
bool lockIfMessageWanted(int32_t msgType); + + bool mLongshotEnabled; + int mBurstCnt; }; } diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp index 62ef681668..61cada61a9 100644 --- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp +++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp @@ -136,6 +136,27 @@ hardware::Return<void> CameraHardwareInterface::dataCallback( return hardware::Void(); } +#ifdef QTI_CAMERA_DEVICE +hardware::Return<void> CameraHardwareInterface::QDataCallback( + DataCallbackMsg msgType, uint32_t data, uint32_t bufferIndex, + const vendor::qti::hardware::camera::device::V1_0::QCameraFrameMetadata& metadata) { + camera_memory_t* mem = nullptr; + { + std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock); + if (mHidlMemPoolMap.count(data) == 0) { + ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data); + return hardware::Void(); + } + mem = mHidlMemPoolMap.at(data); + } + camera_frame_metadata_t md; + md.number_of_faces = metadata.faces.size(); + md.faces = (camera_face_t*) metadata.faces.data(); + sDataCb((int32_t) msgType, mem, bufferIndex, &md, this); + return hardware::Void(); +} +#endif + hardware::Return<void> CameraHardwareInterface::dataCallbackTimestamp( DataCallbackMsg msgType, uint32_t data, uint32_t bufferIndex, int64_t timestamp) { @@ -591,12 +612,16 @@ void CameraHardwareInterface::releaseRecordingFrame(const sp<IMemory>& mem) // Either document why it is safe in this case or address the // issue (e.g. by copying). VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer(); - // Caching the handle here because md->pHandle will be subject to HAL's edit - native_handle_t* nh = md->pHandle; - hidl_handle frame = nh; - mHidlDevice->releaseRecordingFrameHandle(heapId, bufferIndex, frame); - native_handle_close(nh); - native_handle_delete(nh); + if (md->eType == kMetadataBufferTypeNativeHandleSource) { + // Caching the handle here because md->pHandle will be subject to HAL's edit + native_handle_t* nh = md->pHandle; + hidl_handle frame = nh; + mHidlDevice->releaseRecordingFrameHandle(heapId, bufferIndex, frame); + native_handle_close(nh); + native_handle_delete(nh); + } else { + mHidlDevice->releaseRecordingFrame(heapId, bufferIndex); + } } else { mHidlDevice->releaseRecordingFrame(heapId, bufferIndex); } diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h index e519b04cdd..4a20b6a597 100644 --- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h +++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h @@ -29,6 +29,9 @@ #include <hardware/camera.h> #include <common/CameraProviderManager.h> +#ifdef QTI_CAMERA_DEVICE +#include <vendor/qti/hardware/camera/device/1.0/IQCameraDeviceCallback.h> +#endif namespace android { @@ -85,7 +88,11 @@ typedef void (*data_callback_timestamp_batch)( class CameraHardwareInterface : public virtual RefBase, +#ifdef QTI_CAMERA_DEVICE + public virtual vendor::qti::hardware::camera::device::V1_0::IQCameraDeviceCallback, +#else public virtual hardware::camera::device::V1_0::ICameraDeviceCallback, +#endif public virtual hardware::camera::device::V1_0::ICameraDevicePreviewCallback { public: @@ -395,6 +402,12 @@ private: hardware::camera::device::V1_0::DataCallbackMsg msgType, const hardware::hidl_vec< 
hardware::camera::device::V1_0::HandleTimestampMessage>&) override; +#ifdef QTI_CAMERA_DEVICE + hardware::Return<void> QDataCallback( + hardware::camera::device::V1_0::DataCallbackMsg msgType, + uint32_t data, uint32_t bufferIndex, + const vendor::qti::hardware::camera::device::V1_0::QCameraFrameMetadata& metadata) override; +#endif /** * Implementation of android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback |
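Editor's closing note, on the CameraHardwareInterface.cpp releaseRecordingFrame() hunk above: the release path now inspects the metadata buffer type before treating the payload as a native handle, so a frame that was not produced as kMetadataBufferTypeNativeHandleSource is released through the plain path instead of having a bogus handle closed and deleted. A stripped-down sketch of that guard, with hypothetical stand-in types for the metadata struct and the HAL proxy (these are not the real HIDL interfaces):

#include <cstdio>

enum MetadataBufferType { kMetadataBufferTypeCameraSource, kMetadataBufferTypeNativeHandleSource };

struct NativeHandle { int fd; };
struct VideoNativeHandleMetadata {
    MetadataBufferType eType;
    NativeHandle* pHandle;   // only meaningful for the native-handle type
};

// Stand-in for the HAL device proxy.
struct HalDevice {
    void releaseRecordingFrameHandle(int heapId, int index, NativeHandle* h) {
        printf("release handle frame: heap=%d index=%d fd=%d\n", heapId, index, h->fd);
    }
    void releaseRecordingFrame(int heapId, int index) {
        printf("release plain frame: heap=%d index=%d\n", heapId, index);
    }
};

// Mirrors the patch: only walk the native-handle path when the metadata
// really carries a handle; everything else goes through the plain release.
void releaseRecordingFrame(HalDevice& hal, int heapId, int index,
                           const VideoNativeHandleMetadata* md) {
    if (md != nullptr && md->eType == kMetadataBufferTypeNativeHandleSource) {
        NativeHandle* nh = md->pHandle;   // cache it: the HAL may edit md->pHandle
        hal.releaseRecordingFrameHandle(heapId, index, nh);
        // the real code also calls native_handle_close()/native_handle_delete() here
    } else {
        hal.releaseRecordingFrame(heapId, index);
    }
}

int main() {
    HalDevice hal;
    NativeHandle nh{42};
    VideoNativeHandleMetadata withHandle{kMetadataBufferTypeNativeHandleSource, &nh};
    VideoNativeHandleMetadata without{kMetadataBufferTypeCameraSource, nullptr};
    releaseRecordingFrame(hal, 1, 0, &withHandle);
    releaseRecordingFrame(hal, 1, 1, &without);
    return 0;
}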