Diffstat (limited to 'camera/device/3.4/default/ExternalCameraDeviceSession.cpp')
-rw-r--r--  camera/device/3.4/default/ExternalCameraDeviceSession.cpp  596
1 file changed, 102 insertions, 494 deletions
diff --git a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
index 9ff0d74687..5f8674219c 100644
--- a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
+++ b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
@@ -81,8 +81,6 @@ bool tryLock(std::mutex& mutex)
     return locked;
 }
 
-buffer_handle_t sEmptyBuffer = nullptr;
-
 } // Anonymous namespace
 
 // Static instances
@@ -119,8 +117,8 @@ bool ExternalCameraDeviceSession::initialize() {
     std::string make, model;
     if (ret < 0) {
         ALOGW("%s v4l2 QUERYCAP failed", __FUNCTION__);
-        make = "Generic UVC webcam";
-        model = "Generic UVC webcam";
+        mExifMake = "Generic UVC webcam";
+        mExifModel = "Generic UVC webcam";
     } else {
         // capability.card is UTF-8 encoded
         char card[32];
@@ -134,11 +132,11 @@ bool ExternalCameraDeviceSession::initialize() {
             }
         }
         if (j == 0 || card[j - 1] != '\0') {
-            make = "Generic UVC webcam";
-            model = "Generic UVC webcam";
+            mExifMake = "Generic UVC webcam";
+            mExifModel = "Generic UVC webcam";
         } else {
-            make = card;
-            model = card;
+            mExifMake = card;
+            mExifModel = card;
         }
     }
 
@@ -147,7 +145,7 @@ bool ExternalCameraDeviceSession::initialize() {
         ALOGE("%s: init OutputThread failed!", __FUNCTION__);
         return true;
     }
-    mOutputThread->setExifMakeModel(make, model);
+    mOutputThread->setExifMakeModel(mExifMake, mExifModel);
 
     status_t status = initDefaultRequests();
     if (status != OK) {
@@ -161,7 +159,7 @@ bool ExternalCameraDeviceSession::initialize() {
         ALOGE("%s: invalid request fmq", __FUNCTION__);
         return true;
     }
-    mResultMetadataQueue = std::make_shared<RequestMetadataQueue>(
+    mResultMetadataQueue = std::make_shared<ResultMetadataQueue>(
             kMetadataMsgQueueSize, false /* non blocking */);
     if (!mResultMetadataQueue->isValid()) {
         ALOGE("%s: invalid result fmq", __FUNCTION__);
@@ -183,7 +181,7 @@ bool ExternalCameraDeviceSession::isInitFailed() {
 }
 
 void ExternalCameraDeviceSession::initOutputThread() {
-    mOutputThread = new OutputThread(this, mCroppingType);
+    mOutputThread = new OutputThread(this, mCroppingType, mCameraCharacteristics);
 }
 
 void ExternalCameraDeviceSession::closeOutputThread() {
@@ -518,35 +516,9 @@ Status ExternalCameraDeviceSession::importBufferLocked(int32_t streamId,
         uint64_t bufId, buffer_handle_t buf,
         /*out*/buffer_handle_t** outBufPtr,
         bool allowEmptyBuf) {
-
-    if (buf == nullptr && bufId == BUFFER_ID_NO_BUFFER) {
-        if (allowEmptyBuf) {
-            *outBufPtr = &sEmptyBuffer;
-            return Status::OK;
-        } else {
-            ALOGE("%s: bufferId %" PRIu64 " has null buffer handle!", __FUNCTION__, bufId);
-            return Status::ILLEGAL_ARGUMENT;
-        }
-    }
-
-    CirculatingBuffers& cbs = mCirculatingBuffers[streamId];
-    if (cbs.count(bufId) == 0) {
-        if (buf == nullptr) {
-            ALOGE("%s: bufferId %" PRIu64 " has null buffer handle!", __FUNCTION__, bufId);
-            return Status::ILLEGAL_ARGUMENT;
-        }
-        // Register a newly seen buffer
-        buffer_handle_t importedBuf = buf;
-        sHandleImporter.importBuffer(importedBuf);
-        if (importedBuf == nullptr) {
-            ALOGE("%s: output buffer for stream %d is invalid!", __FUNCTION__, streamId);
-            return Status::INTERNAL_ERROR;
-        } else {
-            cbs[bufId] = importedBuf;
-        }
-    }
-    *outBufPtr = &cbs[bufId];
-    return Status::OK;
+    return importBufferImpl(
+            mCirculatingBuffers, sHandleImporter, streamId,
+            bufId, buf, outBufPtr, allowEmptyBuf);
 }
 
 Status ExternalCameraDeviceSession::importRequestLockedImpl(
@@ -791,15 +763,32 @@ void ExternalCameraDeviceSession::notifyError(
 
 //TODO: refactor with processCaptureResult
 Status ExternalCameraDeviceSession::processCaptureRequestError(
-        const std::shared_ptr<HalRequest>& req) {
+        const std::shared_ptr<HalRequest>& req,
+        /*out*/std::vector<NotifyMsg>* outMsgs,
+        /*out*/std::vector<CaptureResult>* outResults) {
     ATRACE_CALL();
     // Return V4L2 buffer to V4L2 buffer queue
-    enqueueV4l2Frame(req->frameIn);
+    sp<V3_4::implementation::V4L2Frame> v4l2Frame =
+            static_cast<V3_4::implementation::V4L2Frame*>(req->frameIn.get());
+    enqueueV4l2Frame(v4l2Frame);
 
-    // NotifyShutter
-    notifyShutter(req->frameNumber, req->shutterTs);
+    if (outMsgs == nullptr) {
+        notifyShutter(req->frameNumber, req->shutterTs);
+        notifyError(/*frameNum*/req->frameNumber, /*stream*/-1, ErrorCode::ERROR_REQUEST);
+    } else {
+        NotifyMsg shutter;
+        shutter.type = MsgType::SHUTTER;
+        shutter.msg.shutter.frameNumber = req->frameNumber;
+        shutter.msg.shutter.timestamp = req->shutterTs;
 
-    notifyError(/*frameNum*/req->frameNumber, /*stream*/-1, ErrorCode::ERROR_REQUEST);
+        NotifyMsg error;
+        error.type = MsgType::ERROR;
+        error.msg.error.frameNumber = req->frameNumber;
+        error.msg.error.errorStreamId = -1;
+        error.msg.error.errorCode = ErrorCode::ERROR_REQUEST;
+        outMsgs->push_back(shutter);
+        outMsgs->push_back(error);
+    }
 
     // Fill output buffers
     hidl_vec<CaptureResult> results;
@@ -826,16 +815,22 @@ Status ExternalCameraDeviceSession::processCaptureRequestError(
         mInflightFrames.erase(req->frameNumber);
     }
 
-    // Callback into framework
-    invokeProcessCaptureResultCallback(results, /* tryWriteFmq */true);
-    freeReleaseFences(results);
+    if (outResults == nullptr) {
+        // Callback into framework
+        invokeProcessCaptureResultCallback(results, /* tryWriteFmq */true);
+        freeReleaseFences(results);
+    } else {
+        outResults->push_back(result);
+    }
     return Status::OK;
 }
 
 Status ExternalCameraDeviceSession::processCaptureResult(std::shared_ptr<HalRequest>& req) {
     ATRACE_CALL();
     // Return V4L2 buffer to V4L2 buffer queue
-    enqueueV4l2Frame(req->frameIn);
+    sp<V3_4::implementation::V4L2Frame> v4l2Frame =
+            static_cast<V3_4::implementation::V4L2Frame*>(req->frameIn.get());
+    enqueueV4l2Frame(v4l2Frame);
 
     // NotifyShutter
     notifyShutter(req->frameNumber, req->shutterTs);
@@ -923,29 +918,10 @@ void ExternalCameraDeviceSession::invokeProcessCaptureResultCallback(
     mProcessCaptureResultLock.unlock();
 }
 
-void ExternalCameraDeviceSession::freeReleaseFences(hidl_vec<CaptureResult>& results) {
-    for (auto& result : results) {
-        if (result.inputBuffer.releaseFence.getNativeHandle() != nullptr) {
-            native_handle_t* handle = const_cast<native_handle_t*>(
-                    result.inputBuffer.releaseFence.getNativeHandle());
-            native_handle_close(handle);
-            native_handle_delete(handle);
-        }
-        for (auto& buf : result.outputBuffers) {
-            if (buf.releaseFence.getNativeHandle() != nullptr) {
-                native_handle_t* handle = const_cast<native_handle_t*>(
-                        buf.releaseFence.getNativeHandle());
-                native_handle_close(handle);
-                native_handle_delete(handle);
-            }
-        }
-    }
-    return;
-}
-
 ExternalCameraDeviceSession::OutputThread::OutputThread(
-        wp<ExternalCameraDeviceSession> parent,
-        CroppingType ct) : mParent(parent), mCroppingType(ct) {}
+        wp<OutputThreadInterface> parent, CroppingType ct,
+        const common::V1_0::helper::CameraMetadata& chars) :
+        mParent(parent), mCroppingType(ct), mCameraCharacteristics(chars) {}
 
 ExternalCameraDeviceSession::OutputThread::~OutputThread() {}
 
@@ -955,88 +931,6 @@ void ExternalCameraDeviceSession::OutputThread::setExifMakeModel(
     mExifModel = model;
 }
 
-uint32_t ExternalCameraDeviceSession::OutputThread::getFourCcFromLayout(
-        const YCbCrLayout& layout) {
-    intptr_t cb = reinterpret_cast<intptr_t>(layout.cb);
-    intptr_t cr = reinterpret_cast<intptr_t>(layout.cr);
-    if (std::abs(cb - cr) == 1 && layout.chromaStep == 2) {
-        // Interleaved format
-        if (layout.cb > layout.cr) {
-            return V4L2_PIX_FMT_NV21;
-        } else {
-            return V4L2_PIX_FMT_NV12;
-        }
-    } else if (layout.chromaStep == 1) {
-        // Planar format
-        if (layout.cb > layout.cr) {
-            return V4L2_PIX_FMT_YVU420; // YV12
-        } else {
-            return V4L2_PIX_FMT_YUV420; // YU12
-        }
-    } else {
-        return FLEX_YUV_GENERIC;
-    }
-}
-
-int ExternalCameraDeviceSession::OutputThread::getCropRect(
-        CroppingType ct, const Size& inSize, const Size& outSize, IMapper::Rect* out) {
-    if (out == nullptr) {
-        ALOGE("%s: out is null", __FUNCTION__);
-        return -1;
-    }
-
-    uint32_t inW = inSize.width;
-    uint32_t inH = inSize.height;
-    uint32_t outW = outSize.width;
-    uint32_t outH = outSize.height;
-
-    // Handle special case where aspect ratio is close to input but scaled
-    // dimension is slightly larger than input
-    float arIn = ASPECT_RATIO(inSize);
-    float arOut = ASPECT_RATIO(outSize);
-    if (isAspectRatioClose(arIn, arOut)) {
-        out->left = 0;
-        out->top = 0;
-        out->width = inW;
-        out->height = inH;
-        return 0;
-    }
-
-    if (ct == VERTICAL) {
-        uint64_t scaledOutH = static_cast<uint64_t>(outH) * inW / outW;
-        if (scaledOutH > inH) {
-            ALOGE("%s: Output size %dx%d cannot be vertically cropped from input size %dx%d",
-                    __FUNCTION__, outW, outH, inW, inH);
-            return -1;
-        }
-        scaledOutH = scaledOutH & ~0x1; // make it multiple of 2
-
-        out->left = 0;
-        out->top = ((inH - scaledOutH) / 2) & ~0x1;
-        out->width = inW;
-        out->height = static_cast<int32_t>(scaledOutH);
-        ALOGV("%s: crop %dx%d to %dx%d: top %d, scaledH %d",
-                __FUNCTION__, inW, inH, outW, outH, out->top, static_cast<int32_t>(scaledOutH));
-    } else {
-        uint64_t scaledOutW = static_cast<uint64_t>(outW) * inH / outH;
-        if (scaledOutW > inW) {
-            ALOGE("%s: Output size %dx%d cannot be horizontally cropped from input size %dx%d",
-                    __FUNCTION__, outW, outH, inW, inH);
-            return -1;
-        }
-        scaledOutW = scaledOutW & ~0x1; // make it multiple of 2
-
-        out->left = ((inW - scaledOutW) / 2) & ~0x1;
-        out->top = 0;
-        out->width = static_cast<int32_t>(scaledOutW);
-        out->height = inH;
-        ALOGV("%s: crop %dx%d to %dx%d: top %d, scaledW %d",
-                __FUNCTION__, inW, inH, outW, outH, out->top, static_cast<int32_t>(scaledOutW));
-    }
-
-    return 0;
-}
-
 int ExternalCameraDeviceSession::OutputThread::cropAndScaleLocked(
         sp<AllocatedFrame>& in, const Size& outSz, YCbCrLayout* out) {
     Size inSz = {in->mWidth, in->mHeight};
@@ -1274,265 +1168,6 @@ int ExternalCameraDeviceSession::OutputThread::cropAndScaleThumbLocked(
     return 0;
 }
 
-int ExternalCameraDeviceSession::OutputThread::formatConvertLocked(
-        const YCbCrLayout& in, const YCbCrLayout& out, Size sz, uint32_t format) {
-    int ret = 0;
-    switch (format) {
-        case V4L2_PIX_FMT_NV21:
-            ret = libyuv::I420ToNV21(
-                    static_cast<uint8_t*>(in.y),
-                    in.yStride,
-                    static_cast<uint8_t*>(in.cb),
-                    in.cStride,
-                    static_cast<uint8_t*>(in.cr),
-                    in.cStride,
-                    static_cast<uint8_t*>(out.y),
-                    out.yStride,
-                    static_cast<uint8_t*>(out.cr),
-                    out.cStride,
-                    sz.width,
-                    sz.height);
-            if (ret != 0) {
-                ALOGE("%s: convert to NV21 buffer failed! ret %d",
-                        __FUNCTION__, ret);
-                return ret;
-            }
-            break;
-        case V4L2_PIX_FMT_NV12:
-            ret = libyuv::I420ToNV12(
-                    static_cast<uint8_t*>(in.y),
-                    in.yStride,
-                    static_cast<uint8_t*>(in.cb),
-                    in.cStride,
-                    static_cast<uint8_t*>(in.cr),
-                    in.cStride,
-                    static_cast<uint8_t*>(out.y),
-                    out.yStride,
-                    static_cast<uint8_t*>(out.cb),
-                    out.cStride,
-                    sz.width,
-                    sz.height);
-            if (ret != 0) {
-                ALOGE("%s: convert to NV12 buffer failed! ret %d",
-                        __FUNCTION__, ret);
-                return ret;
-            }
-            break;
-        case V4L2_PIX_FMT_YVU420: // YV12
-        case V4L2_PIX_FMT_YUV420: // YU12
-            // TODO: maybe we can speed up here by somehow save this copy?
-            ret = libyuv::I420Copy(
-                    static_cast<uint8_t*>(in.y),
-                    in.yStride,
-                    static_cast<uint8_t*>(in.cb),
-                    in.cStride,
-                    static_cast<uint8_t*>(in.cr),
-                    in.cStride,
-                    static_cast<uint8_t*>(out.y),
-                    out.yStride,
-                    static_cast<uint8_t*>(out.cb),
-                    out.cStride,
-                    static_cast<uint8_t*>(out.cr),
-                    out.cStride,
-                    sz.width,
-                    sz.height);
-            if (ret != 0) {
-                ALOGE("%s: copy to YV12 or YU12 buffer failed! ret %d",
-                        __FUNCTION__, ret);
-                return ret;
-            }
-            break;
-        case FLEX_YUV_GENERIC:
-            // TODO: b/72261744 write to arbitrary flexible YUV layout. Slow.
-            ALOGE("%s: unsupported flexible yuv layout"
-                    " y %p cb %p cr %p y_str %d c_str %d c_step %d",
-                    __FUNCTION__, out.y, out.cb, out.cr,
-                    out.yStride, out.cStride, out.chromaStep);
-            return -1;
-        default:
-            ALOGE("%s: unknown YUV format 0x%x!", __FUNCTION__, format);
-            return -1;
-    }
-    return 0;
-}
-
-int ExternalCameraDeviceSession::OutputThread::encodeJpegYU12(
-        const Size & inSz, const YCbCrLayout& inLayout,
-        int jpegQuality, const void *app1Buffer, size_t app1Size,
-        void *out, const size_t maxOutSize, size_t &actualCodeSize)
-{
-    /* libjpeg is a C library so we use C-style "inheritance" by
-     * putting libjpeg's jpeg_destination_mgr first in our custom
-     * struct. This allows us to cast jpeg_destination_mgr* to
-     * CustomJpegDestMgr* when we get it passed to us in a callback */
-    struct CustomJpegDestMgr {
-        struct jpeg_destination_mgr mgr;
-        JOCTET *mBuffer;
-        size_t mBufferSize;
-        size_t mEncodedSize;
-        bool mSuccess;
-    } dmgr;
-
-    jpeg_compress_struct cinfo = {};
-    jpeg_error_mgr jerr;
-
-    /* Initialize error handling with standard callbacks, but
-     * then override output_message (to print to ALOG) and
-     * error_exit to set a flag and print a message instead
-     * of killing the whole process */
-    cinfo.err = jpeg_std_error(&jerr);
-
-    cinfo.err->output_message = [](j_common_ptr cinfo) {
-        char buffer[JMSG_LENGTH_MAX];
-
-        /* Create the message */
-        (*cinfo->err->format_message)(cinfo, buffer);
-        ALOGE("libjpeg error: %s", buffer);
-    };
-
-    cinfo.err->error_exit = [](j_common_ptr cinfo) {
-        (*cinfo->err->output_message)(cinfo);
-        if(cinfo->client_data) {
-            auto & dmgr =
-                *reinterpret_cast<CustomJpegDestMgr*>(cinfo->client_data);
-            dmgr.mSuccess = false;
-        }
-    };
-
-    /* Now that we initialized some callbacks, let's create our compressor */
-    jpeg_create_compress(&cinfo);
-
-    /* Initialize our destination manager */
-    dmgr.mBuffer = static_cast<JOCTET*>(out);
-    dmgr.mBufferSize = maxOutSize;
-    dmgr.mEncodedSize = 0;
-    dmgr.mSuccess = true;
-    cinfo.client_data = static_cast<void*>(&dmgr);
-
-    /* These lambdas become C-style function pointers and as per C++11 spec
-     * may not capture anything */
-    dmgr.mgr.init_destination = [](j_compress_ptr cinfo) {
-        auto & dmgr = reinterpret_cast<CustomJpegDestMgr&>(*cinfo->dest);
-        dmgr.mgr.next_output_byte = dmgr.mBuffer;
-        dmgr.mgr.free_in_buffer = dmgr.mBufferSize;
-        ALOGV("%s:%d jpeg start: %p [%zu]",
-              __FUNCTION__, __LINE__, dmgr.mBuffer, dmgr.mBufferSize);
-    };
-
-    dmgr.mgr.empty_output_buffer = [](j_compress_ptr cinfo __unused) {
-        ALOGV("%s:%d Out of buffer", __FUNCTION__, __LINE__);
-        return 0;
-    };
-
-    dmgr.mgr.term_destination = [](j_compress_ptr cinfo) {
-        auto & dmgr = reinterpret_cast<CustomJpegDestMgr&>(*cinfo->dest);
-        dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.mgr.free_in_buffer;
-        ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
-    };
-
-    cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
-
-    /* We are going to be using JPEG in raw data mode, so we are passing
-     * straight subsampled planar YCbCr and it will not touch our pixel
-     * data or do any scaling or anything */
-    cinfo.image_width = inSz.width;
-    cinfo.image_height = inSz.height;
-    cinfo.input_components = 3;
-    cinfo.in_color_space = JCS_YCbCr;
-
-    /* Initialize defaults and then override what we want */
-    jpeg_set_defaults(&cinfo);
-
-    jpeg_set_quality(&cinfo, jpegQuality, 1);
-    jpeg_set_colorspace(&cinfo, JCS_YCbCr);
-    cinfo.raw_data_in = 1;
-    cinfo.dct_method = JDCT_IFAST;
-
-    /* Configure sampling factors. The sampling factor is JPEG subsampling 420
-     * because the source format is YUV420. Note that libjpeg sampling factors
-     * are... a little weird. Sampling of Y=2,U=1,V=1 means there is 1 U and
-     * 1 V value for each 2 Y values */
-    cinfo.comp_info[0].h_samp_factor = 2;
-    cinfo.comp_info[0].v_samp_factor = 2;
-    cinfo.comp_info[1].h_samp_factor = 1;
-    cinfo.comp_info[1].v_samp_factor = 1;
-    cinfo.comp_info[2].h_samp_factor = 1;
-    cinfo.comp_info[2].v_samp_factor = 1;
-
-    /* Let's not hardcode YUV420 in 6 places... 5 was enough */
-    int maxVSampFactor = std::max( {
-        cinfo.comp_info[0].v_samp_factor,
-        cinfo.comp_info[1].v_samp_factor,
-        cinfo.comp_info[2].v_samp_factor
-    });
-    int cVSubSampling = cinfo.comp_info[0].v_samp_factor /
-                        cinfo.comp_info[1].v_samp_factor;
-
-    /* Start the compressor */
-    jpeg_start_compress(&cinfo, TRUE);
-
-    /* Compute our macroblock height, so we can pad our input to be vertically
-     * macroblock aligned.
-     * TODO: Does it need to be horizontally MCU aligned too? */
-
-    size_t mcuV = DCTSIZE*maxVSampFactor;
-    size_t paddedHeight = mcuV * ((inSz.height + mcuV - 1) / mcuV);
-
-    /* libjpeg uses arrays of row pointers, which makes it really easy to pad
-     * data vertically (unfortunately doesn't help horizontally) */
-    std::vector<JSAMPROW> yLines (paddedHeight);
-    std::vector<JSAMPROW> cbLines(paddedHeight/cVSubSampling);
-    std::vector<JSAMPROW> crLines(paddedHeight/cVSubSampling);
-
-    uint8_t *py = static_cast<uint8_t*>(inLayout.y);
-    uint8_t *pcr = static_cast<uint8_t*>(inLayout.cr);
-    uint8_t *pcb = static_cast<uint8_t*>(inLayout.cb);
-
-    for(uint32_t i = 0; i < paddedHeight; i++)
-    {
-        /* Once we are in the padding territory we still point to the last line
-         * effectively replicating it several times ~ CLAMP_TO_EDGE */
-        int li = std::min(i, inSz.height - 1);
-        yLines[i] = static_cast<JSAMPROW>(py + li * inLayout.yStride);
-        if(i < paddedHeight / cVSubSampling)
-        {
-            crLines[i] = static_cast<JSAMPROW>(pcr + li * inLayout.cStride);
-            cbLines[i] = static_cast<JSAMPROW>(pcb + li * inLayout.cStride);
-        }
-    }
-
-    /* If APP1 data was passed in, use it */
-    if(app1Buffer && app1Size)
-    {
-        jpeg_write_marker(&cinfo, JPEG_APP0 + 1,
-              static_cast<const JOCTET*>(app1Buffer), app1Size);
-    }
-
-    /* While we still have padded height left to go, keep giving it one
-     * macroblock at a time. */
-    while (cinfo.next_scanline < cinfo.image_height) {
-        const uint32_t batchSize = DCTSIZE * maxVSampFactor;
-        const uint32_t nl = cinfo.next_scanline;
-        JSAMPARRAY planes[3]{ &yLines[nl],
-                              &cbLines[nl/cVSubSampling],
-                              &crLines[nl/cVSubSampling] };
-
-        uint32_t done = jpeg_write_raw_data(&cinfo, planes, batchSize);
-
-        if (done != batchSize) {
-            ALOGE("%s: compressed %u lines, expected %u (total %u/%u)",
-                  __FUNCTION__, done, batchSize, cinfo.next_scanline,
-                  cinfo.image_height);
-            return -1;
-        }
-    }
-
-    /* This will flush everything */
-    jpeg_finish_compress(&cinfo);
-
-    /* Grab the actual code size and set it */
-    actualCodeSize = dmgr.mEncodedSize;
-
-    return 0;
-}
-
 /*
  * TODO: There needs to be a mechanism to discover allocated buffer size
  * in the HAL.
@@ -1555,25 +1190,9 @@ Size ExternalCameraDeviceSession::getMaxJpegResolution() const {
 }
 
 Size ExternalCameraDeviceSession::getMaxThumbResolution() const {
-    Size thumbSize { 0, 0 };
-    camera_metadata_ro_entry entry =
-        mCameraCharacteristics.find(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES);
-    for(uint32_t i = 0; i < entry.count; i += 2) {
-        Size sz { static_cast<uint32_t>(entry.data.i32[i]),
-                  static_cast<uint32_t>(entry.data.i32[i+1]) };
-        if(sz.width * sz.height > thumbSize.width * thumbSize.height) {
-            thumbSize = sz;
-        }
-    }
-
-    if (thumbSize.width * thumbSize.height == 0) {
-        ALOGW("%s: non-zero thumbnail size not available", __FUNCTION__);
-    }
-
-    return thumbSize;
+    return getMaxThumbnailResolution(mCameraCharacteristics);
 }
-
 ssize_t ExternalCameraDeviceSession::getJpegBufferSize(
         uint32_t width, uint32_t height) const {
     // Constant from camera3.h
@@ -1616,7 +1235,7 @@ ssize_t ExternalCameraDeviceSession::getJpegBufferSize(
 
 int ExternalCameraDeviceSession::OutputThread::createJpegLocked(
         HalStreamBuffer &halBuf,
-        const std::shared_ptr<HalRequest>& req)
+        const common::V1_0::helper::CameraMetadata& setting)
 {
     ATRACE_CALL();
     int ret;
@@ -1645,17 +1264,17 @@ int ExternalCameraDeviceSession::OutputThread::createJpegLocked(
     Size thumbSize;
     bool outputThumbnail = true;
 
-    if (req->setting.exists(ANDROID_JPEG_QUALITY)) {
-        camera_metadata_entry entry =
-            req->setting.find(ANDROID_JPEG_QUALITY);
+    if (setting.exists(ANDROID_JPEG_QUALITY)) {
+        camera_metadata_ro_entry entry =
+            setting.find(ANDROID_JPEG_QUALITY);
         jpegQuality = entry.data.u8[0];
     } else {
        return lfail("%s: ANDROID_JPEG_QUALITY not set",__FUNCTION__);
    }
 
-    if (req->setting.exists(ANDROID_JPEG_THUMBNAIL_QUALITY)) {
-        camera_metadata_entry entry =
-            req->setting.find(ANDROID_JPEG_THUMBNAIL_QUALITY);
+    if (setting.exists(ANDROID_JPEG_THUMBNAIL_QUALITY)) {
+        camera_metadata_ro_entry entry =
+            setting.find(ANDROID_JPEG_THUMBNAIL_QUALITY);
         thumbQuality = entry.data.u8[0];
     } else {
         return lfail(
@@ -1663,9 +1282,9 @@ int ExternalCameraDeviceSession::OutputThread::createJpegLocked(
                 __FUNCTION__);
     }
 
-    if (req->setting.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
-        camera_metadata_entry entry =
-            req->setting.find(ANDROID_JPEG_THUMBNAIL_SIZE);
+    if (setting.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
+        camera_metadata_ro_entry entry =
+            setting.find(ANDROID_JPEG_THUMBNAIL_SIZE);
         thumbSize = Size { static_cast<uint32_t>(entry.data.i32[0]),
                 static_cast<uint32_t>(entry.data.i32[1])
         };
@@ -1732,8 +1351,8 @@ int ExternalCameraDeviceSession::OutputThread::createJpegLocked(
 
     /* Combine camera characteristics with request settings to form EXIF
      * metadata */
-    common::V1_0::helper::CameraMetadata meta(parent->mCameraCharacteristics);
-    meta.append(req->setting);
+    common::V1_0::helper::CameraMetadata meta(mCameraCharacteristics);
+    meta.append(setting);
 
     /* Generate EXIF object */
     std::unique_ptr<ExifUtils> utils(ExifUtils::create());
@@ -1838,7 +1457,7 @@ bool ExternalCameraDeviceSession::OutputThread::threadLoop() {
     // TODO: see if we can save some computation by converting to YV12 here
     uint8_t* inData;
     size_t inDataSize;
-    if (req->frameIn->map(&inData, &inDataSize) != 0) {
+    if (req->frameIn->getData(&inData, &inDataSize) != 0) {
         lk.unlock();
         return onDeviceError("%s: V4L2 buffer map failed", __FUNCTION__);
    }
@@ -1899,7 +1518,7 @@ bool ExternalCameraDeviceSession::OutputThread::threadLoop() {
         // Gralloc lockYCbCr the buffer
         switch (halBuf.format) {
             case PixelFormat::BLOB: {
-                int ret = createJpegLocked(halBuf, req);
+                int ret = createJpegLocked(halBuf, req->setting);
 
                 if(ret != 0) {
                     lk.unlock();
@@ -1949,8 +1568,8 @@ bool ExternalCameraDeviceSession::OutputThread::threadLoop() {
                 }
 
                 Size sz {halBuf.width, halBuf.height};
-                ATRACE_BEGIN("formatConvertLocked");
-                ret = formatConvertLocked(cropAndScaled, outLayout, sz, outputFourcc);
+                ATRACE_BEGIN("formatConvert");
+                ret = formatConvert(cropAndScaled, outLayout, sz, outputFourcc);
                 ATRACE_END();
                 if (ret != 0) {
                     lk.unlock();
@@ -2055,6 +1674,14 @@ Status ExternalCameraDeviceSession::OutputThread::allocateIntermediateBuffers(
     return Status::OK;
 }
 
+void ExternalCameraDeviceSession::OutputThread::clearIntermediateBuffers() {
+    std::lock_guard<std::mutex> lk(mBufferLock);
+    mYu12Frame.clear();
+    mYu12ThumbFrame.clear();
+    mIntermediateBuffers.clear();
+    mBlobBufferSize = 0;
+}
+
 Status ExternalCameraDeviceSession::OutputThread::submitRequest(
         const std::shared_ptr<HalRequest>& req) {
     std::unique_lock<std::mutex> lk(mRequestListLock);
@@ -2090,6 +1717,32 @@ void ExternalCameraDeviceSession::OutputThread::flush() {
     }
 }
 
+std::list<std::shared_ptr<HalRequest>>
+ExternalCameraDeviceSession::OutputThread::switchToOffline() {
+    ATRACE_CALL();
+    std::list<std::shared_ptr<HalRequest>> emptyList;
+    auto parent = mParent.promote();
+    if (parent == nullptr) {
+        ALOGE("%s: session has been disconnected!", __FUNCTION__);
+        return emptyList;
+    }
+
+    std::unique_lock<std::mutex> lk(mRequestListLock);
+    std::list<std::shared_ptr<HalRequest>> reqs = std::move(mRequestList);
+    mRequestList.clear();
+    if (mProcessingRequest) {
+        std::chrono::seconds timeout = std::chrono::seconds(kFlushWaitTimeoutSec);
+        auto st = mRequestDoneCond.wait_for(lk, timeout);
+        if (st == std::cv_status::timeout) {
+            ALOGE("%s: wait for inflight request finish timeout!", __FUNCTION__);
+        }
+    }
+    lk.unlock();
+    clearIntermediateBuffers();
+    ALOGV("%s: returning %zu request for offline processing", __FUNCTION__, reqs.size());
+    return reqs;
+}
+
 void ExternalCameraDeviceSession::OutputThread::waitForNextRequest(
         std::shared_ptr<HalRequest>* out) {
     ATRACE_CALL();
@@ -2733,6 +2386,7 @@ Status ExternalCameraDeviceSession::configureStreams(
         return Status::INTERNAL_ERROR;
     }
 
+    mBlobBufferSize = blobBufferSize;
     status = mOutputThread->allocateIntermediateBuffers(v4lSize,
                 mMaxThumbResolution, config.streams, blobBufferSize);
     if (status != Status::OK) {
@@ -2916,16 +2570,6 @@ status_t ExternalCameraDeviceSession::initDefaultRequests() {
 
 status_t ExternalCameraDeviceSession::fillCaptureResult(
         common::V1_0::helper::CameraMetadata &md, nsecs_t timestamp) {
-    // android.control
-    // For USB camera, we don't know the AE state. Set the state to converged to
-    // indicate the frame should be good to use. Then apps don't have to wait the
-    // AE state.
-    const uint8_t aeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
-    UPDATE(md, ANDROID_CONTROL_AE_STATE, &aeState, 1);
-
-    const uint8_t ae_lock = ANDROID_CONTROL_AE_LOCK_OFF;
-    UPDATE(md, ANDROID_CONTROL_AE_LOCK, &ae_lock, 1);
-
     bool afTrigger = false;
     {
         std::lock_guard<std::mutex> lk(mAfTriggerLock);
@@ -2951,46 +2595,10 @@ status_t ExternalCameraDeviceSession::fillCaptureResult(
     }
     UPDATE(md, ANDROID_CONTROL_AF_STATE, &afState, 1);
 
-    // Set AWB state to converged to indicate the frame should be good to use.
-    const uint8_t awbState = ANDROID_CONTROL_AWB_STATE_CONVERGED;
-    UPDATE(md, ANDROID_CONTROL_AWB_STATE, &awbState, 1);
-
-    const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
-    UPDATE(md, ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
-
-    camera_metadata_ro_entry active_array_size =
-        mCameraCharacteristics.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+    camera_metadata_ro_entry activeArraySize =
+            mCameraCharacteristics.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
 
-    if (active_array_size.count == 0) {
-        ALOGE("%s: cannot find active array size!", __FUNCTION__);
-        return -EINVAL;
-    }
-
-    const uint8_t flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
-    UPDATE(md, ANDROID_FLASH_STATE, &flashState, 1);
-
-    // This means pipeline latency of X frame intervals. The maximum number is 4.
-    const uint8_t requestPipelineMaxDepth = 4;
-    UPDATE(md, ANDROID_REQUEST_PIPELINE_DEPTH, &requestPipelineMaxDepth, 1);
-
-    // android.scaler
-    const int32_t crop_region[] = {
-        active_array_size.data.i32[0], active_array_size.data.i32[1],
-        active_array_size.data.i32[2], active_array_size.data.i32[3],
-    };
-    UPDATE(md, ANDROID_SCALER_CROP_REGION, crop_region, ARRAY_SIZE(crop_region));
-
-    // android.sensor
-    UPDATE(md, ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
-
-    // android.statistics
-    const uint8_t lensShadingMapMode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
-    UPDATE(md, ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &lensShadingMapMode, 1);
-
-    const uint8_t sceneFlicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
-    UPDATE(md, ANDROID_STATISTICS_SCENE_FLICKER, &sceneFlicker, 1);
-
-    return OK;
+    return fillCaptureResultCommon(md, timestamp, activeArraySize);
 }
 
 #undef ARRAY_SIZE
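
Taken together, the patch replaces per-session copies of helpers with shared implementations (importBufferImpl, formatConvert, getMaxThumbnailResolution, fillCaptureResultCommon) and adds the hooks an offline session needs: OutputThread::switchToOffline() hands back queued requests, and processCaptureRequestError() can now buffer its SHUTTER/ERROR messages and CaptureResult instead of calling straight back into the framework. A minimal C++ sketch of how a caller might drain the output thread during an offline switch, assuming the member names from this diff (the draining loop itself is illustrative, not code from the patch):

    // Collect deferred notifications/results rather than delivering them now.
    std::vector<NotifyMsg> msgs;
    std::vector<CaptureResult> results;

    // Take ownership of all queued requests; switchToOffline() also waits for
    // any in-flight request and clears the intermediate YU12 buffers.
    std::list<std::shared_ptr<HalRequest>> pending = mOutputThread->switchToOffline();

    for (auto& req : pending) {
        // With null out-pointers this method calls notifyShutter()/notifyError()
        // and the result callback directly; passing the vectors defers delivery.
        processCaptureRequestError(req, &msgs, &results);
    }
    // msgs and results can then be handed to the offline session for delivery.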