1. Introduction
This article examines the camera driver and HAL-layer code architecture of the Spreadtrum (SPRD) platform and walks through the Spreadtrum camera control flow.
Platform: Spreadtrum (SPRD)
HAL version: HAL3
Topics covered:
From the HAL layer down to the driver layer:
1. Camera open, init, and power-on call flow
2. Preview call flow
3. Snapshot call flow
(Figure: camera software architecture)
2. Camera open and init call flow
2.1 Entry point in the framework layer
The call travels from the app through the framework layer, the JNI layer, and CameraService, and eventually reaches:
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
status_t Camera3Device::initialize(camera_module_t *module) {
    ...
    /** Open HAL device */
    status_t res;
    String8 deviceName = String8::format("%d", mId);
    camera3_device_t *device;
    // the module's open() method is called here to open the camera
    res = module->common.methods->open(&module->common, deviceName.string(),
                                       reinterpret_cast<hw_device_t**>(&device));
    if (res != OK) {
        SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res);
        return res;
    }
    ...
}
Here module->common.methods->open is called to enter the HAL layer; let's follow it down.
2.2 HAL layer
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3Factory.cpp
struct hw_module_methods_t SprdCamera3Factory::mModuleMethods = {
    .open = SprdCamera3Factory::camera_device_open,
};
So the call actually lands in SprdCamera3Factory::camera_device_open.
int SprdCamera3Factory::camera_device_open(const struct hw_module_t *module,
                                           const char *id,
                                           struct hw_device_t **hw_device) {
    ···
    if (isSingleIdExposeOnMultiCameraMode(atoi(id))) {
        return gSprdCamera3Wrapper->cameraDeviceOpen(module, id, hw_device);
    } else {
        return gSprdCamera3Factory.cameraDeviceOpen(atoi(id), hw_device);
    }
    ···
}
Here a camera device is opened by its ID (rear main: 0, rear aux: 2, front main: 1, front aux: 3).
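For reference, the ID mapping just mentioned can be written out as a small enum. This enum is purely illustrative; the names are mine and are not taken from the SPRD sources:

/* Illustrative only: the logical camera IDs described above. */
enum logical_camera_id {
    CAM_ID_REAR_MAIN  = 0,
    CAM_ID_FRONT_MAIN = 1,
    CAM_ID_REAR_AUX   = 2,
    CAM_ID_FRONT_AUX  = 3,
};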
Next, gSprdCamera3Factory.cameraDeviceOpen() is called:
int SprdCamera3Factory::cameraDeviceOpen(int camera_id,
                                         struct hw_device_t **hw_device) {
    ···
    SprdCamera3HWI *hw = new SprdCamera3HWI(multiCameraModeIdToPhyId(camera_id));
    rc = hw->openCamera(hw_device);
    ···
    return rc;
}
A SprdCamera3HWI instance is created here, and its openCamera(hw_device) method is called.
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3HWI.cpp
int SprdCamera3HWI::openCamera(struct hw_device_t **hw_device) {
    ···
    ret = openCamera();
    if (ret == 0) {
        *hw_device = &mCameraDevice.common;
        mCameraSessionActive++;
    } else
        *hw_device = NULL;
    ···
    return ret;
}
It then calls the no-argument openCamera() overload:
int SprdCamera3HWI::openCamera() {
    ···
    // create a SprdCamera3OEMIf instance
    mOEMIf = new SprdCamera3OEMIf(mCameraId, mSetting);
    mOEMIf->camera_ioctrl(CAMERA_IOCTRL_SET_MULTI_CAMERAMODE, &mMultiCameraMode, NULL);
    // open the camera via mOEMIf->openCamera()
    ret = mOEMIf->openCamera();
    mCameraOpened = true;
    ···
    if (mOEMIf->isIspToolMode()) {
        mOEMIf->ispToolModeInit();                        // initialize ISP tool mode
        startispserver(mCameraId);
        ispvideo_RegCameraFunc(1, ispVideoStartPreview);  // register ispVideoStartPreview
        ispvideo_RegCameraFunc(2, ispVideoStopPreview);   // register ispVideoStopPreview
        ispvideo_RegCameraFunc(3, ispVideoTakePicture);   // register ispVideoTakePicture
        ispvideo_RegCameraFunc(4, ispVideoSetParam);      // register ispVideoSetParam
    }
    ···
    return NO_ERROR;
}
Here a SprdCamera3OEMIf instance is created and mOEMIf->openCamera() is called.
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3OEMIf.cpp
int SprdCamera3OEMIf::openCamera() {
    ···
    // determine the largest picture width/height
    mSetting->getLargestPictureSize(mCameraId, &picW, &picH);
    mSetting->getLargestSensorSize(mCameraId, &snsW, &snsH);
    if (picW * picH > snsW * snsH) {
        mLargestPictureWidth = picW;
        mLargestPictureHeight = picH;
    } else {
        mLargestPictureWidth = snsW;
        mLargestPictureHeight = snsH;
    }
    // set the largest picture size
    mHalOem->ops->camera_set_largest_picture_size(
        mCameraId, mLargestPictureWidth, mLargestPictureHeight);
    // continue bringing up the camera via startCameraIfNecessary
    if (!startCameraIfNecessary()) {
        ret = UNKNOWN_ERROR;
        HAL_LOGE("start failed");
        goto exit;
    }
    // initialize the ZSL (zero shutter lag) monitor thread
    ZSLMode_monitor_thread_init((void *)this);
#ifdef CONFIG_CAMERA_GYRO
    gyro_monitor_thread_init((void *)this);
#endif
    property_get("persist.sys.camera.raw.mode", value, "jpeg");
    if (!strcmp(value, "raw") || !strcmp(value, "bin")) {
        is_raw_capture = 1;
    }
    property_get("persist.sys.isptool.mode.enable", value, "false");
    if (!strcmp(value, "true") || is_raw_capture) {
        mIsIspToolMode = 1;
    }
    ···
}
SprdCamera3OEMIf::openCamera mainly does the following:
1. Sets the largest image size
2. Calls startCameraIfNecessary to continue bringing up the camera
3. Initializes the ZSL (zero shutter lag) monitor thread
4. Reads the persist.sys.camera.raw.mode and persist.sys.isptool.mode.enable properties to configure raw capture and ISP tool mode
Let's follow startCameraIfNecessary next; this method does quite a lot and is somewhat involved.
bool SprdCamera3OEMIf::startCameraIfNecessary() {
    ···
    // if the camera has not been initialized yet, initialize it
    if (!isCameraInit()) {
        HAL_LOGI("wait for camera_init");
        if (CMR_CAMERA_SUCCESS !=
            mHalOem->ops->camera_init(mCameraId, camera_cb, this, 0, &mCameraHandle,
                                      (void *)Callback_Malloc, (void *)Callback_Free)) {
            setCameraState(SPRD_INIT);
            HAL_LOGE("CameraIfNecessary: fail to camera_init().");
            return false;
        } else {
            setCameraState(SPRD_IDLE);
        }
    ···
    // query the ZSL (zero shutter lag) capability
    mHalOem->ops->camera_get_zsl_capability(mCameraHandle, &is_support_zsl,
                                            &max_width, &max_height);
    // check whether ZSL is supported
    if (!is_support_zsl) {
        mParameters.setZSLSupport("false");
    }
    // query the grab capability, which includes the 3DNR capability
    mHalOem->ops->camera_ioctrl(mCameraHandle, CAMERA_IOCTRL_GET_GRAB_CAPABILITY,
                                &grab_capability);
    /* get sensor and lens information from the OEM layer */
    mHalOem->ops->camera_get_sensor_exif_info(mCameraHandle, &exif_info);
    mSetting->getLENSTag(&lensInfo);
    lensInfo.aperture = exif_info.aperture;
    mSetting->setLENSTag(lensInfo);
    /* get sensor OTP from the OEM layer */
    /* refocus mode: begin */
    if (MODE_SINGLE_CAMERA != mMultiCameraMode &&
        MODE_3D_CAPTURE != mMultiCameraMode && MODE_BLUR != mMultiCameraMode &&
        MODE_BOKEH != mMultiCameraMode) {
        mSprdRefocusEnabled = true;
        CMR_LOGI("mSprdRefocusEnabled %d", mSprdRefocusEnabled);
    }
    /* refocus mode: end */
    /* get OTP information from the OEM layer: begin */
    if ((MODE_BOKEH == mMultiCameraMode || mSprdRefocusEnabled == true) &&
        mCameraId == 0) {
        OTP_Tag otpInfo;
        memset(&otpInfo, 0, sizeof(OTP_Tag));
        mSetting->getOTPTag(&otpInfo);
        ···
        struct sensor_otp_cust_info otp_info;
        memset(&otp_info, 0, sizeof(struct sensor_otp_cust_info));
        mHalOem->ops->camera_get_sensor_otp_info(mCameraHandle, &otp_info);
        ···
    }
    /* get OTP information from the OEM layer: end */
    /* for 3D calibration: pick the largest sensor size */
    mSetting->getSPRDDEFTag(&sprddefInfo);
    mHalOem->ops->camera_get_sensor_info_for_raw(mCameraHandle, mode_info);
    for (i = SENSOR_MODE_PREVIEW_ONE; i < SENSOR_MODE_MAX; i++) {
        HAL_LOGD("trim w=%d, h=%d", mode_info[i].trim_width, mode_info[i].trim_height);
        if (mode_info[i].trim_width * mode_info[i].trim_height >=
            sprddefInfo.sprd_3dcalibration_cap_size[0] *
                sprddefInfo.sprd_3dcalibration_cap_size[1]) {
            sprddefInfo.sprd_3dcalibration_cap_size[0] = mode_info[i].trim_width;
            sprddefInfo.sprd_3dcalibration_cap_size[1] = mode_info[i].trim_height;
        }
    }
    ···
    return true;
}
Comments have been added at the relevant places in the code above, so let's keep following the call flow: mHalOem->ops->camera_init eventually reaches camera_init in SprdOEMCamera.c, which performs the initialization.
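Before following camera_init, one note on the mHalOem->ops->xxx calls that keep appearing: they go through a table of function pointers that the HAL obtains from the OEM library. The sketch below only illustrates this dispatch pattern; the struct and field names are invented for illustration and are not the actual SPRD definitions.

/* Hypothetical sketch of a HAL-to-OEM function-pointer table (not the real SPRD header). */
typedef long (*oem_camera_init_fn)(unsigned int camera_id, void *callback,
                                   void *client_data, unsigned long is_autotest,
                                   void **camera_handle, void *cb_malloc, void *cb_free);
typedef long (*oem_start_preview_fn)(void *camera_handle, int capture_mode);

struct oem_camera_ops {
    oem_camera_init_fn   camera_init;
    oem_start_preview_fn camera_start_preview;
    /* ... camera_take_picture, camera_ioctrl, and the other entries ... */
};

struct oem_module {
    void *dso;                  /* assumed: handle from dlopen() of the OEM library */
    struct oem_camera_ops *ops; /* filled in by the OEM library */
};

With such a table, mHalOem->ops->camera_init(...) in the HAL is simply an indirect call into camera_init in SprdOEMCamera.c. Now on to that function: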
vendor/sprd/modules/libcamera/oem2v1/src/SprdOEMCamera.c
cmr_int camera_init(cmr_u32 camera_id, camera_cb_of_type callback, void *client_data,
                    cmr_uint is_autotest, cmr_handle *camera_handle,
                    void *cb_of_malloc, void *cb_of_free) {
    ···
    // initialize the OEM log level
    oem_init_log_level();
    // continue initialization in camera_local_int
    ret = camera_local_int(camera_id, callback, client_data, is_autotest,
                           camera_handle, cb_of_malloc, cb_of_free);
    ···
    // some other initialization
    camera_lls_enable(*camera_handle, 0);
    camera_set_lls_shot_mode(*camera_handle, 0);
    camera_vendor_hdr_enable(*camera_handle, 0);
    ···
    return ret;
}
vendor/sprd/modules/libcamera/oem2v1/src/cmr_oem.c
cmr_int camera_local_int(cmr_u32 camera_id, camera_cb_of_type callback, void *client_data,
                         cmr_uint is_autotest, cmr_handle *oem_handle,
                         void *cb_of_malloc, void *cb_of_free) {
    ···
    // allocate the camera context
    struct camera_context *cxt = NULL;
    *oem_handle = (cmr_handle)0;
    cxt = (struct camera_context *)malloc(sizeof(struct camera_context));
    // fill in the parameters
    cmr_bzero(cxt, sizeof(*cxt));
    cxt->camera_id = camera_id;
    cxt->camera_cb = callback;
    cxt->client_data = client_data;
    cxt->hal_malloc = cb_of_malloc;
    cxt->hal_free = cb_of_free;
    cxt->hal_gpu_malloc = NULL;
    cxt->is_multi_mode = is_multi_camera_mode_oem;
    cxt->blur_facebeauty_flag = 0;
    // continue initialization in camera_init_internal
    ret = camera_init_internal((cmr_handle)cxt, is_autotest);
    ···
    return ret;
}
camera_init_internal carries on with the next stage of initialization:
cmr_int camera_init_internal(cmr_handle oem_handle, cmr_uint is_autotest) {
    ···
    // sensor initialization
    ret = camera_sensor_init(oem_handle, is_autotest);
    if (ret) {
        CMR_LOGE("failed to init sensor %ld", ret);
        goto exit;
    }
    // grab initialization
    ret = camera_grab_init(oem_handle);
    if (ret) {
        CMR_LOGE("failed to init grab %ld", ret);
        goto sensor_deinit;
    }
    // res initialization
    ret = camera_res_init(oem_handle);
    if (ret) {
        CMR_LOGE("failed to init res %ld", ret);
        goto grab_deinit;
    }
    // ISP initialization
    ret = camera_isp_init(oem_handle);
    if (ret) {
        CMR_LOGE("failed to init isp %ld", ret);
        goto res_deinit;
    }
    // initialization done
    ret = camera_res_init_done(oem_handle);
    ···
    return ret;
}
This function mainly does the following:
1. Sensor initialization
2. Grab initialization
3. Resource (res) initialization
4. ISP initialization
Let's focus on camera_sensor_init:
cmr_int camera_sensor_init(cmr_handle oem_handle, cmr_uint is_autotest) {
    ···
    ret = cmr_sensor_init(&init_param, &sensor_handle);
    ret = cmr_sensor_open(sensor_handle, camera_id_bits);
    ···
}
This function calls cmr_sensor_init for initialization and cmr_sensor_open to open the camera.
vendor/sprd/modules/libcamera/oem2v1/src/cmr_sensor.c
cmr_int cmr_sensor_init(struct sensor_init_param *init_param_ptr,
                        cmr_handle *sensor_handle) {
    ···
    /*save init param*/
    handle->oem_handle = init_param_ptr->oem_handle;
    handle->sensor_bits = init_param_ptr->sensor_bits;
    handle->private_data = init_param_ptr->private_data;
    handle->is_autotest = init_param_ptr->is_autotest;
    /*create thread*/
    ret = cmr_sns_create_thread(handle);
    ···
    return ret;
}
This function saves the init parameters and then calls cmr_sns_create_thread to create a thread whose message handler is cmr_sns_thread_proc, as follows:
ret = cmr_thread_create(&handle->thread_cxt.thread_handle,
                        SENSOR_MSG_QUEUE_SIZE, cmr_sns_thread_proc,
                        (void *)handle)
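This create-a-thread-with-a-message-queue pattern (cmr_thread_create plus cmr_thread_msg_send) is used throughout the SPRD camera code. The sketch below is only a simplified, self-contained illustration of the idea using POSIX threads; the names and the message struct are invented and do not match the real cmr_thread implementation.

/* Simplified illustration of a message-queue worker thread (hypothetical names). */
#include <pthread.h>

#define QUEUE_SIZE 8

struct msg { int type; void *data; };

struct msg_thread {
    pthread_t tid;
    pthread_mutex_t lock;
    pthread_cond_t cond;
    struct msg queue[QUEUE_SIZE];
    int head, tail, count, running;
    int (*proc)(struct msg *m, void *priv);   /* e.g. cmr_sns_thread_proc */
    void *priv;
};

/* Worker loop: block until a message arrives, then hand it to the proc callback. */
static void *worker(void *arg) {
    struct msg_thread *t = arg;
    while (1) {
        pthread_mutex_lock(&t->lock);
        while (t->count == 0 && t->running)
            pthread_cond_wait(&t->cond, &t->lock);
        if (!t->running && t->count == 0) {
            pthread_mutex_unlock(&t->lock);
            break;
        }
        struct msg m = t->queue[t->head];
        t->head = (t->head + 1) % QUEUE_SIZE;
        t->count--;
        pthread_mutex_unlock(&t->lock);
        t->proc(&m, t->priv);   /* dispatch on m.type, like the switch in the proc */
    }
    return NULL;
}

/* Counterpart of cmr_thread_msg_send: queue a message and wake the worker. */
static int thread_msg_send(struct msg_thread *t, struct msg *m) {
    pthread_mutex_lock(&t->lock);
    if (t->count == QUEUE_SIZE) {
        pthread_mutex_unlock(&t->lock);
        return -1;
    }
    t->queue[t->tail] = *m;
    t->tail = (t->tail + 1) % QUEUE_SIZE;
    t->count++;
    pthread_cond_signal(&t->cond);
    pthread_mutex_unlock(&t->lock);
    return 0;
}

/* Counterpart of cmr_thread_create: remember the proc callback and start the worker. */
static int thread_create(struct msg_thread *t, int (*proc)(struct msg *, void *), void *priv) {
    pthread_mutex_init(&t->lock, NULL);
    pthread_cond_init(&t->cond, NULL);
    t->head = t->tail = t->count = 0;
    t->running = 1;
    t->proc = proc;
    t->priv = priv;
    return pthread_create(&t->tid, NULL, worker, t);
}

The real cmr_thread additionally supports synchronous sends (sync_flag = CMR_MSG_SYNC_PROCESSED), where the sender blocks until the handler has processed the message. Back to the flow: cmr_sensor_open then posts a message to this thread: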
cmr_int cmr_sensor_open(cmr_handle sensor_handle, cmr_u32 sensor_id_bits) {
    ···
    struct cmr_sensor_handle *handle = (struct cmr_sensor_handle *)sensor_handle;
    /*the open&close function should be sync*/
    message.msg_type = CMR_SENSOR_EVT_OPEN;
    message.sync_flag = CMR_MSG_SYNC_PROCESSED;
    message.data = (void *)((unsigned long)sensor_id_bits);
    // send the message to the thread created in cmr_sensor_init
    ret = cmr_thread_msg_send(handle->thread_cxt.thread_handle, &message);
    ···
    return ret;
}
cmr_sensor_open sends a message that is handled by the cmr_sns_thread_proc thread created in cmr_sensor_init.
The message type here is message.msg_type = CMR_SENSOR_EVT_OPEN;
cmr_int cmr_sns_thread_proc(struct cmr_msg *message, void *p_data) {
    ···
    switch (evt) {
    case CMR_SENSOR_EVT_INIT:
        /*common control info config*/
        CMR_LOGI("INIT DONE!");
        break;
    case CMR_SENSOR_EVT_OPEN:
        /*camera sensor open for every bits*/
        ops_param = (cmr_u32)((unsigned long)message->data);
        ret = cmr_sns_open(handle, ops_param);
        if (ret) {
            /* notify oem through fd_sensor */
            CMR_LOGE("cmr_sns_open failed!");
        }
        return CMR_CAMERA_INVALID_PARAM;
    }
    ···
}
So execution takes the CMR_SENSOR_EVT_OPEN branch and calls cmr_sns_open.
cmr_int cmr_sns_open(struct cmr_sensor_handle *handle, cmr_u32 sensor_id_bits) {
    ···
    /*open all signed camera sensor*/
    for (cameraId = 0; cameraId < CAMERA_ID_MAX; cameraId++) {
        if (0 != (sensor_id_bits & (1 << cameraId))) {
            ret = sensor_open_common(&handle->sensor_cxt[cameraId], cameraId,
                                     handle->is_autotest);
            if (ret) {
                CMR_LOGE("camera %u open failed!", cameraId);
            } else {
                handle->sensor_bits |= (1 << cameraId);
            }
        }
    }
    ···
}
cmr_sns_open then calls sensor_open_common, which is fairly involved. Its main tasks are:
1. Initialize the ctx (context) structure
2. Initialize the EXIF (capture) information
3. Load the sensor file, which caches the detected camera ID
4. Open the camera according to the camera ID stored in the sensor file
Let's look at the code:
vendor/sprd/modules/libcamera/sensor/sensor_drv_u.c
cmr_int sensor_open_common(struct sensor_drv_context *sensor_cxt, cmr_u32 sensor_id,
                           cmr_uint is_autotest) {
    ···
    /* initialize the ctx (context) structure via sensor_context_init */
    ret_val = sensor_context_init(sensor_cxt, sensor_id, is_autotest);
    /* create the sensor_ctrl_thread_proc thread */
    ret_val = sensor_create_ctrl_thread(sensor_cxt);
    /* initialize the kernel driver init structure hw_drv_init_para */
    struct hw_drv_init_para input_ptr;
    cmr_int fd_sensor = SENSOR_FD_INIT;   // SENSOR_FD_INIT = -1
    cmr_handle hw_drv_handle = NULL;
    input_ptr.sensor_id = sensor_id;
    input_ptr.caller_handle = sensor_cxt;
    fd_sensor = hw_sensor_drv_create(&input_ptr, &hw_drv_handle);
    if ((SENSOR_FD_INIT == fd_sensor) || (NULL == hw_drv_handle)) {
        SENSOR_LOGE("sns_device_init %d error, return", sensor_id);
        ret_val = SENSOR_FAIL;
        goto init_exit;
    }
    // initialize sensor_cxt
    sensor_cxt->fd_sensor = fd_sensor;
    sensor_cxt->hw_drv_handle = hw_drv_handle;
    sensor_cxt->sensor_hw_handler = hw_drv_handle;
    /* load the sensor IC info cached in the sensor idx file */
    sensor_load_idx_inf_file(sensor_cxt);
    if (sensor_cxt->sensor_identified) {
        if (SENSOR_SUCCESS == sns_load_drv(sensor_cxt, SENSOR_MAIN)) {
            sensor_num++;
        }
        ···
        SENSOR_LOGI("1 is identify, register OK");
        /* the ID was read from the file, so go straight to open */
        ret_val = sensor_open(sensor_cxt, sensor_id);
        if (ret_val != SENSOR_SUCCESS) {
            SENSOR_LOGI("first open sensor failed,start identify");
        }
    }
    /* scan the devices in the cfg table to find the matching sensor driver */
    if ((!sensor_cxt->sensor_identified) || (ret_val != SENSOR_SUCCESS)) {
        sensor_num = 0;
        SENSOR_LOGI("register sensor fail, start identify");
        // the core of the scan is sensor_identify
        if (sensor_identify(sensor_cxt, SENSOR_MAIN))
            sensor_num++;
        ···
        // once the scan succeeds, perform the sensor_open step
        ret_val = sensor_open(sensor_cxt, sensor_id);
    }
    sensor_cxt->sensor_identified = SCI_TRUE;   // mark the sensor as identified
    // save the identified ID info to /data/misc/cameraserver/sensor.file
    sensor_save_idx_inf_file(sensor_cxt);
    // save the node info to /sys/devices/virtual/misc/sprd_sensor/camera_sensor_name
    sensor_rid_save_sensor_info(sensor_cxt);
    ···
    return ret_val;
}
Analysis: after the necessary initialization, sensor_load_idx_inf_file is called to load the sensor.file under /data/misc/cameraserver/:
1. If a sensor ID is found there, sensor_cxt->sensor_identified is SCI_TRUE (i.e. 1); sns_load_drv(sensor_cxt, SENSOR_MAIN) registers the driver, and sensor_open is executed directly.
2. Otherwise, sensor_identify(sensor_cxt, SENSOR_MAIN) walks the sensor list, scanning the devices in the cfg table to find the correct sensor driver.
Once either path succeeds, sensor_open performs:
1. AF initialization: sensor_af_init()
2. OTP reading: otp_module_init()
3. Capture (EXIF) info setup: sensor_set_export_Info()
The flow charts:
(Figure: sensor_open_common flow)
(Figure: sensor_identify scan flow)
LOCAL cmr_int sensor_identify(struct sensor_drv_context *sensor_cxt, SENSOR_ID_E sensor_id) {
    ···
    ret = sensor_get_match_info(sensor_cxt, sensor_id);
    ret = sensor_ic_identify(sensor_cxt, sensor_id);
    retValue = sensor_identify_search(sensor_cxt, sensor_id);
    return retValue;
}
Analysis:
1. sensor_get_match_info is called first to fetch the camera drivers we configured; the flow is:
sensor_get_match_info -> sensor_get_module_tab -> back_sensor_infor_tab (shown below)
vendor/sprd/modules/libcamera/sensor/sensor_cfg.c
This is why driver engineers must add their driver to this cfg list whenever a new camera is brought up:
const SENSOR_MATCH_T back_sensor_infor_tab[] = {
    // gc area
#ifdef GC5005
    {MODULE_SUNNY, "gc5005", &g_gc5005_mipi_raw_info, {&dw9714_drv_entry, 0}, NULL},
#endif
#ifdef GC8024
    {MODULE_SUNNY, "gc8024", &g_gc8024_mipi_raw_info, {&dw9714_drv_entry, 0}, NULL},
#endif
#ifdef GC030A
    {MODULE_SUNNY, "gc030a", &g_gc030a_mipi_raw_info, {NULL, 0}, NULL},
#endif
#ifdef GC2385
    {MODULE_SUNNY, "gc2385", &g_gc2385_mipi_raw_info, {NULL, 0}, NULL},
#endif
    ···
};
2. Next, sensor_ic_identify is called to identify the sensor IC.
The identification steps are:
1. Create the sensor IC driver structure
2. Configure the I2C bus, sensor ID, I2C clock, slave address, register address length, and data length
3. Power on the sensor IC
4. Read the sensor IC's PID and VID
5. Delete the sensor IC driver structure
The sensor_ic_identify scan flow is shown below.
sns_ops->power(sensor_cxt->sns_ic_drv_handle, power_on); calls into the sensor driver's power_on hook, e.g. ov8856_drv_power_on().
sns_ops->identify(sensor_cxt->sns_ic_drv_handle, SENSOR_ZERO_I2C); calls into the sensor driver's identify hook, e.g. ov8856_drv_identify().
(Figure: identify flow)
static cmr_int sensor_ic_identify(struct sensor_drv_context *sensor_cxt, cmr_u32 sensor_id) {
    ···
    // 1. set up the sensor IC driver structure
    struct sensor_ic_ops *sns_ops = PNULL;
    struct sensor_ic_drv_init_para sns_init_para;
    register_info = &sensor_cxt->sensor_register_info;
    sns_ops = sensor_cxt->sensor_info_ptr->sns_ops;
    sensor_cxt->i2c_addr = mod_cfg_info->major_i2c_addr;
    /* create the sensor IC handle */
    ret = sensor_ic_create(sensor_cxt, sensor_id);
try:
    /*sensor has backup addr*/
    if (sns_ops && sns_ops->identify) {
        /* 2. initialize the I2C configuration */
        hw_drv_cfg.i2c_bus_config = mod_cfg_info->reg_addr_value_bits;
        hw_sensor_drv_cfg(sensor_cxt->hw_drv_handle, &hw_drv_cfg);
        sensor_i2c_init(sensor_cxt, sensor_id);
        // set the I2C slave address
        hw_sensor_i2c_set_addr(sensor_cxt->hw_drv_handle, sensor_cxt->i2c_addr);
        // set the I2C clock
        hw_sensor_i2c_set_clk(sensor_cxt->hw_drv_handle);
        ···
        // 3. power on the sensor
        sensor_power_on(sensor_cxt, SCI_TRUE); /*power on*/
        // call the concrete driver's identify hook
        ret = sns_ops->identify(sensor_cxt->sns_ic_drv_handle, SENSOR_ZERO_I2C);
        if (SENSOR_SUCCESS == ret) {
            /* if the following is SCI_FALSE, i.e. we are still in the identify
             * process, the sensor IC handle should be deleted */
            if (register_info->is_register[sensor_id] != SCI_TRUE) {
                sensor_power_on(sensor_cxt, SCI_FALSE);
                sensor_i2c_deinit(sensor_cxt, sensor_id);
                sensor_ic_delete(sensor_cxt);
            }
            sensor_cxt->sensor_list_ptr[sensor_id] = sensor_cxt->sensor_info_ptr;
            register_info->is_register[sensor_id] = SCI_TRUE;
            register_info->img_sensor_num++;
        } else {
            // register_info->is_register[sensor_id] = SCI_FALSE;
            sensor_power_on(sensor_cxt, SCI_FALSE);
            if ((sensor_cxt->i2c_addr != mod_cfg_info->minor_i2c_addr) &&
                mod_cfg_info->minor_i2c_addr != 0x00) {
                sensor_cxt->i2c_addr = mod_cfg_info->minor_i2c_addr;
                SENSOR_LOGI("use backup i2c address,try again!");
                goto try;
            }
            SENSOR_LOGI("identify failed!");
            // if identify failed, delete the sensor IC info
            sensor_ic_delete(sensor_cxt);
            return SENSOR_FAIL;
        }
    }
    return ret;
}
PS: the power-on flow is something driver engineers modify quite often, so a few extra words here, using the ov8856 power-up sequence as an example.
The key point is configuring the three supply rails, AVDD, DVDD and IOVDD; for background see my earlier article "你應該瞭解的Camera HW-硬件知識" (camera hardware basics you should know).
1. Power supplies
A camera uses three supply rails: analog (VCAMA), digital (VCAMD), and I/O (VCAMIO).
a) VCAMD is DVDD, the digital supply, mainly powering the ISP;
b) VCAM_IO is VDDIO, the digital I/O supply, mainly powering the I2C block;
c) VCAMA is AVDD, the analog supply, mainly powering the pixel array and the ADC;
d) VCAM_AF powers the camera's autofocus motor.
/*==============================================================================
 * Description:
 * sensor power on
 * please modify this function according to your spec
 *============================================================================*/
static cmr_int ov8856_drv_power_on(cmr_handle handle, cmr_u32 power_on) {
    SENSOR_IC_CHECK_HANDLE(handle);
    ···
    if (SENSOR_TRUE == power_on) {
        // power-up sequence
        // pull the PWDN pin low first
        hw_sensor_power_down(sns_drv_cxt->hw_handle, power_down);
        // pull the reset pin low
        hw_sensor_set_reset_level(sns_drv_cxt->hw_handle, reset_level);
        usleep(500);   // delay 500 us; the actual delay must follow the datasheet
        // enable AVDD, mainly powering the pixel array and the ADC
        hw_sensor_set_avdd_val(sns_drv_cxt->hw_handle, avdd_val);
        // enable DVDD, mainly powering the ISP
        hw_sensor_set_dvdd_val(sns_drv_cxt->hw_handle, dvdd_val);
        // enable IOVDD, mainly powering the I2C block
        hw_sensor_set_iovdd_val(sns_drv_cxt->hw_handle, iovdd_val);
        usleep(500);   // delay 500 us
        // pull the PWDN pin high
        hw_sensor_power_down(sns_drv_cxt->hw_handle, !power_down);
        // pull the reset pin high
        hw_sensor_set_reset_level(sns_drv_cxt->hw_handle, !reset_level);
        usleep(500);   // delay 500 us
        // enable the MCLK
        hw_sensor_set_mclk(sns_drv_cxt->hw_handle, EX_MCLK);
    } else {
        // power-down sequence: the reverse of power-up
        hw_sensor_set_mclk(sns_drv_cxt->hw_handle, SENSOR_DISABLE_MCLK);
        usleep(500);
        hw_sensor_set_reset_level(sns_drv_cxt->hw_handle, reset_level);
        hw_sensor_power_down(sns_drv_cxt->hw_handle, power_down);
        usleep(200);
        hw_sensor_set_avdd_val(sns_drv_cxt->hw_handle, SENSOR_AVDD_CLOSED);
        hw_sensor_set_dvdd_val(sns_drv_cxt->hw_handle, SENSOR_AVDD_CLOSED);
        hw_sensor_set_iovdd_val(sns_drv_cxt->hw_handle, SENSOR_AVDD_CLOSED);
    }
    SENSOR_LOGI("(1:on, 0:off): %d", power_on);
    return SENSOR_SUCCESS;
}
PS2: the identify implementation is also worth showing, again using the ov8856 as the example.
Key comments have been added; it is easy to follow.
/*==============================================================================
 * Description:
 * identify sensor id
 * please modify this function according to your spec
 *============================================================================*/
static cmr_int ov8856_drv_identify(cmr_handle handle, cmr_uint param) {
    ···
    // hw_sensor_read_reg reads a register value
    pid_value = hw_sensor_read_reg(sns_drv_cxt->hw_handle, ov8856_PID_ADDR);
    // check whether the expected sensor ID was found
    if (ov8856_PID_VALUE == pid_value) {
        ver_value = hw_sensor_read_reg(sns_drv_cxt->hw_handle, ov8856_VER_ADDR);
        SENSOR_LOGI("Identify: PID = %x, VER = %x", pid_value, ver_value);
        if (ov8856_VER_VALUE == ver_value) {
            SENSOR_LOGI("this is ov8856 sensor");
            // record the ID information
            ov8856_drv_init_fps_info(handle);
            ret_value = SENSOR_SUCCESS;
        } else {
            SENSOR_LOGI("Identify this is %x%x sensor", pid_value, ver_value);
        }
    } else {
        SENSOR_LOGE("sensor identify fail, pid_value = %x", pid_value);
    }
    return ret_value;
}
3. Finally, if identify fails, the two steps above are repeated and the list is scanned again.
sensor_identify_search is implemented as follows:
LOCAL cmr_u32 sensor_identify_search(struct sensor_drv_context *sensor_cxt,
                                     SENSOR_ID_E sensor_id) {
    ···
    // fetch the module table of the camera drivers we configured
    module_tab = sensor_get_module_tab(sensor_cxt->is_autotest, sensor_id);
    ···
    // call sensor_ic_identify to identify the IC
    retValue = sensor_ic_identify(sensor_cxt, sensor_id);
    ···
    return retValue;
}
That wraps up the first part of this article. Take a breath, have some water, and on to the second part!
3. Preview call flow
[HAL layer]
vendor/sprd/modules/libcamera/hal3_2v1a/SprdCamera3HWI.cpp
int SprdCamera3HWI::openCamera() {
    ···
    // register the ispVideoStartPreview function
    ispvideo_RegCameraFunc(1, ispVideoStartPreview);
    ···
}
In openCamera, ispvideo_RegCameraFunc(1, ispVideoStartPreview); registers ispVideoStartPreview:
static int ispVideoStartPreview(uint32_t param1, uint32_t param2) {
    ···
    rtn = regularChannel->start(dev->mFrameNum);
    ···
}
Next, regularChannel->start(dev->mFrameNum) takes us further down.
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3Channel.cpp
int SprdCamera3RegularChannel::start(uint32_t frame_number) {
    int ret = NO_ERROR;
    size_t i = 0;
    ret = mOEMIf->start(mChannelType, frame_number);
    return ret;
}
The channel type here is:
typedef enum {
    CAMERA_CHANNEL_TYPE_DEFAULT,      /* default */
    CAMERA_CHANNEL_TYPE_REGULAR,      /* regular channel */
    CAMERA_CHANNEL_TYPE_PICTURE,      /* picture channel */
    CAMERA_CHANNEL_TYPE_RAW_CALLBACK, /* YUV888 callback */
    CAMERA_CHANNEL_TYPE_MAX,
} camera_channel_type_t;
Then ret = mOEMIf->start(mChannelType, frame_number); is called.
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3OEMIf.cpp
int SprdCamera3OEMIf::start(camera_channel_type_t channel_type, uint32_t frame_number) {
    ···
    switch (channel_type) {
    case CAMERA_CHANNEL_TYPE_REGULAR: {
        ···
        ret = startPreviewInternal();   // follow this one
        break;
    }
    // the snapshot part below is analyzed in the next section
    case CAMERA_CHANNEL_TYPE_PICTURE: {
        if (mTakePictureMode == SNAPSHOT_NO_ZSL_MODE ||
            mTakePictureMode == SNAPSHOT_ONLY_MODE) {
            ret = takePicture();
        } else if (mTakePictureMode == SNAPSHOT_ZSL_MODE) {
            ret = zslTakePicture();
        } else if (mTakePictureMode == SNAPSHOT_VIDEO_MODE) {
            ret = VideoTakePicture();
        }
        break;
    }
    default:
        break;
    }
    ···
    return ret;
}
If the type is CAMERA_CHANNEL_TYPE_REGULAR, it calls:
ret = startPreviewInternal(); // follow this one
If the type is CAMERA_CHANNEL_TYPE_PICTURE, one of the snapshot paths is taken:
ret = takePicture();
ret = zslTakePicture();
ret = VideoTakePicture();
int SprdCamera3OEMIf::startPreviewInternal() {
    ···
    // for preview, make the JPEG thumbnail size consistent with the camera app's size
    chooseDefaultThumbnailSize(&jpeg_thumb_size.width, &jpeg_thumb_size.height);
    ···
    ret = mHalOem->ops->camera_start_preview(mCameraHandle, mCaptureMode);
    ···
}
mHalOem->ops->camera_start_preview(mCameraHandle, mCaptureMode) is implemented in SprdOEMCamera.c:
vendor/sprd/modules/libcamera/oem2v1/src/SprdOEMCamera.c
cmr_int camera_start_preview(cmr_handle camera_handle, enum takepicture_mode mode) {
    ···
    ret = camera_local_start_preview(camera_handle, mode, CAMERA_PREVIEW);
    ···
    return ret;
}
[OEM layer]
This function is straightforward: it simply calls camera_local_start_preview.
vendor/sprd/modules/libcamera/oem2v1/src/cmr_oem.c
cmr_int camera_local_start_preview(cmr_handle oem_handle, enum takepicture_mode mode,
                                   cmr_uint is_snapshot) {
    // set the preview parameters
    ret = camera_set_preview_param(oem_handle, mode, is_snapshot);
    // continue with cmr_preview_start
    ret = cmr_preview_start(prev_cxt->preview_handle, cxt->camera_id);
    ···
    return ret;
}
This function sets the preview parameters and then calls cmr_preview_start.
vendor/sprd/modules/libcamera/oem2v1/src/cmr_preview.c
cmr_int cmr_preview_start(cmr_handle preview_handle, cmr_u32 camera_id) {
    ···
    message.msg_type = PREV_EVT_ASSIST_START;   // message type PREV_EVT_ASSIST_START
    message.sync_flag = CMR_MSG_SYNC_PROCESSED; // message sync flag
    // send the message
    ret = cmr_thread_msg_send(handle->thread_cxt.assist_thread_handle, &message);
    ···
    message.msg_type = PREV_EVT_START;          // message type PREV_EVT_START
    message.sync_flag = CMR_MSG_SYNC_PROCESSED; // message sync flag
    message.data = (void *)((unsigned long)camera_id);
    // send the message
    ret = cmr_thread_msg_send(handle->thread_cxt.thread_handle, &message);
    ···
    return ret;
}
Here cmr_thread_msg_send is called to send two messages.
The first message:
assist_thread_handle corresponds to prev_assist_thread_proc.
That handle is created in prev_create_thread(struct prev_handle *handle) with:
ret = cmr_thread_create(&handle->thread_cxt.assist_thread_handle,
                        PREV_MSG_QUEUE_SIZE, prev_assist_thread_proc,
                        (void *)handle);
cmr_int prev_assist_thread_proc(struct cmr_msg *message, void *p_data) {
    ···
    msg_type = (cmr_u32)message->msg_type;   // get the message type
    // dispatch on msg_type
    switch (msg_type) {
    case PREV_EVT_ASSIST_START:
        handle->frame_active = 1;
        break;
    ···
    case PREV_EVT_ASSIST_STOP:
        handle->frame_active = 0;
        break;
    ···
    return ret;
}
When msg_type == PREV_EVT_ASSIST_START,
it merely sets handle->frame_active = 1;
The second message:
thread_cxt.thread_handle corresponds to prev_thread_proc.
cmr_int prev_thread_proc(struct cmr_msg *message, void *p_data) {
    ···
    switch (msg_type) {
    ···
    case PREV_EVT_START:
        camera_id = (cmr_u32)((unsigned long)message->data);
        prev_recovery_reset(handle, camera_id);
        ret = prev_start(handle, camera_id, 0, 0);
        /*Notify preview started*/
        cb_data_info.cb_type = PREVIEW_EXIT_CB_PREPARE;
        cb_data_info.func_type = PREVIEW_FUNC_START_PREVIEW;
        cb_data_info.frame_data = NULL;
        prev_cb_start(handle, &cb_data_info);
        break;
    ···
}
Analysis:
1. The call flow of ret = prev_start(handle, camera_id, 0, 0) is:
ret = handle->ops.channel_start(···);                                  [cmr_preview.c]
  -> cmr_int camera_channel_start(···);                                [cmr_oem.c]
    -> cmr_int cmr_grab_cap_start(···)                                 [cmr_grab.c]
      -> ret = ioctl(p_grab->fd, SPRD_IMG_IO_SET_CAP_SKIP_NUM, &num);  [cmr_grab.c]
[Kernel layer]
After this chain, cmr_grab_cap_start() crosses into the kernel to bring up the DCAM: the kernel-layer handlers are invoked through the ioctl interface.
cmr_int cmr_grab_cap_start(cmr_handle grab_handle, cmr_u32 skip_num) {
    ···
    ret = ioctl(p_grab->fd, SPRD_IMG_IO_SET_CAP_SKIP_NUM, &num);
    ATRACE_BEGIN("dcam_stream_on");
    ret = ioctl(p_grab->fd, SPRD_IMG_IO_STREAM_ON, &stream_on);
    ···
    return ret;
}
kernel/drivers/misc/sprd_camera/dcam/dcam_if_r4p0/dcam_ioctrl.c
{SPRD_IMG_IO_STREAM_ON, dcamio_stream_on},
static int dcamio_stream_on(struct camera_file *camerafile, unsigned long arg,
                            unsigned int cmd) {
    ···
    ret = sprd_img_get_dcam_dev(camerafile, &dev, &info);
    ret = sprd_camera_stream_on(camerafile);
    ···
    return ret;
}
2. The call flow of prev_cb_start(handle, &cb_data_info) is:
prev_cb_start(handle, &cb_data_info)                 // cmr_preview.c
  -> ret = cmr_thread_msg_send(···);                 // cmr_preview.c
     // message.msg_type = PREV_EVT_CB_START; cb_thread_handle = prev_cb_thread_proc
  -> ret = handle->oem_cb(···)                       // cmr_preview.c
     // handle->oem_cb = init_param_ptr->oem_cb = camera_preview_cb
  -> ret = cmr_thread_msg_send(···);                 // oem2v1/src/cmr_oem.c
     // message.sub_msg_type = oem_cb_type;
     // prev_cb_thr_handle = camera_preview_cb_thread_proc
  -> callback(···);
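This chain boils down to a callback pointer that the HAL registered back in camera_init (stored as cxt->camera_cb) being invoked from an OEM callback thread with a callback type and a function type. Below is a minimal, self-contained sketch of that pattern; the type and function names are invented for illustration and are not the SPRD definitions.

/* Minimal illustration of the registered-callback pattern (names are invented). */
#include <stdio.h>

enum cam_func_type { FUNC_START_PREVIEW, FUNC_TAKE_PICTURE };
enum cam_cb_type   { CB_EVT_FRAME, CB_EVT_DONE };

typedef void (*camera_cb_fn)(enum cam_cb_type cb, const void *client_data,
                             enum cam_func_type func, void *param);

struct oem_context {
    camera_cb_fn camera_cb;     /* registered by the HAL in camera_init()       */
    const void  *client_data;   /* the SprdCamera3OEMIf instance on the HAL side */
};

/* OEM side: deliver an event up to the HAL. */
static void oem_notify(struct oem_context *cxt, enum cam_cb_type cb,
                       enum cam_func_type func, void *param) {
    if (cxt->camera_cb)
        cxt->camera_cb(cb, cxt->client_data, func, param);
}

/* HAL side: dispatch on the function type, as SprdCamera3OEMIf::camera_cb does. */
static void hal_camera_cb(enum cam_cb_type cb, const void *client_data,
                          enum cam_func_type func, void *param) {
    (void)cb; (void)client_data; (void)param;
    switch (func) {
    case FUNC_START_PREVIEW:
        printf("handle preview frame\n");   /* cf. HandleStartPreview() */
        break;
    default:
        break;
    }
}

int main(void) {
    struct oem_context cxt = { hal_camera_cb, NULL };
    oem_notify(&cxt, CB_EVT_FRAME, FUNC_START_PREVIEW, NULL);
    return 0;
}

Back to the real code: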
vendor/sprd/modules/libcamera/oem2v1/src/cmr_oem.c
cmr_int camera_preview_cb_thread_proc(struct cmr_msg *message, void *data) {
    ···
    callback = cxt->camera_cb;
    callback(message->sub_msg_type, cxt->client_data, message->msg_type, message->data);
    ···
    return ret;
}
Here callback is cxt->camera_cb, which is implemented by SprdCamera3OEMIf::camera_cb(···).
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3OEMIf.cpp
void SprdCamera3OEMIf::camera_cb(enum camera_cb_type cb, const void *client_data,
                                 enum camera_func_type func, void *parm4) {
    ···
    switch (func) {
    case CAMERA_FUNC_START_PREVIEW:
        obj->HandleStartPreview(cb, parm4);
        break;
    ···
Here the function type is CAMERA_FUNC_START_PREVIEW, so HandleStartPreview(cb, parm4) is called.
void SprdCamera3OEMIf::HandleStartPreview(enum camera_cb_type cb, void *parm4) {
    ···
    receivePreviewFrame((struct camera_frame_type *)parm4);
    ···
}
receivePreviewFrame is called to receive the frame data.
void SprdCamera3OEMIf::receivePreviewFrame(struct camera_frame_type *frame) {
    ···
    // receive the frame data
    channel->getStream(CAMERA_STREAM_TYPE_PREVIEW, &pre_stream);
    channel->getStream(CAMERA_STREAM_TYPE_VIDEO, &rec_stream);
    channel->getStream(CAMERA_STREAM_TYPE_CALLBACK, &callback_stream);
    HAL_LOGV("pre_stream %p, rec_stream %p, callback_stream %p",
             pre_stream, rec_stream, callback_stream);
    // face beauty
#ifdef CONFIG_FACE_BEAUTY
    int sx, sy, ex, ey, angle, pose;
    struct face_beauty_levels beautyLevels;
    beautyLevels.blemishLevel = (unsigned char)sprddefInfo.perfect_skin_level[0];
    beautyLevels.smoothLevel = (unsigned char)sprddefInfo.perfect_skin_level[1];
    beautyLevels.skinColor = (unsigned char)sprddefInfo.perfect_skin_level[2];
    beautyLevels.skinLevel = (unsigned char)sprddefInfo.perfect_skin_level[3];
    beautyLevels.brightLevel = (unsigned char)sprddefInfo.perfect_skin_level[4];
    beautyLevels.lipColor = (unsigned char)sprddefInfo.perfect_skin_level[5];
    beautyLevels.lipLevel = (unsigned char)sprddefInfo.perfect_skin_level[6];
    beautyLevels.slimLevel = (unsigned char)sprddefInfo.perfect_skin_level[7];
    beautyLevels.largeLevel = (unsigned char)sprddefInfo.perfect_skin_level[8];
#endif
    ···
}
This function is quite involved; it mainly receives the frame data, applies face beauty, and so on. The details are left for a later article.
4. Snapshot call flow
[HAL layer]
We start directly from SprdCamera3OEMIf::start; how the call reaches this function was covered above and is not repeated here.
vendor/sprd/modules/libcamera/hal3_2v1/SprdCamera3OEMIf.cpp
int SprdCamera3OEMIf::start(camera_channel_type_t channel_type, uint32_t frame_number) {
    ···
    switch (channel_type) {
    ···
    case CAMERA_CHANNEL_TYPE_PICTURE: {
        if (···) {
            setCamPreformaceScene(CAM_CAPTURE_S_LEVEL_NH);
        }
        if (mTakePictureMode == SNAPSHOT_NO_ZSL_MODE ||
            mTakePictureMode == SNAPSHOT_ONLY_MODE) {
            ···
            ret = takePicture();
            ···
        } else if (mTakePictureMode == SNAPSHOT_ZSL_MODE) {
            mVideoSnapshotFrameNum = frame_number;
            ···
            ret = zslTakePicture();
            ···
        } else if (mTakePictureMode == SNAPSHOT_VIDEO_MODE) {
            mVideoSnapshotFrameNum = frame_number;
            ret = VideoTakePicture();
        }
        break;
    }
    ···
    }
    ···
}
Analysis: with channel_type == CAMERA_CHANNEL_TYPE_PICTURE, the following happens:
1. setCamPreformaceScene(CAM_CAPTURE_S_LEVEL_NH); sets the camera performance scene; the scene types are:
typedef enum CAMERA_PERFORMACE_SCENE {
    CAM_OPEN_S,
    CAM_OPEN_E_LEVEL_H,      // DFS: veryhigh
    CAM_OPEN_E_LEVEL_N,      // DFS: normal
    CAM_OPEN_E_LEVEL_L,      // DFS: low
    CAM_PREVIEW_S_LEVEL_H,   // powerhint: performance
    CAM_PREVIEW_S_LEVEL_N,   // powerhint: normal
    CAM_PREVIEW_S_LEVEL_L,   // powerhint: low
    CAM_CAPTURE_S_LEVEL_HH,  // powerhint: performance DFS: veryhigh
    CAM_CAPTURE_S_LEVEL_HN,  // powerhint: performance DFS: normal
    CAM_CAPTURE_S_LEVEL_NH,  // powerhint: normal      DFS: veryhigh
    CAM_CAPTURE_S_LEVEL_NN,  // powerhint: normal      DFS: normal
    CAM_CAPTURE_E_LEVEL_NH,  // powerhint: normal      DFS: veryhigh
    CAM_CAPTURE_E_LEVEL_NN,  // powerhint: normal      DFS: normal
    CAM_CAPTURE_E_LEVEL_NL,  // powerhint: normal      DFS: low
    CAM_CAPTURE_E_LEVEL_LN,  // powerhint: low         DFS: normal
    CAM_CAPTURE_E_LEVEL_LL,  // powerhint: low         DFS: low
    CAM_CAPTURE_E_LEVEL_LH,  // powerhint: low         DFS: veryhigh
    CAM_FLUSH_S,
    CAM_FLUSH_E,
    CAM_EXIT_S,
    CAM_EXIT_E,
} sys_performance_camera_scene;
2. Depending on mTakePictureMode, a different capture method is called:
- Mode 1: normal capture
  mTakePictureMode == SNAPSHOT_NO_ZSL_MODE or SNAPSHOT_ONLY_MODE
  ret = takePicture();
- Mode 2: zero-shutter-lag (ZSL) capture (what you see in the preview is what you get)
  mTakePictureMode == SNAPSHOT_ZSL_MODE
  ret = zslTakePicture();
- Mode 3: video snapshot
  mTakePictureMode == SNAPSHOT_VIDEO_MODE
  ret = VideoTakePicture();
Of these three branches, we follow the normal capture branch.
int SprdCamera3OEMIf::takePicture() {
    ···
    mHalOem->ops->camera_take_picture(mCameraHandle, mCaptureMode)
    ···
}
takePicture actually does quite a bit of work: it checks whether preview is already running and starts it first if not, and if a capture is in progress it waits until that capture finishes, and so on. Finally it calls
mHalOem->ops->camera_take_picture(mCameraHandle, mCaptureMode) to enter the OEM layer.
[OEM layer] (Spreadtrum's own wrapper layer, the bridge between the HAL layer and the driver layer)
vendor/sprd/modules/libcamera/oem2v1/src/SprdOEMCamera.c
cmr_int camera_take_picture(cmr_handle camera_handle, enum takepicture_mode cap_mode) {
    ···
    ret = camera_local_start_snapshot(camera_handle, cap_mode, CAMERA_SNAPSHOT);
    if (ret) {
        CMR_LOGE("failed to start snapshot %ld", ret);
    }
    ···
}
Analysis: this function is trivial; it simply calls camera_local_start_snapshot to start the snapshot.
vendor/sprd/modules/libcamera/oem2v1/src/cmr_oem.c
cmr_int camera_local_set_cap_size(cmr_handle oem_handle, cmr_u32 is_reprocessing,
                                  cmr_u32 camera_id, cmr_u32 width, cmr_u32 height) {
    // 1
    ret = cmr_snapshot_post_proc(cxt->snp_cxt.snapshot_handle, &snp_param);
    // 2
    ret = camera_local_start_capture(oem_handle);
    // 3
    ret = cmr_snapshot_receive_data(cxt->snp_cxt.snapshot_handle,
                                    SNAPSHOT_EVT_CHANNEL_DONE, (void *)&frame);
}
Analysis: this function mainly does the following:
1. Calls cmr_snapshot_post_proc() to send a message
2. Calls camera_local_start_capture() to continue the capture flow
3. Calls cmr_snapshot_receive_data() to receive the captured data
First:
1. cmr_snapshot_post_proc() sends a message
vendor/sprd/modules/libcamera/oem2v1/src/cmr_snapshot.c
cmr_int cmr_snapshot_post_proc(cmr_handle snapshot_handle, struct snapshot_param *param_ptr) {
    ···
    message.msg_type = SNP_EVT_START_PROC;
    message.sync_flag = CMR_MSG_SYNC_PROCESSED;
    message.alloc_flag = 0;
    message.data = param_ptr;
    ret = cmr_thread_msg_send(cxt->thread_cxt.main_thr_handle, &message);
    ···
}
Message type: message.msg_type = SNP_EVT_START_PROC;
The thread handler is snp_main_thread_proc.
Let's look at that handler:
cmr_int snp_main_thread_proc(struct cmr_msg *message, void *p_data) {
    ···
    switch (message->msg_type) {
    ···
    case SNP_EVT_START_PROC:
        ret = snp_set_post_proc_param(snp_handle,
                                      (struct snapshot_param *)message->data);
        break;
    ···
    }
    ···
}
Analysis: it simply calls snp_set_post_proc_param.
cmr_int snp_set_post_proc_param(cmr_handle snp_handle, struct snapshot_param *param_ptr) {
    ···
    ret = cxt->ops.get_sensor_info(cxt->oem_handle, cxt->req_param.camera_id,
                                   &cxt->sensor_info);
    ret = snp_set_jpeg_dec_param(snp_handle);
    ret = snp_set_isp_proc_param(snp_handle);
    ret = snp_set_channel_out_param(snp_handle);
    ret = snp_set_hdr_param(snp_handle);
    snp_get_is_scaling(snp_handle, is_normal_cap);
    ret = snp_set_rot_param(snp_handle);
    ret = snp_set_jpeg_enc_param(snp_handle);
    ret = snp_set_jpeg_exif_param(snp_handle);
    ···
}
Analysis: it configures the various post-processing parameters.
2. camera_local_start_capture() continues the capture flow
vendor/sprd/modules/libcamera/oem2v1/src/cmr_oem.c
cmr_int camera_local_start_capture(cmr_handle oem_handle) {
    // decide whether the flash is needed for this capture
    camera_local_snapshot_is_need_flash(oem_handle, cxt->camera_id, &flash_status);
    // continue the capture with cmr_grab_start_capture
    ret = cmr_grab_start_capture(cxt->grab_cxt.grab_handle, capture_param);
}
cmr_grab_start_capture carries the capture further.
vendor/sprd/modules/libcamera/oem2v1/src/cmr_grab.c
cmr_int cmr_grab_start_capture(cmr_handle grab_handle,
                               struct sprd_img_capture_param capture_param) {
    struct cmr_grab *p_grab;
    p_grab = (struct cmr_grab *)grab_handle;
    ret = ioctl(p_grab->fd, SPRD_IMG_IO_START_CAPTURE, &capture_param);
    ···
}
From here we cross into the driver layer: the driver functions are invoked through the ioctl interface. So which function does the SPRD_IMG_IO_START_CAPTURE command map to?
[Kernel layer]
kernel/drivers/misc/sprd_camera/dcam/dcam_if_r4p0/
static struct dcam_io_ctrl_fun s_cam_io_ctrl_fun_tab[] = {
    ···
    {SPRD_IMG_IO_START_CAPTURE, dcamio_start_capture},
    ···
};
So the command maps to dcamio_start_capture; let's follow it into the kernel and take a look.
kernel/drivers/misc/sprd_camera/dcam/dcam_if_r4p0/dcam_ioctrl.c
static int dcamio_start_capture(struct camera_file *camerafile, unsigned long arg,
                                unsigned int cmd) {
    int ret = 0;
    unsigned int cap_flag = 0;
    struct camera_dev *dev = NULL;
    struct camera_info *info = NULL;
    struct camera_group *group = NULL;
    // get the device info
    ret = sprd_img_get_dcam_dev(camerafile, &dev, &info);
    if (ret) {
        pr_err("fail to get dcam dev\n");
        goto exit;
    }
    group = camerafile->grp;
    // copy the capture flag from user space into cap_flag
    ret = copy_from_user(&cap_flag, (void __user *)arg, sizeof(unsigned int));
    if (ret) {
        pr_err("fail to get user info\n");
        ret = -EFAULT;
        goto exit;
    }
    if (dev->cap_flag == DCAM_CAPTURE_STOP) {
        dev->cap_flag = DCAM_CAPTURE_START;
        if (dev->dcam_cxt.need_isp_tool)
            cap_flag = DCAM_CAPTURE_NONE;
        pr_info("start capture, cap_flag %d\n", cap_flag);
        // perform the actual capture
        ret = sprd_isp_start_pipeline_full(dev->isp_dev_handle, cap_flag);
        if (ret) {
            pr_err("fail to start offline\n");
            goto exit;
        }
    }
    // capture done
    pr_info("start capture done\n");
exit:
    return ret;
}
The comments above make it fairly clear, so just briefly:
sprd_isp_start_pipeline_full performs the capture, and the resulting data ends up in p_offline_frame (struct camera_frame *p_offline_frame = NULL; p_offline_frame is a pointer):
p_offline_frame = &dev->offline_frame[ISP_OFF_BUF_FULL];
memcpy(p_offline_frame, &frame, sizeof(struct camera_frame));
complete(&dev->offline_full_thread_com);
Finally, complete() wakes the waiting thread so that it can go and pick up the data.
PS: complete() is the Linux completion mechanism. It protects shared data, avoids races, and lets one thread tell another sleeping thread, "I'm done on my side, wake up and carry on with your work." See the LDD book (Linux Device Drivers) or look it up for details.
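To make the handshake concrete, here is a generic illustration of the completion pattern described above; it is not the actual dcam driver code, just the standard kernel API applied to a made-up demo struct. One side publishes the frame and calls complete(); the other side blocks in wait_for_completion() until that happens.

/* Generic illustration of the Linux completion pattern (not the dcam driver). */
#include <linux/completion.h>
#include <linux/string.h>

struct demo_frame { int id; };

struct demo_dev {
    struct demo_frame offline_frame;   /* shared data, cf. dev->offline_frame[...] */
    struct completion frame_ready;     /* cf. dev->offline_full_thread_com         */
};

/* Producer side (e.g. the capture path): publish the frame, then wake the waiter. */
static void demo_publish_frame(struct demo_dev *dev, const struct demo_frame *frame)
{
    memcpy(&dev->offline_frame, frame, sizeof(*frame));
    complete(&dev->frame_ready);
}

/* Consumer side (e.g. the offline thread): sleep until a frame has been published. */
static int demo_wait_frame(struct demo_dev *dev, struct demo_frame *out)
{
    wait_for_completion(&dev->frame_ready);
    *out = dev->offline_frame;
    return 0;
}

/* Somewhere during probe/init: init_completion(&dev->frame_ready); */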
Finally, a quick look at how the data is collected.
3. cmr_snapshot_receive_data() receives the captured data
vendor/sprd/modules/libcamera/oem2v1/src/cmr_snapshot.c
cmr_int cmr_snapshot_receive_data(cmr_handle snapshot_handle, cmr_int evt, void *data) {
    ···
    switch (evt) {
    // normal capture mode
    case SNAPSHOT_EVT_CHANNEL_DONE:
        malloc_len = sizeof(struct frm_info);
        CMR_LOGD("video %d zsl %d yaddr_vir 0x%x", cxt->req_param.is_video_snapshot,
                 cxt->req_param.is_zsl_snapshot, frame_info_ptr->yaddr_vir);
        buffer_id = snp_get_buffer_id(snapshot_handle, data);
        buffer_id += frame_info_ptr->base;
        snp_evt = SNP_EVT_CHANNEL_DONE;
        if (1 == cxt->req_param.is_video_snapshot ||
            1 == cxt->req_param.is_zsl_snapshot) {
            flag = 1;
            width = cxt->req_param.post_proc_setting.chn_out_frm[0].size.width;
            height = cxt->req_param.post_proc_setting.chn_out_frm[0].size.height;
            act_width = cxt->req_param.post_proc_setting.actual_snp_size.width;
            act_height = cxt->req_param.post_proc_setting.actual_snp_size.height;
            // memcpy is the C/C++ memory-copy function; the frame info produced by
            // the kernel is copied out here through its address
            memcpy(&chn_data, data, sizeof(struct frm_info));
            chn_data.base = CMR_CAP0_ID_BASE;
            chn_data.frame_id = CMR_CAP0_ID_BASE;
            if (1 == cxt->req_param.is_zsl_snapshot) {
                chn_data.base = CMR_CAP1_ID_BASE;
                chn_data.frame_id = CMR_CAP1_ID_BASE;
            }
        }
        if (1 == cxt->req_param.is_video_snapshot) {        // video snapshot mode
            ···
            cmr_copy((void *)dst_vir, (void *)src_vir, width * height / 2);
            cmr_snapshot_memory_flush(cxt,
                &(cxt->req_param.post_proc_setting.chn_out_frm[0]));
            ···
        } else if (1 == cxt->req_param.is_zsl_snapshot) {   // ZSL capture mode
            ···
            cmr_copy((void *)dst_vir, (void *)src_vir, width * height / 2);
            cmr_snapshot_memory_flush(cxt,
                &(cxt->req_param.post_proc_setting.chn_out_frm[0]));
            ···
        }
        break;
    ···
    }
    ···
}
After the picture is taken, we receive a message with type SNAPSHOT_EVT_CHANNEL_DONE, meaning the capture has completed.
memcpy(&chn_data, data, sizeof(struct frm_info)); then copies the frame information out. Recall that the kernel stored the data through the struct camera_frame *p_offline_frame pointer; it is through that address that the data is copied back out with memcpy(&chn_data, data, sizeof(struct frm_info));.