A First Look at HKUST's VINS-Mono

Introduction

VINS-Mono is an open-source VIO system from the Hong Kong University of Science and Technology. I ran some quick tests and the results look good. These are just brief notes; I will write up the details after I finish my graduation project.

The code is mainly split into a frontend (feature tracker) and a backend (sliding window, loop closure), plus an initialization stage (visual-IMU alignment).

Feature tracker

This part lives in the feature_tracker package. It subscribes to the image topic, tracks feature points with the KLT optical flow algorithm, and keeps a minimum number of features (on the order of 100-300) in every frame.

Based on freq in the config file, it decides how often to pack the tracked feature points into the /feature_tracker/feature topic and publish them.

If it is not yet time to publish, the features of the current image are simply kept as the input for KLT tracking at the next time step. In other words, not every image is fully processed; doing so would cost too much computation and produce redundant data, since consecutive frames do not differ that much.

It is recommended to set freq in the config file to at least 10 to keep the frontend reliable.
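
As a rough, self-contained illustration of this idea (this is not the VINS-Mono code itself; the constants mirror the max_cnt/min_dist config parameters and the values are only illustrative), the frontend loop boils down to: track the existing corners with KLT, drop the lost ones, and top the set back up with new Shi-Tomasi corners.

#include <opencv2/opencv.hpp>
#include <vector>

// Minimal sketch of KLT tracking with feature replenishment.
// prev_img and cur_img are 8-bit grayscale images.
void trackFrame(const cv::Mat &prev_img, const cv::Mat &cur_img,
                std::vector<cv::Point2f> &pts)
{
    const int MAX_CNT = 150;      // target number of features per frame
    const double MIN_DIST = 30.0; // minimum pixel distance between new corners

    if (!pts.empty())
    {
        std::vector<cv::Point2f> cur_pts;
        std::vector<uchar> status;
        std::vector<float> err;
        // KLT optical flow from the previous image to the current one
        cv::calcOpticalFlowPyrLK(prev_img, cur_img, pts, cur_pts, status, err,
                                 cv::Size(21, 21), 3);
        // keep only the successfully tracked points
        std::vector<cv::Point2f> kept;
        for (size_t i = 0; i < cur_pts.size(); i++)
            if (status[i])
                kept.push_back(cur_pts[i]);
        pts.swap(kept);
    }

    // if fewer than MAX_CNT features survived, detect new Shi-Tomasi corners
    int need = MAX_CNT - static_cast<int>(pts.size());
    if (need > 0)
    {
        std::vector<cv::Point2f> new_pts;
        cv::goodFeaturesToTrack(cur_img, new_pts, need, 0.01, MIN_DIST);
        pts.insert(pts.end(), new_pts.begin(), new_pts.end());
    }
}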

void img_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    for (int i = 0; i < NUM_OF_CAM; i++)
    {
        ROS_DEBUG("processing camera %d", i);
        if (i != 1 || !STEREO_TRACK)
        // call FeatureTracker::readImage
            trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)));
    }

    for (unsigned int i = 0;; i++)
    {
        bool completed = false;
        for (int j = 0; j < NUM_OF_CAM; j++)
            if (j != 1 || !STEREO_TRACK)
            // update feature IDs
                completed |= trackerData[j].updateID(i);
        if (!completed)
            break;
    }
    
    // publish the feature point topic
    if (round(1.0 * pub_count / (img_msg->header.stamp.toSec() - first_image_time)) <= FREQ)
    {
        sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
        // feature ids and image (u,v) coordinates
        sensor_msgs::ChannelFloat32 id_of_point;
        sensor_msgs::ChannelFloat32 u_of_point;
        sensor_msgs::ChannelFloat32 v_of_point;

        pub_img.publish(feature_points);

    }

     if (SHOW_TRACK)
     {
        // color each feature by how long it has been tracked: the redder, the longer it has been observed.
        // If most features in an image are blue, the frontend tracker is doing poorly and the estimator is probably about to fail
        double len = std::min(1.0, 1.0 * trackerData[i].track_cnt[j] / WINDOW_SIZE);
        cv::circle(tmp_img, trackerData[i].cur_pts[j], 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2);
     }


}
void FeatureTracker::readImage(const cv::Mat &_img)
{
    // histogram equalization
    // if the image is too dark or too bright, turn on equalization to find enough features
    if (EQUALIZE)
    {
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
        TicToc t_c;
        clahe->apply(_img, img);
        ROS_DEBUG("CLAHE costs: %fms", t_c.toc());
    }

    if (cur_pts.size() > 0)
    {
        TicToc t_o;
        vector<uchar> status;
        vector<float> err;
        // use the previous cur_img and cur_pts to find forw_pts in the current frame
        cv::calcOpticalFlowPyrLK(cur_img, forw_img, cur_pts, forw_pts, status, err, cv::Size(21, 21), 3);
    }

    if (img_cnt == 0)
    {
        // reject outliers with RANSAC on the fundamental matrix
        rejectWithF();
        // update how many times each feature has been tracked
        for (auto &n : track_cnt)
            n++;
        // set the mask image so that goodFeaturesToTrack below keeps new features at least MIN_DIST (30) pixels away from existing ones
        setMask();

        int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size());
        if (n_max_cnt > 0)
        {
            // make sure every image has enough features; extract new ones if not
            cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.1, MIN_DIST, mask);
        }


    }
}

Sliding Window

This part handles IMU pre-integration, construction of the visual reprojection errors, loop-closure detection, maintenance of the sliding window, and maintenance of the marginalization prior; there is quite a lot going on.

Loop-closure detection uses a visual bag of words. The features used here are not the ones from the feature tracker, which would be too few; instead the raw IMAGE_TOPIC is subscribed to and passed to the loop-closure module, where features are re-detected. I have not studied this part carefully yet (many restrictions are imposed to keep the search fast, so the vocabulary never grows very large; judging from the paper, the optimization only adds a few extra visual reprojection constraints, so it should not slow things down much).

The pose-graph optimization has only 4 degrees of freedom: roll and pitch are observable thanks to gravity alignment, so they are not optimized.
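
A rough sketch of what such a 4-DOF relative-pose residual looks like (my notation, not copied from the paper): only the positions and yaw angles are variables, while roll and pitch stay fixed at their VIO estimates.

\mathbf{r}_{ij} =
\begin{bmatrix}
R(\hat{\phi}_i, \hat{\theta}_i, \psi_i)^{-1}\,(\mathbf{p}_j - \mathbf{p}_i) - \hat{\mathbf{p}}_{ij} \\
(\psi_j - \psi_i) - \hat{\psi}_{ij}
\end{bmatrix}

Here \hat{\phi}_i, \hat{\theta}_i are the fixed roll and pitch, and \hat{\mathbf{p}}_{ij}, \hat{\psi}_{ij} are the measured relative translation and yaw between keyframes i and j.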

The core is still the construction of the least-squares problem below; I list the main code.
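
Schematically (my notation, following the structure described in the paper), the sliding-window problem minimizes a marginalization prior plus IMU pre-integration residuals plus robustified visual reprojection residuals:

\min_{\mathcal{X}} \;
\|\mathbf{r}_{\mathrm{prior}}(\mathcal{X})\|^2
+ \sum_{k} \|\mathbf{r}_{\mathrm{IMU}}(\hat{\mathbf{z}}_{k,k+1}, \mathcal{X})\|^2_{\mathbf{P}_{k,k+1}}
+ \sum_{(l,j)} \rho\bigl(\|\mathbf{r}_{\mathrm{proj}}(\hat{\mathbf{z}}^{\,j}_{l}, \mathcal{X})\|^2_{\mathbf{P}^{\,j}_{l}}\bigr)

where \mathcal{X} stacks the window states (p, v, q, b_a, b_g), the camera-IMU extrinsics, and the feature inverse depths, and \rho is a robust loss.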

void Estimator::processIMU(double dt, const Vector3d &linear_acceleration, const Vector3d &angular_velocity)
{

    if (frame_count != 0)
    {
        pre_integrations[frame_count]->push_back(dt, linear_acceleration, angular_velocity);
        // run the IMU pre-integration propagation and compute the corresponding Jacobians
        //if(solver_flag != NON_LINEAR)
            tmp_pre_integration->push_back(dt, linear_acceleration, angular_velocity);

        dt_buf[frame_count].push_back(dt);
        linear_acceleration_buf[frame_count].push_back(linear_acceleration);
        angular_velocity_buf[frame_count].push_back(angular_velocity);

        // propagate the current position and velocity from the IMU, used as initial values for the optimization
        int j = frame_count;         
        Vector3d un_acc_0 = Rs[j] * (acc_0 - Bas[j]) - g;
        Vector3d un_gyr = 0.5 * (gyr_0 + angular_velocity) - Bgs[j];
        Rs[j] *= Utility::deltaQ(un_gyr * dt).toRotationMatrix();
        Vector3d un_acc_1 = Rs[j] * (linear_acceleration - Bas[j]) - g;
        Vector3d un_acc = 0.5 * (un_acc_0 + un_acc_1);
        Ps[j] += dt * Vs[j] + 0.5 * dt * dt * un_acc;
        Vs[j] += dt * un_acc;
    }

}
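
In equation form, the mid-point propagation in processIMU above amounts to the following (R_k is the body-to-world rotation, \mathbf{g} the gravity vector, \mathbf{b}_a and \mathbf{b}_g the accelerometer and gyroscope biases):

\hat{\mathbf{a}}_k = R_k(\mathbf{a}_k - \mathbf{b}_a) - \mathbf{g}, \qquad
\bar{\boldsymbol{\omega}} = \tfrac{1}{2}(\boldsymbol{\omega}_k + \boldsymbol{\omega}_{k+1}) - \mathbf{b}_g

R_{k+1} = R_k\,\mathrm{Exp}(\bar{\boldsymbol{\omega}}\,\delta t), \qquad
\hat{\mathbf{a}}_{k+1} = R_{k+1}(\mathbf{a}_{k+1} - \mathbf{b}_a) - \mathbf{g}, \qquad
\bar{\mathbf{a}} = \tfrac{1}{2}(\hat{\mathbf{a}}_k + \hat{\mathbf{a}}_{k+1})

\mathbf{p}_{k+1} = \mathbf{p}_k + \mathbf{v}_k\,\delta t + \tfrac{1}{2}\bar{\mathbf{a}}\,\delta t^2, \qquad
\mathbf{v}_{k+1} = \mathbf{v}_k + \bar{\mathbf{a}}\,\delta t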

void Estimator::processImage(const map<int, vector<pair<int, Vector3d>>> &image, const std_msgs::Header &header)
{
    // decide whether this is a keyframe based on parallax
    if (f_manager.addFeatureCheckParallax(frame_count, image))
        marginalization_flag = MARGIN_OLD;
    else
        marginalization_flag = MARGIN_SECOND_NEW;

    ImageFrame imageframe(image, header.stamp.toSec());
    imageframe.pre_integration = tmp_pre_integration;
    all_image_frame.insert(make_pair(header.stamp.toSec(), imageframe));
    tmp_pre_integration = new IntegrationBase{acc_0, gyr_0, Bas[frame_count], Bgs[frame_count]};

// if the IMU-camera extrinsics are marked as unknown in the config, they can be estimated here as well
    if(ESTIMATE_EXTRINSIC == 2)
    {
    }

// initialization flow
    if (solver_flag == INITIAL)
    {
        if (frame_count == WINDOW_SIZE)
        {
            bool result = false;
            if( ESTIMATE_EXTRINSIC != 2 && (header.stamp.toSec() - initial_timestamp) > 0.1)
            {
            // build the SfM problem; estimate the IMU biases, gravity g, and the metric scale
               result = initialStructure();
               initial_timestamp = header.stamp.toSec();
            }
            if(result)
            {
                solver_flag = NON_LINEAR;
                solveOdometry();
                slideWindow();
                f_manager.removeFailures();
                ROS_INFO("Initialization finish!");
                last_R = Rs[WINDOW_SIZE];
                last_P = Ps[WINDOW_SIZE];
                last_R0 = Rs[0];
                last_P0 = Ps[0];
                
            }
            else
                slideWindow();
        }
    // first accumulate WINDOW_SIZE frames
        else
            frame_count++;
    }
    else
    {
       
        solveOdometry();

// failure detection
        if (failureDetection())
        {
            clearState();
            setParameter();
            return;
        }

        slideWindow();
        f_manager.removeFailures();
        // prepare output of VINS
        key_poses.clear();
        for (int i = 0; i <= WINDOW_SIZE; i++)
            key_poses.push_back(Ps[i]);

        last_R = Rs[WINDOW_SIZE];
        last_P = Ps[WINDOW_SIZE];
        last_R0 = Rs[0];
        last_P0 = Ps[0];
    }

}
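
The keyframe decision in addFeatureCheckParallax is based on the average parallax of the features shared with the previous frames. A rough, self-contained sketch of the idea (this is not the FeatureManager code; isKeyframe is a hypothetical helper and MIN_PARALLAX stands in for the config threshold):

#include <opencv2/core.hpp>
#include <cmath>
#include <vector>

// Average pixel displacement of co-visible features decides keyframe status.
bool isKeyframe(const std::vector<cv::Point2f> &pts_prev,
                const std::vector<cv::Point2f> &pts_cur,
                double MIN_PARALLAX)
{
    if (pts_prev.empty())
        return true;  // no shared features: treat the frame as a keyframe
    double sum = 0.0;
    for (size_t i = 0; i < pts_prev.size(); i++)
    {
        double dx = pts_cur[i].x - pts_prev[i].x;
        double dy = pts_cur[i].y - pts_prev[i].y;
        sum += std::sqrt(dx * dx + dy * dy);
    }
    // keyframe if the average parallax exceeds the threshold; otherwise the
    // second newest frame is marginalized instead of the oldest one
    return (sum / pts_prev.size()) >= MIN_PARALLAX;
}
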
void Estimator::slideWindow()
{
// shift the states inside the window; at the same time the FeatureManager maintains the features:
// some points are removed, and for some points the depth has to be re-expressed in the next frame (their start frame has been removed)


    Headers[frame_count - 1] = Headers[frame_count];
    Ps[frame_count - 1] = Ps[frame_count];
    Vs[frame_count - 1] = Vs[frame_count];
    Rs[frame_count - 1] = Rs[frame_count];
    Bas[frame_count - 1] = Bas[frame_count];
    Bgs[frame_count - 1] = Bgs[frame_count];

    delete pre_integrations[WINDOW_SIZE];
    pre_integrations[WINDOW_SIZE] = new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE]};
// clear the buffers to make room for the next image
    dt_buf[WINDOW_SIZE].clear();
    linear_acceleration_buf[WINDOW_SIZE].clear();
    angular_velocity_buf[WINDOW_SIZE].clear();    
}

void Estimator::solveOdometry()
{
    if (frame_count < WINDOW_SIZE)
        return;
    if (solver_flag == NON_LINEAR)
    {
        // triangulate feature points
        f_manager.triangulate(Ps, tic, ric);
        ROS_DEBUG("triangulation costs %f", t_tri.toc());
        optimization();
    }
}
void Estimator::optimization()
{
    // add each frame's state (p, v, q, b_a, b_g); these are the parameters Ceres will optimize
    for (int i = 0; i < WINDOW_SIZE + 1; i++)
    {
        ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
        problem.AddParameterBlock(para_Pose[i], SIZE_POSE, local_parameterization);
        problem.AddParameterBlock(para_SpeedBias[i], SIZE_SPEEDBIAS);
    }
    // add the camera-IMU extrinsic parameters
    for (int i = 0; i < NUM_OF_CAM; i++)
    {
        ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
        problem.AddParameterBlock(para_Ex_Pose[i], SIZE_POSE, local_parameterization);
    }

    // assign initial values to the Ceres parameters
    vector2double();

    // add the marginalization residual (the prior)
    // note its Evaluate function: the linearization point is fixed (First Estimate Jacobian)
    if (last_marginalization_info)
    {
        // construct new marginlization_factor
        MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
        problem.AddResidualBlock(marginalization_factor, NULL,
                                 last_marginalization_parameter_blocks);
    } 

    // add the IMU residuals
    for (int i = 0; i < WINDOW_SIZE; i++)
    {
        int j = i + 1;
        if (pre_integrations[j]->sum_dt > 10.0)
            continue;
        IMUFactor* imu_factor = new IMUFactor(pre_integrations[j]);
        problem.AddResidualBlock(imu_factor, NULL, para_Pose[i], para_SpeedBias[i], para_Pose[j], para_SpeedBias[j]);
    }

    // add the visual (reprojection) residuals
    for (auto &it_per_id : f_manager.feature)
    {
        for (auto &it_per_frame : it_per_id.feature_per_frame)
        {
            imu_j++;
            if (imu_i == imu_j)
            {
                continue;
            }
            Vector3d pts_j = it_per_frame.point;
            ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
            problem.AddResidualBlock(f, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]);
            f_m_cnt++;
        }
    }

    // add the loop-closure parameters and residuals
    if(LOOP_CLOSURE)
    {
        ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
        problem.AddParameterBlock(front_pose.loop_pose, SIZE_POSE, local_parameterization);
        
        if(front_pose.features_ids[retrive_feature_index] == it_per_id.feature_id)
        {
            Vector3d pts_j = Vector3d(front_pose.measurements[retrive_feature_index].x, front_pose.measurements[retrive_feature_index].y, 1.0);
            Vector3d pts_i = it_per_id.feature_per_frame[0].point;
            
            ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
            problem.AddResidualBlock(f, loss_function, para_Pose[start], front_pose.loop_pose, para_Ex_Pose[0], para_Feature[feature_index]);
        
            retrive_feature_index++;
            loop_factor_cnt++;
        }
    }


    // cap the solver time to keep the system real-time
    if (marginalization_flag == MARGIN_OLD)
        options.max_solver_time_in_seconds = SOLVER_TIME * 4.0 / 5.0;
    else
        options.max_solver_time_in_seconds = SOLVER_TIME;

    // solve
    ceres::Solve(options, &problem, &summary);

// http://blog.csdn.net/heyijia0327/article/details/53707261#comments
// http://blog.csdn.net/heyijia0327/article/details/52822104
    if (marginalization_flag == MARGIN_OLD)
    {
        // if the current frame is a keyframe, marginalize all information of the oldest frame
        // into the prior for the next time step; the two links above explain this very clearly

    }
    else{
        // if the current frame is not a keyframe, throw away all visual information of the second newest frame
        // but keep its IMU information; note this is not marginalization, and it is done to keep the matrix sparse
    }
    
}
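
To make the structure of the visual residual blocks more concrete, here is a minimal autodiff reprojection functor. This is not the actual ProjectionFactor (VINS-Mono implements an analytic-Jacobian cost function parameterized by inverse depth); it is only a sketch that uses a world-frame 3D point and a world-to-camera pose, with illustrative names.

#include <ceres/ceres.h>
#include <ceres/rotation.h>

// Minimal autodiff reprojection residual on the normalized image plane.
struct ReprojectionError
{
    ReprojectionError(double u, double v) : u_(u), v_(v) {}

    template <typename T>
    bool operator()(const T *const q,      // world-to-camera rotation, quaternion (w, x, y, z)
                    const T *const t,      // world-to-camera translation
                    const T *const point,  // 3D point in the world frame
                    T *residuals) const
    {
        T p_cam[3];
        ceres::QuaternionRotatePoint(q, point, p_cam);  // rotate into the camera frame
        p_cam[0] += t[0];
        p_cam[1] += t[1];
        p_cam[2] += t[2];
        // residual on the normalized image plane
        residuals[0] = p_cam[0] / p_cam[2] - T(u_);
        residuals[1] = p_cam[1] / p_cam[2] - T(v_);
        return true;
    }

    static ceres::CostFunction *Create(double u, double v)
    {
        return new ceres::AutoDiffCostFunction<ReprojectionError, 2, 4, 3, 3>(
            new ReprojectionError(u, v));
    }

    double u_, v_;  // observed feature on the normalized plane
};

A block like this would be added with problem.AddResidualBlock(ReprojectionError::Create(u, v), loss_function, q, t, point), with the quaternion block given a ceres::QuaternionParameterization; that is the same AddParameterBlock / AddResidualBlock pattern seen in optimization() above, just with VINS-Mono's own factors and parameter blocks.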

Follow-up

The IMU parameters matter a lot, and so do hardware synchronization and a global-shutter camera. When I move fast, the results degrade, although the authors' videos look very good.

I still need to keep working on the hardware and on the theory behind the code. The FOCAL_LENGTH used in the least-squares optimization probably has to be set according to one's own camera; I have not looked into it in detail, nor at how the visual information matrix is set.

In practice, the engineering details still have to be worked out on your own.
