@@ -519,13 +519,14 @@ class PointBase {

if (xLoc + xWitdth > windowWidth) xWitdth = windowWidth - xLoc;
if (yLoc + yHeight > windowHeight) yHeight = windowHeight - yLoc;

//create the ROI
Mat roi;
roi = img(Rect(xLoc, yLoc, xWitdth, yHeight));
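//NOTE: img(Rect(...)) only creates a new Mat header that shares pixel data with img
//(no pixels are copied), and the clamping above keeps the Rect inside the image
//bounds so the ROI constructor does not throw.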

/*imshow("roi",roi);
imwrite("temp.jpg", roi);
waitKey(0);*/

//detect the keypoints from both the ROI and the img we want to add
cv::Ptr<SIFT> siftPtr = SIFT::create();
std::vector<KeyPoint> keypointsROI, keypointsImg;
cv::Ptr<SiftDescriptorExtractor> siftExtrPtr;
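//NOTE: cv::SIFT is a cv::Feature2D, so siftPtr alone can both detect keypoints and
//compute descriptors (detect()/compute()/detectAndCompute()); the separate
//SiftDescriptorExtractor pointer is only needed if extraction is done through a
//different object.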

@@ -535,13 +536,7 @@ class PointBase {

// Add results to image and save.
/*
cv::Mat output;
cv::drawKeypoints(imgSmall, keypointsImg, output);
imshow("sift_result.jpg", output);
waitKey(0);*/

//compute descriptors from found keypoints
cout<<"ROI TYPE: "<<cv::typeToString(roi.type())<<"\n";
siftPtr->compute(roi,
keypointsROI,

@@ -550,6 +545,7 @@ class PointBase {

keypointsImg,
descriptorsImg);
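//NOTE: compute() fills one descriptor row per keypoint; for SIFT each row is a
//128-element CV_32F vector, and the FLANN matcher below matches these rows.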

//match the descriptors
cv::FlannBasedMatcher matcher;
std::vector<cv::DMatch> matches;
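//NOTE: FlannBasedMatcher expects floating-point descriptors such as SIFT's;
//binary descriptors (ORB/BRIEF) would need a BFMatcher with NORM_HAMMING instead.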

@@ -567,33 +563,38 @@ class PointBase {

double max_dist = 0; double min_dist = 100;
-cout<<"ROWS DESC:"<<descriptorsImg.rows<<"\n";
-cout<<"SIZE DESC:"<<descriptorsImg.size()<<"\n";
-//-- Quick calculation of max and min distances between keypoints
+//cout<<"ROWS DESC:"<<descriptorsImg.rows<<"\n";
+//cout<<"SIZE DESC:"<<descriptorsImg.size()<<"\n";
+//Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptorsImg.rows; i++ )
-{ double dist = matches[i].distance;
-cout<<"DIST:"<<dist<<"\n";
-if( dist < min_dist ) min_dist = dist;
-if( dist > max_dist ) max_dist = dist;
+{
+double dist = matches[i].distance;
+cout<<"DIST:"<<dist<<"\n";
+if( dist < min_dist ) min_dist = dist;
+if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );

-std::vector< DMatch > good_matches;
+//find the GOOD matches from all descriptor matches
+//e.g. the ones that have a distance LESS than 3* the smallest computed distance
+std::vector< DMatch > good_matches;
for( int i = 0; i < descriptorsImg.rows; i++ )
-{ if( matches[i].distance < 3*min_dist )
-{ good_matches.push_back( matches[i]); }
+{
+if( matches[i].distance < 3*min_dist ) { good_matches.push_back( matches[i]); }
}

std::vector< Point2d > obj;
std::vector< Point2d > scene;
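//NOTE: in cv::DMatch, queryIdx indexes the first descriptor set passed to match()
//and trainIdx the second; the loop below has to pick keypointsImg and keypointsROI
//accordingly, otherwise the estimated transform is meaningless.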

for( int i = 0; i < good_matches.size(); i++ )
{
-//-- Get the keypoints from the good matches
-obj.push_back( static_cast<cv::Point2i>( keypointsImg[ good_matches[i].queryIdx ].pt ));
-scene.push_back( static_cast<cv::Point2i>(keypointsROI[ good_matches[i].trainIdx ].pt ));
+//Get the keypoints from the good matches
+obj.push_back( static_cast<cv::Point2i>( keypointsImg[ good_matches[i].trainIdx ].pt ));
+scene.push_back( static_cast<cv::Point2i>(keypointsROI[ good_matches[i].queryIdx ].pt ));
}
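//NOTE: KeyPoint::pt is a Point2f; casting it to Point2i rounds the sub-pixel
//coordinates to whole pixels before they are stored in the Point2d vectors,
//which discards some localisation accuracy for the transform estimate.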

/*cv::Mat imageMatches2;

@@ -602,18 +603,84 @@ class PointBase {

cv::imshow("good_matches",imageMatches2);
cv::waitKey(0);*/

//find a transformation based on good matches
//we do not need a homography, since the images are related by an affine
//transformation (no perspective/viewpoint changes are expected)
//Mat H = findHomography( Mat(obj), Mat(scene), RANSAC );
Mat H = estimateAffinePartial2D( Mat(obj), Mat(scene), noArray(), RANSAC );
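//NOTE: estimateAffinePartial2D returns a 2x3 CV_64F matrix restricted to
//rotation, uniform scale and translation (4 DoF); warpAffine below expects
//exactly this 2x3 shape, whereas findHomography would return a 3x3 matrix for
//warpPerspective. It returns an empty Mat if RANSAC finds no consistent set of
//inliers, which is worth checking before warping.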

cv::Mat result;
cv::Mat resultWarp;
cv::Mat resultBlend,roiMask,imgWarpedMask,overlapMask,overlapDST,roiDT, imgDT;

//warpPerspective(imgSmall,roi,H,cv::Size(roi.cols,roi.rows));
warpAffine(imgSmall,result,H,cv::Size(roi.cols,roi.rows));
warpAffine(imgSmall,resultWarp,H,cv::Size(roi.cols,roi.rows));

/*cv::Mat half(result,cv::Rect(0,0,imgSmall.cols,imgSmall.rows));
result.copyTo(roi);*/

imshow("imgSmall", imgSmall);
imshow( "Result", result );
imshow( "resultWarp", resultWarp );
imshow("roi", roi);
cv::waitKey(0);

//cv::addWeighted( roi, 0.5, resultWarp, 0.5, 0.0, resultBlend);

//thresholding roi and the warped img in order to create image masks
threshold(roi,roiMask,1,255,THRESH_BINARY);
threshold(resultWarp,imgWarpedMask,1,255,THRESH_BINARY);
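//NOTE: the last argument of cv::threshold is the thresholding type, not an output
//depth; THRESH_BINARY sets every pixel brighter than the threshold (1) to 255, so
//roiMask and imgWarpedMask mark the areas that each image actually covers.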

multiply(roiMask,imgWarpedMask,overlapMask);
//convert everything to single-channel grayscale so that the per-element
//operations below work on one channel
cv::cvtColor(overlapMask,overlapMask,COLOR_BGR2GRAY);
cv::cvtColor(roiMask,roiMask,COLOR_BGR2GRAY);
cv::cvtColor(imgWarpedMask,imgWarpedMask,COLOR_BGR2GRAY);
cv::cvtColor(roi,roi,COLOR_BGR2GRAY);
cv::cvtColor(resultWarp,resultWarp,COLOR_BGR2GRAY);
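//NOTE: multiply() on the two 8-bit masks saturates at 255, so overlapMask ends up
//as the intersection of the two coverage masks; distanceTransform() below also
//requires single-channel 8-bit input, which is why the cvtColor calls are needed.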

//the blending function is DistanceTransform(img1)/(DT(img1)+DT(img2)) - DT is created from the masks
distanceTransform(roiMask,roiDT,DIST_L2, 3);
distanceTransform(imgWarpedMask,imgDT,DIST_L2, 3);
//normalize(overlapDST, overlapDST, 0.0, 1.0, NORM_MINMAX);

cout<<cv::typeToString(roiDT.type())<< "ROI TYPE \n";
cout<<cv::typeToString(imgDT.type())<< "img TYPE \n";
//in order for imshow to work, we need to normalize into 0-1 range
normalize(roiDT, roiDT, 0.0, 1.0, NORM_MINMAX);
normalize(imgDT, imgDT, 0.0, 1.0, NORM_MINMAX);
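//NOTE: distanceTransform() outputs CV_32F distances to the nearest zero pixel, so
//per pixel the weights computed below are
//    w_roi = DT(roiMask) / (DT(roiMask) + DT(imgMask))
//    w_img = DT(imgMask) / (DT(roiMask) + DT(imgMask))
//which sum to 1 and fall off towards each image's border (feather blending).
//Because roiDT and imgDT are min-max normalised independently just above, the
//ratios are taken on the rescaled distances rather than the raw ones.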

Mat roiOverlapAlpha, imgOverlapAlpha, resultRoi;

//per-pixel blending weights for the ROI and the warped img; after weighting,
//the two images only need to be added together
cv::divide(roiDT,(roiDT+imgDT),roiOverlapAlpha);
cv::divide(imgDT,(roiDT+imgDT),imgOverlapAlpha);

//imshow("roiMask", roiMask);
//imshow("imgOverlapAlpha", imgOverlapAlpha);

//convert the 8-bit images to float so they can be multiplied by the float weights
roi.convertTo(roi,CV_32FC1);
resultWarp.convertTo(resultWarp,CV_32FC1);
cout<<cv::typeToString(resultWarp.type())<< "resultWarp type \n";
cout<<cv::typeToString(roi.type())<< "roi type \n";

multiply(roiOverlapAlpha,roi,roiOverlapAlpha);
multiply(imgOverlapAlpha,resultWarp,imgOverlapAlpha);
normalize(imgOverlapAlpha, imgOverlapAlpha, 0.0, 1.0, NORM_MINMAX);
normalize(roiOverlapAlpha, roiOverlapAlpha, 0.0, 1.0, NORM_MINMAX);
imshow("imgOverlapAlpha", imgOverlapAlpha);
cout<<"blending ROI and warped image\n";
cv::add(roiOverlapAlpha,imgOverlapAlpha, resultRoi);

normalize(resultRoi, resultRoi, 0.0, 1.0, NORM_MINMAX);
imshow("resultRoi", resultRoi);
cv::waitKey(0);
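//NOTE: the add() combines the two weighted images, so per pixel resultRoi is
//roughly w_roi*roi + w_img*resultWarp (the feathered blend); each weighted term
//is min-max normalised separately before the add, which - as the earlier comment
//suggests - is mainly so imshow can display the float images, but it does rescale
//the two terms relative to each other.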

//cv::cvtColor(roiMask,roiMask,COLOR_GRAY2BGR);
//cout<<cv::typeToString(roiMask.type())<< "img TYPE \n";
//multiply(roiOverlapAlpha,roiMask,roiOverlapAlpha);

cv::waitKey(0);

//cout<<CV_VERSION;