
Commit

Visual Odometry working...with some errors
“Akbonline” committed Oct 31, 2020
1 parent eee29cd commit 7045a87
Showing 7 changed files with 40 additions and 34 deletions.
Binary file removed .descriptor.py.swp
Binary file not shown.
2 changes: 1 addition & 1 deletion Camera.py
@@ -11,7 +11,7 @@ def featureMapping(image):
     return np.array([(kp.pt[0], kp.pt[1]) for kp in key_pts]), descriptors

 def normalize(count_inv, pts):
-    return np.dot(count_inv, add_ones(pts).T).T[:, 0:2]
+    return np.dot(count_inv, np.concatenate([pts, np.ones((pts.shape[0], 1))], axis=1).T).T[:, 0:2]

 def denormalize(count, pt):
     ret = np.dot(count, np.array([pt[0], pt[1], 1.0]))
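For context on the hunk above: normalize() lifts each pixel keypoint to homogeneous form [x, y, 1] and multiplies by the inverse camera intrinsic matrix (count_inv being K^-1), which yields normalized camera coordinates; denormalize() applies K to map back to pixels. A minimal, self-contained sketch of that round trip, using illustrative intrinsics rather than this repo's actual values:

import numpy as np

# Illustrative intrinsics (focal length and image size are assumptions, not the repo's).
F, W, H = 500, 960, 540
K = np.array([[F, 0, W // 2],
              [0, F, H // 2],
              [0, 0, 1]], dtype=float)
Kinv = np.linalg.inv(K)

def to_homogeneous(pts):
    # [[x, y]] -> [[x, y, 1]]
    return np.concatenate([pts, np.ones((pts.shape[0], 1))], axis=1)

def normalize(Kinv, pts):
    # pixel coordinates -> normalized camera coordinates via K^-1
    return np.dot(Kinv, to_homogeneous(pts).T).T[:, 0:2]

def denormalize(K, pt):
    # normalized coordinates -> pixel coordinates via K
    ret = np.dot(K, np.array([pt[0], pt[1], 1.0]))
    ret /= ret[2]
    return int(round(ret[0])), int(round(ret[1]))

pts = np.array([[100.0, 200.0], [480.0, 270.0]])
print(denormalize(K, normalize(Kinv, pts)[0]))  # prints (100, 200)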
Binary file modified __pycache__/Camera.cpython-38.pyc
Binary file not shown.
Binary file modified __pycache__/descriptor.cpython-38.pyc
Binary file not shown.
Binary file added __pycache__/match_frames.cpython-38.pyc
Binary file not shown.
18 changes: 11 additions & 7 deletions match_frames.py
@@ -6,7 +6,11 @@
 from skimage.transform import FundamentalMatrixTransform
 from skimage.transform import EssentialMatrixTransform

-#GEtting rid of the outliers:
+# turn [[x,y]] -> [[x,y,1]]
+def add_ones(x):
+    return np.concatenate([x, np.ones((x.shape[0], 1))], axis=1)
+
+#Getting rid of the outliers:
 def extractRt(F):
     W = np.mat([[0,-1,0],[1,0,0],[0,0,1]],dtype=float)
     U,d,Vt = np.linalg.svd(F)
@@ -24,7 +28,7 @@ def extractRt(F):
     return ret

 #We make use of the Lowe's Ratio Test:
-def match_frames(f1, f2):
+def generate_match(f1, f2):
     bf = cv2.BFMatcher(cv2.NORM_HAMMING)
     matches = bf.knnMatch(f1.descriptors, f2.descriptors, k=2)

@@ -34,16 +38,16 @@

     for m,n in matches:
         if m.distance < 0.75*n.distance:
-            pts1 = f1.key_pts[m.old_x]
-            pts2 = f2.key_pts[m.new_x]
+            pts1 = f1.key_pts[m.queryIdx]
+            pts2 = f2.key_pts[m.trainIdx]

             # travel less than 10% of diagonal and be within orb distance 32
             if np.linalg.norm((pts1-pts2)) < 0.1*np.linalg.norm([f1.w, f1.h]) and m.distance < 32:
                 # keep around indices
                 # TODO: refactor this to not be O(N^2)
-                if m.old_x not in x1 and m.new_x not in x2:
-                    x1.append(m.old_x)
-                    x2.append(m.new_x)
+                if m.queryIdx not in x1 and m.trainIdx not in x2:
+                    x1.append(m.queryIdx)
+                    x2.append(m.trainIdx)

                     ret.append((pts1, pts2))

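The last hunk above replaces m.old_x / m.new_x, which cv2.DMatch does not provide, with the real queryIdx / trainIdx fields, inside Lowe's ratio test (keep a match only when the best candidate is clearly better than the runner-up). A minimal sketch of that pattern on two standalone images; the file names are placeholders, not part of the repo:

import cv2

img1 = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
img2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)   # two best candidates per descriptor

good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:   # Lowe's ratio test
        # queryIdx indexes keypoints of the first image, trainIdx of the second
        good.append((kp1[m.queryIdx].pt, kp2[m.trainIdx].pt))

print("kept %d of %d matches" % (len(good), len(matches)))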
54 changes: 28 additions & 26 deletions slam.py
@@ -1,88 +1,90 @@
import numpy as np
import cv2
import os,sys,time,g2o
import triangulation
from triangulation import triangulate
from Camera import denormalize, normalize, Camera
from display import Display
from match_frames import generate_match
from descriptor import Descriptor, Point

W,H = 1080,720
K = np.array([[0,0,0],[0,0,0],[0,0,0]])
desc_dict = Descriptor()

if os.getenv("D3D") is not None:
desc_dict.create_viewer()
F= int(os.getenv("F","500"))
W, H = 1920//2, 1080//2
K = np.array([[F,0,W//2],[0,F,H//2],[0,0,1]])
desc_dict = Descriptor()
# if os.getenv("D3D") is not None:
desc_dict.create_viewer()

disp = None
if os.getenv("D2D") is not None:
disp = Display(W, H)
# disp = None
# if os.getenv("D2D") is not None:
disp = Display(W, H)


def calibrate():
# camera intrinsics
W, H = 1920//2, 1080//2
K = np.array([[F,0,W//2],[0,F,H//2],[0,0,1]])
Kinv = np.linalg.inv(K)
def calibrate(image):
# camera intrinsics...<================Check this
image = cv2.resize(image, (W,H))
return image

def generate_SLAM(image):
image = calibrate(image)
print("Thisis a test0")
frame = Camera(desc_dict, image, K)
if frame.id == 0:
return
frame1 = desc_dict.frames[-1]
frame2 = desc_dict.frames[-2]

x1,x2,Id = match_frame(frame1,frame2)
x1,x2,Id = generate_match(frame1,frame2)
frame1.pose =np.dot(Id,frame2.pose)
for i,idx in enumerate(x2):
if frame2.pts[idx] is not None:
frame2.pts[idx].add_observation(frame1,x1[i])
# homogeneous 3-D coords
pts4d = triangulate([frame1.pose, frame2.pose, frame1.kps[x1], frame2.kps[x2]])
# homogeneous 3-D coords
print("Thisis a test1")
pts4d = triangulate(frame1.pose, frame2.pose, frame1.key_pts[x1], frame2.key_pts[x2])
pts4d /= pts4d[:, 3:]

# reject pts without enough "parallax" (this right?)
# reject points behind the camera
unmatched_points = np.array([frame1.pts[i] is None for i in x1])
print("Adding: %d points" % np.sum(unmatched_points))
good_pts4d = (np.abs(pts4d[:, 3]) > 0.005) & (pts4d[:, 2] > 0) & unmatched_points
#print(sum(good_pts4d), len(good_pts4d))

for i,p in enumerate(pts4d):
if not good_pts4d[i]:
continue
pt = Point(mapp, p)
pt = Point(desc_dict, p)
pt.add_observation(frame1, x1[i])
pt.add_observation(frame2, x2[i])

for pt1, pt2 in zip(f1.kps[x1], f2.kps[x2]):
for pt1, pt2 in zip(frame1.key_pts[x1], frame2.key_pts[x2]):
u1, v1 = denormalize(K, pt1)
u2, v2 = denormalize(K, pt2)
cv2.circle(image, (u1, v1), color=(0,255,0), radius=1)
cv2.line(image, (u1, v1), (u2, v2), color=(255, 255,0))

# 2-D display
if disp is not None:
disp.paint(image)
disp.display2D(image)
# 3-D display
desc_dict.display()

if __name__ == "__main__":
if len(sys.argv) < 2:
print("%s takes in .mp4 as an arg" %sys.argv[0])
exit(-1)
print("Thisis a test-1")
cap = cv2.VideoCapture(sys.argv[1]) # Can try Realtime(highly unlikely though)
test= Display(W,H)
print("Thisis a test-2")
while cap.isOpened():
ret, frame = cap.read()
print("Thisis a test-3")
frame1 = cv2.resize(frame, (720,400)) #Resizing the original window
if ret == True:
print("Thisis a test")
cv2.imshow("Frame",frame1)
if cv2.waitKey(1) & 0xFF == ord('q'): #Quit Condition
break
generate_SLAM(frame)
else:
break
cap.release()
cv2.destoryAllWindows()

cv2.destroyAllWindows()
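triangulation.py itself is not part of this diff, so the following is only a sketch of the standard linear (DLT) triangulation that the call triangulate(frame1.pose, frame2.pose, pts1, pts2) and the later pts4d /= pts4d[:, 3:] suggest; the signature and the pose convention (4x4 projection-style matrices, Nx2 normalized points) are assumptions, not the repo's confirmed implementation:

import numpy as np

def triangulate(pose1, pose2, pts1, pts2):
    # Build the 4x4 DLT system for each correspondence and take the
    # right singular vector with the smallest singular value.
    ret = np.zeros((pts1.shape[0], 4))
    for i, (p1, p2) in enumerate(zip(pts1, pts2)):
        A = np.zeros((4, 4))
        A[0] = p1[0] * pose1[2] - pose1[0]
        A[1] = p1[1] * pose1[2] - pose1[1]
        A[2] = p2[0] * pose2[2] - pose2[0]
        A[3] = p2[1] * pose2[2] - pose2[1]
        _, _, vt = np.linalg.svd(A)
        ret[i] = vt[3]          # homogeneous [X, Y, Z, W]
    return ret

# As in generate_SLAM above: divide by the homogeneous coordinate afterwards,
# pts4d /= pts4d[:, 3:], to obtain Euclidean 3-D points.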
