import serial
import threading
import time


class Frame():
    """One 8x8 thermal frame plus the time it was captured."""

    def __init__(self, timestamp, data):
        self.time = timestamp
        self.data = data


class GridEye():
    """Background reader for a board that streams two Grid-EYE frames and a
    distance value over one serial port."""

    def __init__(self, serialPort, baudrate):
        self.port = serial.Serial(serialPort, baudrate)
        self.frame1 = None
        self.frame2 = None
        self.reading = True
        self.distance = -1
        self.thread = threading.Thread(target=self.reader)
        self.thread.daemon = True
        self.lock = threading.Lock()

    def start(self):
        self.port.reset_input_buffer()
        self.thread.start()

    def stop(self):
        self.reading = False
        self.thread.join()

    def reader(self):
        while self.reading:
            # Read one newline-terminated line byte by byte so the loop can
            # exit promptly once self.reading is cleared.
            line = b''
            while self.reading:
                c = self.port.read()
                if c == b'\n':
                    break
                line += c

            if b':' not in line:
                continue
            try:
                tag, payload = line.decode('utf-8').split(':')[:2]
                if 'Distance' in tag:
                    dist = float(payload)
                    if dist > 200.0:
                        dist = 200.0
                    with self.lock:
                        self.distance = dist
                else:
                    # 64 hex temperature registers, 0.25 degC per LSB.
                    values = [int(x, 16) * 0.25 for x in payload.split()]
                    if len(values) == 64:
                        data = [values[i * 8:i * 8 + 8] for i in range(8)]
                        with self.lock:
                            if '104' in tag:
                                self.frame1 = Frame(time.time(), data)
                            else:
                                self.frame2 = Frame(time.time(), data)
                    else:
                        print('something wrong', len(values))
            except Exception as e:
                print(e)


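# The GridEye reader assumes line-oriented serial output of the following shape
# (inferred from the parsing code above, not from a documented protocol):
#   "Distance:<float>"            -> updates self.distance (clamped to 200.0)
#   "104:<64 hex registers>"      -> stored as frame1 (tag containing '104')
#   "<other tag>:<64 hex values>" -> stored as frame2
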
class Distance():
    """Background reader for a standalone range sensor that prints
    'Distance:<value>' lines."""

    def __init__(self, serialPort, baudrate):
        self.port = serial.Serial(serialPort, baudrate)
        self.distance = 200
        self.reading = True
        self.thread = threading.Thread(target=self.reader)
        self.thread.daemon = True
        self.lock = threading.Lock()

    def start(self):
        self.port.reset_input_buffer()
        self.thread.start()

    def stop(self):
        self.reading = False
        self.thread.join()

    def reader(self):
        while self.reading:
            line = b''
            while self.reading:
                c = self.port.read()
                if c == b'\r':
                    # Consume the '\n' that follows in a CRLF terminator.
                    self.port.read()
                    break
                if c == b'\n':
                    break
                line += c

            if b'Distance' in line:
                try:
                    dist = float(line.decode('utf-8').split(':')[1])
                    print(dist)
                    if dist > 200.0:
                        dist = 200.0
                    with self.lock:
                        self.distance = dist
                except ValueError as e:
                    print('error', e)


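# Note: the Distance class is not used by the demo below; its instantiation is
# left commented out and distance2Object is taken from the GridEye boards instead.
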
if __name__ == '__main__':
    import cv2
    import numpy as np
    import math

    def exponential(img, value):
        """Apply a power (gamma-like) curve and rescale back to the 0-255 range."""
        tmp = cv2.pow(img.astype(np.double), value) * (255.0 / (255.0 ** value))
        return tmp.astype(np.uint8)

    SIZE = 128
    AVERAGE_FRAME = 10
    distanceBetweenSensors_w = 2.6  # cm
    distanceBetweenSensors_h = 2.6  # cm
    distance2Object = 60.0  # cm
    ADJUST_BACK = 5
    EXPONENTAL_VALUE = 0.4

    grideye = GridEye('COM25', 115200)
    grideye.start()
    grideye2 = GridEye('COM24', 115200)
    grideye2.start()

    # distanceSensor = Distance('COM18', 9600)
    # distanceSensor.start()

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    videoWriter = cv2.VideoWriter('output.avi', fourcc, 10.0, (SIZE * 4, SIZE * 4))
    siftVideoWriter = cv2.VideoWriter('sift.avi', fourcc, 10.0, (SIZE * 2, SIZE * 1))
    cv2.imshow('sample', np.zeros((SIZE * 3, SIZE * 2), np.uint8))
    cnt = 0
    avers = []

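    # Output canvas layout (SIZE*4 x SIZE*4), as assembled inside the loop below:
    #   top half     - the four raw (background-subtracted) sensor views, 2x2
    #   bottom-left  - mosaic stitched with the geometry-based overlap estimate
    #   bottom-right - mosaic stitched with the correlation-based overlap estimate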
    while True:
        if grideye.frame1 and grideye.frame2 and grideye2.frame1 and grideye2.frame2:
            # Grab the four latest frames atomically and clear them so the
            # next pass waits for a fresh set.
            grideye.lock.acquire()
            grideye2.lock.acquire()
            frames = [grideye.frame1, grideye.frame2, grideye2.frame1, grideye2.frame2]
            grideye.frame1 = None
            grideye.frame2 = None
            grideye2.frame1 = None
            grideye2.frame2 = None
            distance2Object = grideye.distance + grideye2.distance + 1
            print(distance2Object)
            if distance2Object <= 0:
                distance2Object = 200
            grideye2.lock.release()
            grideye.lock.release()

            imgs = []
            for frame in frames:
                # Shift/scale the 0.25 degC readings into the 8-bit range and
                # upsample the 8x8 grid to SIZE x SIZE.
                img = (np.array(frame.data) - 15) * 10
                img = cv2.resize(img.astype(np.uint8), (SIZE, SIZE), interpolation=cv2.INTER_LINEAR)  # INTER_LINEAR, INTER_CUBIC
                imgs.append(img)
                if len(avers) < len(frames):
                    avers.append(np.zeros((SIZE, SIZE), np.uint16))

            if cnt < AVERAGE_FRAME:
                # Accumulate the first AVERAGE_FRAME frames as a background estimate.
                cnt += 1
                for i in range(len(imgs)):
                    avers[i] += imgs[i]
                if cnt == AVERAGE_FRAME:
                    for i in range(len(avers)):
                        avers[i] = (avers[i] / AVERAGE_FRAME).astype(np.uint8)
                        avers[i] += ADJUST_BACK
                continue

            # Background subtraction against the averaged startup frames.
            for i in range(len(imgs)):
                imgs[i] = cv2.subtract(imgs[i], avers[i])

            # Top half of the canvas: the four raw sensor views in a 2x2 grid.
            out = np.full((SIZE * 4, SIZE * 4), 255, dtype=np.uint16)
            out[:SIZE, :SIZE] = imgs[0]
            out[:SIZE, SIZE:SIZE * 2] = imgs[1]
            out[SIZE:SIZE * 2, :SIZE] = imgs[2]
            out[SIZE:SIZE * 2, SIZE:SIZE * 2] = imgs[3]

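            # Geometry-based overlap estimate. A sketch of the assumption behind
            # the formula below (inferred from the code, not from a datasheet):
            # with a 60-degree field of view a sensor covers 2*d*tan(30 deg) cm at
            # distance d, so two sensors separated by b cm share roughly
            # SIZE * (1 - b / (2*d*tan(30 deg))) pixels of their images.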
            try:
                overlap_w = int(SIZE - (distanceBetweenSensors_w / (2 * distance2Object * math.tan(30.0 / 180.0 * math.pi))) * SIZE)
            except ZeroDivisionError:
                overlap_w = 0
            if overlap_w < 0:
                overlap_w = 0

            try:
                overlap_h = int(SIZE - (distanceBetweenSensors_h / (2 * distance2Object * math.tan(30.0 / 180.0 * math.pi))) * SIZE)
            except ZeroDivisionError:
                overlap_h = 0
            if overlap_h < 0:
                overlap_h = 0

            # Blend the two horizontal pairs, averaging the overlapping strip.
            tmp = np.zeros((SIZE, SIZE * 2 - overlap_w), dtype=np.uint16)
            tmp[:, :SIZE] = imgs[0]
            tmp[:, -SIZE:] += imgs[1]
            tmp[:, (SIZE - overlap_w):SIZE] = tmp[:, (SIZE - overlap_w):SIZE] / 2

            tmp2 = np.zeros((SIZE, SIZE * 2 - overlap_w), dtype=np.uint16)
            tmp2[:, :SIZE] = imgs[2]
            tmp2[:, -SIZE:] += imgs[3]
            tmp2[:, (SIZE - overlap_w):SIZE] = tmp2[:, (SIZE - overlap_w):SIZE] / 2

            # Then blend the two rows vertically in the same way.
            merge = np.zeros((SIZE * 2 - overlap_h, SIZE * 2 - overlap_w), dtype=np.uint16)
            merge[:SIZE, :] = tmp
            merge[-SIZE:, :] += tmp2
            merge[(SIZE - overlap_h):SIZE, :] = merge[(SIZE - overlap_h):SIZE, :] / 2
            # merge = exponential(merge, EXPONENTAL_VALUE)

            # Bottom-left quadrant: the geometry-based mosaic.
            offset_w = int(overlap_w / 2)
            offset_h = int(overlap_h / 2)
            print(SIZE * 2 + offset_h, SIZE * 4 - overlap_h + offset_h, offset_w, SIZE * 2 - overlap_w + offset_w)
            out[SIZE * 2 + offset_h:SIZE * 4 - overlap_h + offset_h, offset_w:SIZE * 2 - overlap_w + offset_w] = merge

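            # Second overlap estimate: try every candidate overlap between 80 and
            # 127 pixels and keep the one with the highest correlation between the
            # overlapping strips. Note that the built-in sum() over a 2D array adds
            # its rows (an axis-0 sum), so each candidate is scored by the mean
            # column-sum of the element-wise product of the two strips.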
            maxProduct = 0
            overlap_w = 0
            for i in range(80, 128):
                product = sum(imgs[0][:, SIZE - i:].astype(np.uint32) * imgs[1][:, :i].astype(np.uint32))
                product += sum(imgs[2][:, SIZE - i:].astype(np.uint32) * imgs[3][:, :i].astype(np.uint32))
                product = sum(product) / len(product)
                if product > maxProduct:
                    maxProduct = product
                    overlap_w = i

            maxProduct = 0
            overlap_h = 0
            for i in range(80, 128):
                product = sum(imgs[0][SIZE - i:, :].astype(np.uint32) * imgs[2][:i, :].astype(np.uint32))
                product += sum(imgs[1][SIZE - i:, :].astype(np.uint32) * imgs[3][:i, :].astype(np.uint32))
                product = sum(product) / len(product)
                if product > maxProduct:
                    maxProduct = product
                    overlap_h = i

            tmp = np.zeros((SIZE, SIZE * 2 - overlap_w), dtype=np.uint16)
            tmp[:, :SIZE] = imgs[0]
            tmp[:, -SIZE:] += imgs[1]
            tmp[:, (SIZE - overlap_w):SIZE] = tmp[:, (SIZE - overlap_w):SIZE] / 2

            tmp2 = np.zeros((SIZE, SIZE * 2 - overlap_w), dtype=np.uint16)
            tmp2[:, :SIZE] = imgs[2]
            tmp2[:, -SIZE:] += imgs[3]
            tmp2[:, (SIZE - overlap_w):SIZE] = tmp2[:, (SIZE - overlap_w):SIZE] / 2

            merge = np.zeros((SIZE * 2 - overlap_h, SIZE * 2 - overlap_w), dtype=np.uint16)
            merge[:SIZE, :] = tmp
            merge[-SIZE:, :] += tmp2
            merge[(SIZE - overlap_h):SIZE, :] = merge[(SIZE - overlap_h):SIZE, :] / 2

            # Bottom-right quadrant: the correlation-based mosaic.
            offset_w = int(overlap_w / 2)
            offset_h = int(overlap_h / 2)
            out[SIZE * 2 + offset_h:SIZE * 4 - overlap_h + offset_h, SIZE * 2 + offset_w:SIZE * 4 - overlap_w + offset_w] = merge

            out = out.astype(np.uint8)
            out = exponential(out, EXPONENTAL_VALUE)

            cv2.imshow('sample', out)
            videoWriter.write(cv2.cvtColor(out, cv2.COLOR_GRAY2BGR))

            try:
                # SIFT matching between the first horizontal pair, for visual
                # inspection of the alignment (requires opencv-contrib).
                sift = cv2.xfeatures2d.SIFT_create()
                img1 = exponential(imgs[0], EXPONENTAL_VALUE)
                img2 = exponential(imgs[1], EXPONENTAL_VALUE)
                kp_1, desc_1 = sift.detectAndCompute(img1, None)
                kp_2, desc_2 = sift.detectAndCompute(img2, None)

                index_params = dict(algorithm=0, trees=5)
                search_params = dict()
                flann = cv2.FlannBasedMatcher(index_params, search_params)
                matches = flann.knnMatch(desc_1, desc_2, k=2)

                # Lowe's ratio test to keep only distinctive matches.
                good_points = []
                ratio = 0.8
                for m, n in matches:
                    if m.distance < ratio * n.distance:
                        good_points.append(m)
                result = cv2.drawMatches(img1, kp_1, img2, kp_2, good_points, None)
                cv2.imshow("result", result)
                print(result.shape)
                siftVideoWriter.write(result)
            except Exception:
                pass

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('c'):
            cv2.imwrite('out.jpg', out)
        time.sleep(0.001)

    grideye.stop()
    grideye2.stop()
    videoWriter.release()
    siftVideoWriter.release()
    cv2.destroyAllWindows()