|
|
@@ -10,8 +10,10 @@ class Frame():
|
|
|
class GridEye(): |
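# Background reader for one serial-attached thermal camera board. Each GridEye instance
# opens a port, spawns a daemon thread, and publishes the most recent parsed 8x8 frames
# in self.frame / self.frame1 / self.frame2 (the board appears to carry two sensors,
# distinguished by an address tag such as '104') plus a distance reading. Shared fields
# are guarded by self.lock.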
|
|
|
def __init__(self, serialPort, baudrate): |
|
|
|
self.port = serial.Serial(serialPort, baudrate) |
|
|
|
self.frame = None |
|
|
|
self.frame1 = None |
|
|
|
self.frame2 = None |
|
|
|
self.reading = True |
|
|
|
self.distance = -1 |
|
|
|
self.thread = threading.Thread(target = self.reader) |
|
|
|
self.thread.daemon = True
|
|
|
self.lock = threading.Lock() |
|
|
@@ -25,15 +27,10 @@ class GridEye():
|
|
|
self.thread.join() |
|
|
|
|
|
|
|
def reader(self): |
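# Runs on the daemon thread: assemble one CR/LF-terminated line at a time, then dispatch
# on its content -- a line containing '#' closes an 8-row frame of floats, 'Distance:'
# lines update self.distance, and address-tagged lines carry a 64-pixel frame as hex words.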
|
|
|
data = [] |
|
|
|
data_time = 0
|
|
|
while (self.reading): |
|
|
|
line = b'' |
|
|
|
while (self.reading): |
|
|
|
c = self.port.read() |
|
|
|
if c == b'\r': |
|
|
|
c = self.port.read() |
|
|
|
break |
|
|
|
if c == b'\n': |
|
|
|
break |
|
|
|
line += c |
|
|
@@ -43,28 +40,36 @@ class GridEye():
|
|
|
# time.sleep(0.01) |
|
|
|
# if self.port.in_waiting > 0: |
|
|
|
# print (self.port.in_waiting) |
|
|
|
if b'#' in line: |
|
|
|
if len(data) == 8: |
|
|
|
#print (data) |
|
|
|
self.lock.acquire() |
|
|
|
self.frame = Frame(data_time, data) |
|
|
|
self.lock.release() |
|
|
|
else: |
|
|
|
print('unexpected row count:', len(data))
|
|
|
data_time = time.time() |
|
|
|
data = [] |
|
|
|
else: |
|
|
|
if b':' in line: |
|
|
|
try: |
|
|
|
row = [float(x) for x in line.split()] |
|
|
|
if len(row) == 8: |
|
|
|
data.append(row) |
|
|
|
except ValueError as e: |
|
|
|
print('parse error:', e)
|
|
|
data_time = time.time() |
|
|
|
data = [] |
|
|
|
if len(data) > 8: |
|
|
|
data.pop(0) |
|
|
|
|
|
|
|
tag = line.decode('utf-8').split(':')[0] |
|
|
|
|
|
|
|
|
|
|
|
if 'Distance' in tag: |
|
|
|
dist = float(line.decode('utf-8').split(':')[1]) |
|
|
|
if dist > 200.0: |
|
|
|
dist = 200.0 |
|
|
|
self.lock.acquire() |
|
|
|
self.distance = dist |
|
|
|
self.lock.release() |
|
|
|
else: |
|
|
|
values = [int(x, 16)*0.25 for x in line.decode('utf-8').split(':')[1].split()] |
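# Each pixel arrives as a hex word; the 0.25 factor converts raw counts to degrees C,
# assuming the sensor reports temperature in 0.25 degC steps (typical for Grid-EYE
# style parts -- an assumption, not confirmed by this code).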
|
|
|
if len(values) == 64: |
|
|
|
#print (data) |
|
|
|
data = [] |
|
|
|
for i in range(8): |
|
|
|
data.append(values[i*8:i*8+8]) |
|
|
|
self.lock.acquire() |
|
|
|
if '104' in tag: |
|
|
|
self.frame1 = Frame(time.time(), data) |
|
|
|
else: |
|
|
|
self.frame2 = Frame(time.time(), data) |
|
|
|
self.lock.release() |
|
|
|
else: |
|
|
|
print('unexpected pixel count:', len(values))
|
|
|
except Exception as e: |
|
|
|
print (e) |
|
|
|
|
|
|
|
class Distance(): |
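# Reader for a serial range finder; exposes start(), lock and distance like GridEye,
# so the main loop can take its distance2Object value from here.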
|
|
|
def __init__(self, serialPort, baudrate): |
|
|
|
self.port = serial.Serial(serialPort, baudrate) |
|
|
@@ -115,144 +120,195 @@ if __name__ == '__main__':
|
|
|
return tmp.astype(np.uint8) |
|
|
|
|
|
|
|
SIZE = 128 |
|
|
|
overlap = 120 |
|
|
|
AVERAGE_FRAME = 10 |
|
|
|
distanceBetweenSensors = 7.7 #cm |
|
|
|
distanceBetweenSensors_w = 2.6 #cm |
|
|
|
distanceBetweenSensors_h = 2.6 #cm |
|
|
|
distance2Object = 60.0 #cm |
|
|
|
ADJUST_BACK = 5 |
|
|
|
EXPONENTAL_VALUE = 0.4 |
|
|
|
|
|
|
|
offset = (distanceBetweenSensors / (2*distance2Object*math.tan(30.0/180.0*math.pi))) * SIZE |
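# Overlap geometry, assuming each sensor spans a 60-degree field of view (hence tan(30 deg)):
# at distance d a sensor images a strip of width 2*d*tan(30 deg), so the pixel offset between
# the two views is (sensor spacing / strip width) * SIZE. With the defaults above:
#   strip width ~= 2 * 60.0 * tan(30 deg) ~= 69.3 cm
#   offset ~= (7.7 / 69.3) * 128 ~= 14.2 px, so overlap = int(128 - 14.2) = 113 px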
|
|
|
overlap = int(SIZE - offset) |
|
|
|
print (overlap) |
|
|
|
grideye = GridEye('COM15', 115200) |
|
|
|
|
|
|
|
grideye = GridEye('COM25', 115200) |
|
|
|
grideye.start() |
|
|
|
grideye2 = GridEye('COM17', 115200) |
|
|
|
grideye2 = GridEye('COM24', 115200) |
|
|
|
grideye2.start() |
|
|
|
|
|
|
|
distanceSensor = Distance('COM18', 9600) |
|
|
|
distanceSensor.start() |
|
|
|
# distanceSensor = Distance('COM18', 9600) |
|
|
|
# distanceSensor.start() |
|
|
|
|
|
|
|
fourcc = cv2.VideoWriter_fourcc(*'XVID') |
|
|
|
videoWriter = cv2.VideoWriter('output.avi', fourcc, 10.0, (SIZE*2,SIZE*3)) |
|
|
|
videoWriter = cv2.VideoWriter('output.avi', fourcc, 10.0, (SIZE*4,SIZE*4)) |
|
|
|
siftVideoWriter = cv2.VideoWriter('sift.avi', fourcc, 10.0, (SIZE*2,SIZE*1)) |
|
|
|
cv2.imshow('sample', np.zeros((SIZE*3,SIZE*2), np.uint8)) |
|
|
|
aver1 = np.zeros((SIZE,SIZE), np.uint16) |
|
|
|
aver2 = np.zeros((SIZE,SIZE), np.uint16) |
|
|
|
cnt = 0 |
|
|
|
avers = [] |
|
|
|
while True: |
|
|
|
if grideye.frame and grideye2.frame: |
|
|
|
if grideye.frame1 and grideye.frame2 and grideye2.frame1 and grideye2.frame2: |
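# Proceed only when all four sensor frames are fresh; copy them out and clear the slots
# under the readers' locks so the threads can publish the next set.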
|
|
|
grideye.lock.acquire() |
|
|
|
grideye2.lock.acquire() |
|
|
|
distanceSensor.lock.acquire() |
|
|
|
frame = grideye.frame |
|
|
|
grideye.frame = None |
|
|
|
frame2 = grideye2.frame |
|
|
|
grideye2.frame = None |
|
|
|
# frame2 = frame |
|
|
|
distance2Object = distanceSensor.distance |
|
|
|
distanceSensor.lock.release() |
|
|
|
frames = [grideye.frame1, grideye.frame2, grideye2.frame1, grideye2.frame2] |
|
|
|
grideye.frame1 = None |
|
|
|
grideye.frame2 = None |
|
|
|
grideye2.frame1 = None |
|
|
|
grideye2.frame2 = None |
|
|
|
distance2Object = grideye.distance + grideye2.distance + 1 |
|
|
|
print (distance2Object) |
|
|
|
if distance2Object <= 0: |
|
|
|
distance2Object = 200 |
|
|
|
grideye2.lock.release() |
|
|
|
grideye.lock.release() |
|
|
|
p = np.zeros((16,16), np.uint16) |
|
|
|
|
|
|
|
img = (np.array(frame.data)-15)*10 |
|
|
|
img = cv2.resize(img.astype(np.uint8), (SIZE,SIZE), interpolation = cv2.INTER_CUBIC) # INTER_LINEAR, INTER_CUBIC |
|
|
|
img2 = (np.array(frame2.data)-15)*10 |
|
|
|
img2 = cv2.resize(img2.astype(np.uint8), (SIZE,SIZE), interpolation = cv2.INTER_CUBIC) |
|
|
|
imgs = [] |
|
|
|
for frame in frames: |
|
|
|
img = (np.array(frame.data)-15)*10 |
|
|
|
img = cv2.resize(img.astype(np.uint8), (SIZE,SIZE), interpolation = cv2.INTER_LINEAR) # INTER_LINEAR, INTER_CUBIC |
|
|
|
imgs.append(img) |
|
|
|
avers.append(np.zeros((SIZE,SIZE), np.uint16)) |
|
|
|
|
|
|
|
|
|
|
|
if cnt < AVERAGE_FRAME: |
|
|
|
cnt += 1 |
|
|
|
aver1 += img |
|
|
|
aver2 += img2 |
|
|
|
for i in range(len(imgs)): |
|
|
|
avers[i] += imgs[i] |
|
|
|
if cnt == AVERAGE_FRAME: |
|
|
|
aver1 = aver1/AVERAGE_FRAME |
|
|
|
aver1 = aver1.astype(np.uint8) |
|
|
|
aver1 += ADJUST_BACK |
|
|
|
aver2 = aver2/AVERAGE_FRAME |
|
|
|
aver2 = aver2.astype(np.uint8) |
|
|
|
aver2 += ADJUST_BACK |
|
|
|
for i in range(len(avers)): |
|
|
|
avers[i] = avers[i]/AVERAGE_FRAME |
|
|
|
avers[i] = avers[i].astype(np.uint8) |
|
|
|
avers[i] += ADJUST_BACK |
|
|
|
continue |
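# The first AVERAGE_FRAME iterations only accumulate a per-sensor background image; once
# the average is built (plus ADJUST_BACK of headroom), every later frame has that
# background subtracted below, so only warmer-than-ambient objects remain in imgs[i].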
|
|
|
img = cv2.subtract(img, aver1) |
|
|
|
img2 = cv2.subtract(img2, aver2) |
|
|
|
|
|
|
|
for i in range(len(imgs)): |
|
|
|
imgs[i] = cv2.subtract(imgs[i], avers[i]) |
|
|
|
print ('xdd') |
|
|
|
|
|
|
|
out = np.full((SIZE*3, SIZE*2), 255, dtype=np.uint16) |
|
|
|
out[:SIZE, :SIZE] = img |
|
|
|
out[:SIZE, SIZE:] = img2 |
|
|
|
out = np.full((SIZE*4, SIZE*4), 255, dtype=np.uint16) |
|
|
|
out[:SIZE, :SIZE] = imgs[0] |
|
|
|
out[:SIZE, SIZE:SIZE*2] = imgs[1] |
|
|
|
out[SIZE:SIZE*2, :SIZE] = imgs[2] |
|
|
|
out[SIZE:SIZE*2, SIZE:SIZE*2] = imgs[3] |
|
|
|
|
|
|
|
try: |
|
|
|
overlap = int(SIZE - (distanceBetweenSensors / (2*distance2Object*math.tan(30.0/180.0*math.pi))) * SIZE) |
|
|
|
overlap_w = int(SIZE - (distanceBetweenSensors_w / (2*distance2Object*math.tan(30.0/180.0*math.pi))) * SIZE) |
|
|
|
except: |
|
|
|
overlap = 0 |
|
|
|
if overlap < 0: |
|
|
|
overlap = 0 |
|
|
|
offset = int(overlap/2) |
|
|
|
# tmp = cv2.resize(img.astype(np.uint8), (SIZE*2-overlap, SIZE)) |
|
|
|
# tmp.astype(np.uint16) |
|
|
|
tmp = np.zeros((SIZE, SIZE*2-overlap), dtype=np.uint16) |
|
|
|
tmp[:, :SIZE] = img |
|
|
|
tmp[:, -SIZE:] += img2 |
|
|
|
tmp[:, (SIZE-overlap): SIZE] = tmp[:, (SIZE-overlap): SIZE]/2 |
|
|
|
tmp = exponential(tmp, EXPONENTAL_VALUE) |
|
|
|
out[SIZE:SIZE*2, offset: SIZE*2-overlap+offset] = tmp |
|
|
|
# out[SIZE:SIZE*2, offset:SIZE+offset] = img |
|
|
|
# out[SIZE:SIZE*2, (SIZE-overlap)+offset:SIZE+offset] += img2[:,:overlap] |
|
|
|
# out[SIZE:SIZE*2, (SIZE-overlap)+offset:SIZE+offset] = out[SIZE:SIZE*2, (SIZE-overlap)+offset:SIZE+offset]/2 |
|
|
|
# out[SIZE:SIZE*2, SIZE+offset:SIZE+(SIZE-overlap)+offset] = img2[:,overlap:SIZE] |
|
|
|
overlap_w = 0 |
|
|
|
if overlap_w < 0: |
|
|
|
overlap_w = 0 |
|
|
|
|
|
|
|
try: |
|
|
|
overlap_h = int(SIZE - (distanceBetweenSensors_h / (2*distance2Object*math.tan(30.0/180.0*math.pi))) * SIZE) |
|
|
|
except: |
|
|
|
overlap_h = 0 |
|
|
|
if overlap_h < 0: |
|
|
|
overlap_h = 0 |
|
|
|
|
|
|
|
tmp = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
tmp[:, :SIZE] = imgs[0] |
|
|
|
tmp[:, -SIZE:] += imgs[1] |
|
|
|
tmp[:, (SIZE-overlap_w): SIZE] = tmp[:, (SIZE-overlap_w): SIZE]/2 |
|
|
|
|
|
|
|
tmp2 = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
tmp2[:, :SIZE] = imgs[2] |
|
|
|
tmp2[:, -SIZE:] += imgs[3] |
|
|
|
tmp2[:, (SIZE-overlap_w): SIZE] = tmp2[:, (SIZE-overlap_w): SIZE]/2 |
|
|
|
|
|
|
|
merge = np.zeros((SIZE*2-overlap_h, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
merge[:SIZE, :] = tmp |
|
|
|
merge[-SIZE:, :] += tmp2 |
|
|
|
merge[(SIZE-overlap_h):SIZE, :] = merge[(SIZE-overlap_h):SIZE, :]/2 |
|
|
|
# merge = exponential(merge, EXPONENTAL_VALUE) |
|
|
|
|
|
|
|
|
|
|
|
offset_w = int(overlap_w/2) |
|
|
|
offset_h = int(overlap_h/2) |
|
|
|
print (SIZE*2+offset_h, SIZE*4-overlap_h+offset_h, offset_w, SIZE*2-overlap_w+offset_w) |
|
|
|
out[SIZE*2+offset_h:SIZE*4-overlap_h+offset_h, offset_w: SIZE*2-overlap_w+offset_w] = merge |
|
|
|
|
|
|
|
|
|
|
|
maxProduct = 0 |
|
|
|
overlap2 = 0 |
|
|
|
overlap_w = 0 |
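# Brute-force overlap search: for each candidate overlap i, multiply the right strip of
# the left image with the left strip of the right image and keep the i that maximizes the
# mean product -- effectively a cross-correlation along x. The same search is repeated
# below along y to estimate the vertical overlap.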
|
|
|
for i in range(80, 128): |
|
|
|
product = sum(img[:,SIZE-i:].astype(np.uint32)*img2[:,:i].astype(np.uint32)) |
|
|
|
product = sum(imgs[0][:,SIZE-i:].astype(np.uint32)*imgs[1][:,:i].astype(np.uint32)) |
|
|
|
product += sum(imgs[2][:,SIZE-i:].astype(np.uint32)*imgs[3][:,:i].astype(np.uint32)) |
|
|
|
product = sum(product) / len(product) |
|
|
|
if product > maxProduct: |
|
|
|
maxProduct = product |
|
|
|
overlap2 = i |
|
|
|
overlap_w = i |
|
|
|
|
|
|
|
maxProduct = 0 |
|
|
|
overlap_h = 0 |
|
|
|
for i in range(80, 128): |
|
|
|
product = sum(imgs[0][SIZE-i:, :].astype(np.uint32)*imgs[2][:i,:].astype(np.uint32)) |
|
|
|
product += sum(imgs[1][SIZE-i:, :].astype(np.uint32)*imgs[3][:i,:].astype(np.uint32)) |
|
|
|
product = sum(product) / len(product) |
|
|
|
if product > maxProduct: |
|
|
|
maxProduct = product |
|
|
|
overlap_h = i |
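# Blend the mosaic: place the top pair side by side sharing overlap_w columns, do the same
# for the bottom pair, average each shared band, then stack the two strips sharing
# overlap_h rows to form the merged 2x2 panorama.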
|
|
|
|
|
|
|
tmp = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
tmp[:, :SIZE] = imgs[0] |
|
|
|
tmp[:, -SIZE:] += imgs[1] |
|
|
|
tmp[:, (SIZE-overlap_w): SIZE] = tmp[:, (SIZE-overlap_w): SIZE]/2 |
|
|
|
|
|
|
|
offset = int(overlap2/2) |
|
|
|
tmp = np.zeros((SIZE, SIZE*2-overlap2), dtype=np.uint16) |
|
|
|
tmp[:, :SIZE] = img |
|
|
|
tmp[:, -SIZE:] += img2 |
|
|
|
tmp[:, (SIZE-overlap2): SIZE] = tmp[:, (SIZE-overlap2): SIZE]/2 |
|
|
|
tmp = exponential(tmp, EXPONENTAL_VALUE) |
|
|
|
out[SIZE*2:, offset: SIZE*2-overlap2+offset] = tmp |
|
|
|
tmp2 = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
tmp2[:, :SIZE] = imgs[2] |
|
|
|
tmp2[:, -SIZE:] += imgs[3] |
|
|
|
tmp2[:, (SIZE-overlap_w): SIZE] = tmp2[:, (SIZE-overlap_w): SIZE]/2 |
|
|
|
|
|
|
|
merge = np.zeros((SIZE*2-overlap_h, SIZE*2-overlap_w), dtype=np.uint16) |
|
|
|
merge[:SIZE, :] = tmp |
|
|
|
merge[-SIZE:, :] += tmp2 |
|
|
|
merge[(SIZE-overlap_h):SIZE, :] = merge[(SIZE-overlap_h):SIZE, :]/2 |
|
|
|
|
|
|
|
# out[SIZE*2:, offset:SIZE+offset] = img |
|
|
|
# out[SIZE*2:, (SIZE-overlap2)+offset:SIZE+offset] += img2[:,:overlap2] |
|
|
|
# out[SIZE*2:, (SIZE-overlap2)+offset:SIZE+offset] = out[SIZE*2:, (SIZE-overlap2)+offset:SIZE+offset]/2 |
|
|
|
# out[SIZE*2:, SIZE+offset:SIZE+(SIZE-overlap2)+offset] = img2[:,overlap2:SIZE] |
|
|
|
out = out.astype(np.uint8) |
|
|
|
|
|
|
|
img = (np.array(frame.data)-15)*10 |
|
|
|
img = img.astype(np.uint8) |
|
|
|
img2 = (np.array(frame2.data)-15)*10 |
|
|
|
img2 = img2.astype(np.uint8) |
|
|
|
p = np.zeros((8,16), np.uint8) |
|
|
|
for i in range(16): |
|
|
|
if i%2 == 0: |
|
|
|
p[:,i] = img[:,int(i/2)] |
|
|
|
else: |
|
|
|
p[:,i] = img2[:,int(i/2)] |
|
|
|
p = cv2.resize(p, (SIZE,SIZE), interpolation = cv2.INTER_CUBIC) |
|
|
|
offset_w = int(overlap_w/2) |
|
|
|
offset_h = int(overlap_h/2) |
|
|
|
out[SIZE*2+offset_h:SIZE*4-overlap_h+offset_h, SIZE*2+offset_w: SIZE*4-overlap_w+offset_w] = merge |
|
|
|
|
|
|
|
# offset = int(overlap2/2) |
|
|
|
# tmp = np.zeros((SIZE, SIZE*2-overlap2), dtype=np.uint16) |
|
|
|
# tmp[:, :SIZE] = img |
|
|
|
# tmp[:, -SIZE:] += img2 |
|
|
|
# tmp[:, (SIZE-overlap2): SIZE] = tmp[:, (SIZE-overlap2): SIZE]/2 |
|
|
|
# tmp = exponential(tmp, EXPONENTAL_VALUE) |
|
|
|
# out[SIZE*2:, offset: SIZE*2-overlap2+offset] = tmp |
|
|
|
|
|
|
|
|
|
|
|
out = out.astype(np.uint8) |
|
|
|
out = exponential(out, EXPONENTAL_VALUE) |
|
|
|
|
|
|
|
cv2.imshow('sample', out) |
|
|
|
cv2.imshow('p', p) |
|
|
|
videoWriter.write(cv2.cvtColor(out,cv2.COLOR_GRAY2BGR)) |
|
|
|
key = cv2.waitKey(1) |
|
|
|
if key == ord('q'): |
|
|
|
break |
|
|
|
elif key == ord('a'): |
|
|
|
overlap += 1 |
|
|
|
elif key == ord('d'): |
|
|
|
overlap -= 1 |
|
|
|
elif key == ord('c'): |
|
|
|
cv2.imwrite('out.jpg', out) |
|
|
|
try: |
|
|
|
sift = cv2.xfeatures2d.SIFT_create() |
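# SIFT-based comparison of the two exponential()-scaled top images: detect keypoints,
# match descriptors with a FLANN matcher, keep matches passing Lowe's ratio test (0.8),
# and record the drawn correspondences to sift.avi.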
|
|
|
img1 = exponential(imgs[0], EXPONENTAL_VALUE) |
|
|
|
img2 = exponential(imgs[1], EXPONENTAL_VALUE) |
|
|
|
kp_1, desc_1 = sift.detectAndCompute(img1, None) |
|
|
|
kp_2, desc_2 = sift.detectAndCompute(img2, None) |
|
|
|
|
|
|
|
index_params = dict(algorithm=0, trees=5) |
|
|
|
search_params = dict() |
|
|
|
flann = cv2.FlannBasedMatcher(index_params, search_params) |
|
|
|
matches = flann.knnMatch(desc_1, desc_2, k=2) |
|
|
|
|
|
|
|
good_points = [] |
|
|
|
ratio = 0.8 |
|
|
|
for m, n in matches: |
|
|
|
if m.distance < ratio*n.distance: |
|
|
|
good_points.append(m) |
|
|
|
result = cv2.drawMatches(img1, kp_1, img2, kp_2, good_points, None) |
|
|
|
cv2.imshow("result", result) |
|
|
|
print (result.shape) |
|
|
|
siftVideoWriter.write(result) |
|
|
|
except: |
|
|
|
pass |
|
|
|
key = cv2.waitKey(1) |
|
|
|
if key == ord('q'): |
|
|
|
break |
|
|
|
elif key == ord('c'): |
|
|
|
cv2.imwrite('out.jpg', out) |
|
|
|
time.sleep(0.001) |
|
|
|
grideye.stop() |
|
|
|
grideye2.stop() |
|
|
|
videoWriter.release() |
|
|
|
siftVideoWriter.release() |
|
|
|
cv2.destroyAllWindows() |
|
|
|
|