555
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

247 lines
10 KiB

  1. import json
  2. class Frame():
  3. def __init__(self, time, data):
  4. self.time = time
  5. self.data = data
  6. class GridEyeData():
  7. def __init__(self, filePath):
  8. self.f = open(filePath, 'r')
  9. self.frames = [None]*4
  10. def readFrame(self):
  11. time = self.f.readline()
  12. if not time:
  13. return False
  14. time = float(time)
  15. for i in range(4):
  16. data = json.loads(self.f.readline())
  17. self.frames[i] = Frame(time, data)
  18. return True
if __name__ == '__main__':
    import cv2
    import numpy as np
    import sys
    from functools import reduce

    def exponential(img, value):
        # Gamma-style stretch for display: img**value rescaled back into
        # 0-255. value < 1 brightens the dark (cool) end of the range.
        tmp = cv2.pow(img.astype(np.double), value)*(255.0/(255.0**value))
        return tmp.astype(np.uint8)

    # --- tuning constants -------------------------------------------------
    SIZE = 128                # side length (px) each sensor frame is upscaled to
    AVERAGE_FRAME = 10        # number of leading frames averaged into the background
    distanceBetweenSensors_w = 2.6 #cm
    distanceBetweenSensors_h = 2.6 #cm
    distance2Object = 60.0 #cm
    ADJUST_BACK = 5           # background lift so cv2.subtract clips sensor noise to 0
    EXPONENTAL_VALUE = 0.4    # exponent used by exponential() for display
    PRODUCTION_THRESHOLD = 10 # minimum inner-product score for a valid detection
    MIN_EXIST_TIME = 0.1      # seconds a track must persist before speed is shown

    # --- mutable state ----------------------------------------------------
    cnt = 0                   # calibration frames consumed so far
    avers = []                # per-sensor background images (summed, then averaged)
    raw_aver = np.array([0]*64*4, np.float64)   # running sum of raw pixels (4 sensors x 64 px)
    raw_aver2 = np.array([0]*64*4, np.float64)  # running sum of squares (for noise std-dev)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    videoWriter = cv2.VideoWriter('output.avi', fourcc, 10.0, (SIZE*2,SIZE*4))
    cv2.imshow('sample', np.zeros((SIZE*4,SIZE*2), np.uint8))
    gridEye = GridEyeData(sys.argv[1])  # recorded Grid-EYE log passed on the command line
    hasLastFrame = False
    hasPos = False            # currently tracking a target in the merged view
    innerHasPos = False       # currently tracking inside the central region
    endTime = 0
    startTime = 0
    innerEndTime = 0
    innerStartTime = 0
    path = []                 # tracked positions (merged-image coordinates)
    speed = 0
    avers.append(np.zeros((SIZE,SIZE), np.uint16))
    avers.append(np.zeros((SIZE,SIZE), np.uint16))
    avers.append(np.zeros((SIZE,SIZE), np.uint16))
    avers.append(np.zeros((SIZE,SIZE), np.uint16))

    while gridEye.readFrame():
        frames = gridEye.frames
        imgs = []   # upscaled uint8 image per sensor
        raw = []    # flattened raw values of all four sensors, in sensor order
        for frame in frames:
            # Map raw temperature values to 8-bit brightness: (T - 15) * 10.
            img = (np.array(frame.data)-15)*10
            img = cv2.resize(img.astype(np.uint8), (SIZE,SIZE), interpolation = cv2.INTER_LINEAR) # INTER_LINEAR, INTER_CUBIC
            imgs.append(img)
            raw += reduce(lambda x,y: x+y, frame.data)  # flatten rows into one list
        raw_aver += np.array(raw)
        raw_aver2 += np.array(raw)**2

        if cnt < AVERAGE_FRAME:
            # Calibration phase: accumulate the first AVERAGE_FRAME frames.
            cnt += 1
            for i in range(len(imgs)):
                avers[i] += imgs[i]
            if cnt == AVERAGE_FRAME:
                # Per-pixel noise statistics: std = sqrt(E[x^2] - E[x]^2).
                b = (raw_aver/AVERAGE_FRAME)**2
                a = raw_aver2/AVERAGE_FRAME
                print ('aver', raw_aver/AVERAGE_FRAME)
                print ((a-b)**0.5)
                print (sum((a-b)**0.5)/64/4)
                # NOTE(review): this exit() looks like leftover debug code for
                # the noise measurement above -- it terminates the program here,
                # so the background normalisation below and the entire tracking
                # loop never run. Confirm whether it should be removed.
                exit()
                # Finish calibration: turn the accumulated sums into a lifted
                # mean background per sensor (currently unreachable, see above).
                for i in range(len(avers)):
                    avers[i] = avers[i]/AVERAGE_FRAME
                    avers[i] = avers[i].astype(np.uint8)
                    avers[i] += ADJUST_BACK
            continue

        # Background subtraction per sensor (saturating at 0 via cv2.subtract).
        for i in range(len(imgs)):
            imgs[i] = cv2.subtract(imgs[i], avers[i])

        # Output canvas: top half shows the four sensors in a 2x2 grid;
        # bottom half receives the stitched/merged view.
        out = np.full((SIZE*4, SIZE*2), 255, dtype=np.uint16)
        out[:SIZE, :SIZE] = imgs[0]
        out[:SIZE, SIZE:SIZE*2] = imgs[1]
        out[SIZE:SIZE*2, :SIZE] = imgs[2]
        out[SIZE:SIZE*2, SIZE:SIZE*2] = imgs[3]

        # production
        # (disabled: search for the best overlap by maximising the inner
        # product of the overlapping strips)
        '''
        maxProduct = 0
        overlap_w = 0
        for i in range(80, 128):
            product = sum(imgs[0][:,SIZE-i:].astype(np.uint32)*imgs[1][:,:i].astype(np.uint32))
            product += sum(imgs[2][:,SIZE-i:].astype(np.uint32)*imgs[3][:,:i].astype(np.uint32))
            product = sum(product) / len(product)
            if product > maxProduct:
                maxProduct = product
                overlap_w = i
        tmp = maxProduct
        maxProduct = 0
        overlap_h = 0
        for i in range(80, 128):
            product = sum(imgs[0][SIZE-i:, :].astype(np.uint32)*imgs[2][:i,:].astype(np.uint32))
            product += sum(imgs[1][SIZE-i:, :].astype(np.uint32)*imgs[3][:i,:].astype(np.uint32))
            product = sum(product) / len(product)
            if product > maxProduct:
                maxProduct = product
                overlap_h = i
        maxProduct = (tmp + maxProduct)/2
        # fixed overlap_h
        '''
        # Fixed overlap instead of the search above.
        maxProduct = 0
        overlaps = 125      # fixed overlap (px) between adjacent sensor images
        overlap_w = overlaps
        overlap_h = overlaps
        # (disabled: inner-product score at the fixed overlap)
        '''
        product = sum(imgs[0][:,SIZE-overlaps:].astype(np.uint32)*imgs[1][:,:overlaps].astype(np.uint32))
        product += sum(imgs[2][:,SIZE-overlaps:].astype(np.uint32)*imgs[3][:,:overlaps].astype(np.uint32))
        product = sum(product) / len(product)
        maxProduct = product
        tmp = maxProduct
        maxProduct = 0
        product = sum(imgs[0][SIZE-overlaps:, :].astype(np.uint32)*imgs[2][:overlaps,:].astype(np.uint32))
        product += sum(imgs[1][SIZE-overlaps:, :].astype(np.uint32)*imgs[3][:overlaps,:].astype(np.uint32))
        product = sum(product) / len(product)
        maxProduct = product
        maxProduct = (tmp + maxProduct)/2
        '''
        #if maxProduct > PRODUCTION_THRESHOLD:
        if True:
            # Stitch the four images into one merged view using the fixed
            # overlaps; overlapping strips are averaged (sum then halve).
            tmp = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16)
            tmp[:, :SIZE] = imgs[0]
            tmp[:, -SIZE:] += imgs[1]
            tmp[:, (SIZE-overlap_w): SIZE] = tmp[:, (SIZE-overlap_w): SIZE]/2
            tmp2 = np.zeros((SIZE, SIZE*2-overlap_w), dtype=np.uint16)
            tmp2[:, :SIZE] = imgs[2]
            tmp2[:, -SIZE:] += imgs[3]
            tmp2[:, (SIZE-overlap_w): SIZE] = tmp2[:, (SIZE-overlap_w): SIZE]/2
            merge = np.zeros((SIZE*2-overlap_h, SIZE*2-overlap_w), dtype=np.uint16)
            merge[:SIZE, :] = tmp
            merge[-SIZE:, :] += tmp2
            merge[(SIZE-overlap_h):SIZE, :] = merge[(SIZE-overlap_h):SIZE, :]/2
            offset_w = int(overlap_w/2)
            offset_h = int(overlap_h/2)
            # Paste the merged view centred in the bottom half of the canvas.
            out[SIZE*2+offset_h:SIZE*4-overlap_h+offset_h, offset_w: SIZE*2-overlap_w+offset_w] = merge
            # (disabled: intensity-weighted centroid tracking and speed bookkeeping)
            '''
            position = [0,0]
            rows,cols = merge.shape
            for i in range(rows):
                for j in range(cols):
                    position[0] += i*merge[i][j]
                    position[1] += j*merge[i][j]
            position[0] /= sum(sum(merge))
            position[1] /= sum(sum(merge))
            pos_w = 1.17*position[0] #distanceBetweenSensors_w/(SIZE-overlap_w)*position[0]
            pos_h = 1.17*position[1] #distanceBetweenSensors_h/(SIZE-overlap_h)*position[1]
            if not hasPos:
                startPos = [pos_w, pos_h]
                sp = position
                path = []
                truePath = []
                times = []
                startTime = frames[0].time
                hasPos = True
            if not innerHasPos and pos_w >= 16 and pos_w <= 109 and pos_h >= 16 and pos_h <= 109:
                innerStartPos = [pos_w, pos_h]
                innerStartTime = frames[0].time
                innerHasPos = True
            if pos_w >= 16 and pos_w <= 109 and pos_h >= 16 and pos_h <= 109:
                innerEndPos = [pos_w, pos_h]
                innerEndTime = frames[0].time
            elif innerHasPos:
                if innerEndTime - innerStartTime > 0:
                    print (innerStartPos, innerEndPos)
                    print ('inner speed:', ((innerEndPos[0]-innerStartPos[0])**2+(innerEndPos[1]-innerStartPos[1])**2)**0.5/(innerEndTime - innerStartTime))
                    print ('time:', innerEndTime-innerStartTime)
                innerHasPos = False
            endPos = [pos_w, pos_h]
            endTime = frames[0].time
            path.append(position)
            truePath.append(endPos)
            times.append(frames[0].time)
            '''
        elif hasPos:
            # Target lost (detection score below threshold): report the mean
            # speed over the whole track, then reset the tracking state.
            # NOTE(review): dead branch while the condition above is `if True:`.
            if endTime - startTime > 0:
                print (startPos, endPos)
                print ('speed:', ((endPos[0]-startPos[0])**2+(endPos[1]-startPos[1])**2)**0.5/(endTime - startTime))
                print ('time:', endTime-startTime)
            if innerHasPos and innerEndTime - innerStartTime > 0:
                print (innerStartPos, innerEndPos)
                print ('inner speed:', ((innerEndPos[0]-innerStartPos[0])**2+(innerEndPos[1]-innerStartPos[1])**2)**0.5/(innerEndTime - innerStartTime))
                print ('time:', innerEndTime-innerStartTime)
            hasPos = False
            innerHasPos = False
        # Convert to displayable 8-bit BGR and brighten for viewing/recording.
        out = out.astype(np.uint8)
        out = exponential(out, EXPONENTAL_VALUE)
        out = cv2.cvtColor(out,cv2.COLOR_GRAY2BGR)
        # (disabled: speed text, centroid markers and path drawing overlays)
        '''
        if endTime - startTime > MIN_EXIST_TIME:
            speed = ((endPos[0]-startPos[0])**2+(endPos[1]-startPos[1])**2)**0.5/(endTime - startTime)
            cv2.putText(out, f'{speed:.2f}',
                (0, SIZE*2),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            speed = ((truePath[-1][0]-truePath[-2][0])**2+(truePath[-1][1]-truePath[-2][1])**2)**0.5/(times[-1] - times[-2])
            cv2.putText(out, f'{speed:.2f}',
                (0, SIZE*2+30),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
        if maxProduct > PRODUCTION_THRESHOLD:
            cv2.circle(out, (offset_w+int(position[1]), SIZE*2+offset_h+int(position[0])), 10, (255,0,0), 5)
            cv2.circle(out, (offset_w+int(sp[1]), SIZE*2+offset_h+int(sp[0])), 10, (0,255,0), 5)
            for i in range(len(path)-1):
                cv2.line(out, (offset_w+int(path[i][1]), SIZE*2+offset_h+int(path[i][0])), (offset_w+int(path[i+1][1]), SIZE*2+offset_h+int(path[i+1][0])), (0,0,255))
            cv2.line
            lastFrame = out[SIZE*2:,:]
            hasLastFrame = True
        elif hasLastFrame:
            out[SIZE*2:,:] = lastFrame
        '''
        # Show and record the frame; 'q' quits.
        cv2.imshow('sample', out)
        videoWriter.write(out)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    videoWriter.release()
    cv2.destroyAllWindows()