Archive for author "mars"
M42: The Great Orion Nebula
Photographing a Solar Transit of the ISS
The date, time, and location at which the International Space Station passes in front of the Sun or the Moon can be looked up with ISS TRANSIT FINDER.
On November 25, a solar transit of the International Space Station looked observable from a riverside park a short distance from home, so I took a full set of equipment (camera, small equatorial mount, tripod) and tried to capture it. On this pass the ISS was more than 1,600 km away, too far to make out its characteristic silhouette, but I was able to run through the whole workflow up to the shot. The embedded player below is scaled down and hard to see; viewing the clip on YouTube is easier. The ISS moves from lower right to upper left.
Remaining issues
Optimizing the camera settings (video vs. still, shutter speed, drive mode (continuous / high-speed burst, etc.))
Equipment
Bonus: while setting up, I captured an aircraft transiting the Sun.
Displaying meteor observation data with Streamlit
# -*- coding: utf-8 -*-
import os
import glob
import re
import cv2
import numpy as np
import streamlit as st
from PIL import Image

PATH = '/home/metro//DATA/'

def comp_b2(A, B):
    # Lighten composite ("comparison bright") of two frames:
    # for every pixel, keep whichever of A and B is brighter.
    # https://nyanpyou.hatenablog.com/entry/2020/03/20/132937
    gray_img1 = cv2.cvtColor(A, cv2.COLOR_BGR2GRAY)
    gray_img2 = cv2.cvtColor(B, cv2.COLOR_BGR2GRAY)
    # Mask marking the pixels where A is the brighter image.
    mask_img1 = np.where(gray_img1 > gray_img2, 255, 0).astype(np.uint8)
    # Mask for B (0 and 255 swapped): pixels where B is brighter.
    mask_img2 = np.where(mask_img1 == 255, 0, 255).astype(np.uint8)
    # Cut the brighter pixels out of each source image and merge them.
    masked_img1 = cv2.bitwise_and(A, A, mask=mask_img1)
    masked_img2 = cv2.bitwise_and(B, B, mask=mask_img2)
    return masked_img1 + masked_img2

def disp(device):
    cap = cv2.VideoCapture(device)
    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    W2, H2 = int(W / 2), int(H / 2)
    image_loc = st.empty()
    prev = None
    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break
        if W == 1920:
            img = cv2.resize(img, dsize=(W2, H2))
        if prev is None:
            prev = img.copy()
        else:
            # Accumulate a lighten composite over all frames.
            prev = comp_b2(prev, img)
        image_loc.image(Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)))
    cap.release()
    # After playback, show the final composite image.
    if prev is not None:
        image_loc = st.empty()
        image_loc.image(Image.fromarray(cv2.cvtColor(prev, cv2.COLOR_BGR2RGB)))
    #st.button('Replay')

def main():
    st.header("Meteor observation data viewer")
    col1, col2, col3 = st.columns([1, 1, 3])
    with col1:
        date = st.date_input('DATE')
    path = PATH + date.strftime("%Y%m%d")
    selected = []
    f_name = []
    TL = []
    if os.path.exists(path):
        files = glob.glob(path + '/*avi')
        # Time filter: m[4] is the HHMMSS field of the file name.
        for opt in files:
            m = re.split('[_.]', opt)
            if len(m) > 4:
                TL.append(int(int(m[4]) / 10000))
        TL = sorted(set(TL))  # sort and drop duplicates
        # Select box for the hour to display.
        with col2:
            selected_item = st.selectbox('TIME', TL)
        if selected_item is not None:
            selT = int(selected_item)
            for opt in files:
                m = re.split('[_.]', opt)
                if len(m) > 4 and int(int(m[4]) / 10000) == selT:
                    selected.append(opt)
        for name in selected:
            f_name.append(name.rsplit('/', 1)[1])
        with col3:
            option = st.selectbox('FILE to DISPLAY', f_name)
        if option is not None:
            disp(path + '/' + option)
    else:
        st.write('No data exists!')

if __name__ == '__main__':
    main()
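The viewer is launched with the Streamlit CLI, which serves the app on a local port and reruns the script whenever a widget changes (the file name below is only a placeholder):

$ streamlit run meteor_view.py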
Playing back video with Streamlit
# -*- coding: utf-8 -*-
import os
import glob
import cv2
import streamlit as st
from PIL import Image

PATH = '/home/mars/pWork/DATA/'

def disp(device):
    cap = cv2.VideoCapture(device)
    image_loc = st.empty()
    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break
        # Convert BGR (OpenCV) to RGB (PIL) and update the placeholder in place.
        image_loc.image(Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)))
    cap.release()
    st.button('Replay')

def main():
    st.header("Meteor observation data viewer")
    date = st.date_input('Select date')
    path = PATH + date.strftime("%Y%m%d")
    #st.write(path)
    if os.path.exists(path):
        files = glob.glob(path + '/*avi')
        option = st.selectbox('Select file:', files)
        if option is not None:
            disp(option)
    else:
        st.write('No data exists!')

if __name__ == '__main__':
    main()
I would like to add code that narrows the file choices down by various conditions.
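As a starting point, a minimal sketch of such a filter, assuming the file names end in _HHMMSS.avi as in the scripts above; the helper name and the thresholds are placeholders of mine:

import os
import re

def filter_files(files, t_from=0, t_to=235959, min_bytes=0):
    # Keep files whose HHMMSS stamp falls within [t_from, t_to]
    # and whose size is at least min_bytes.
    out = []
    for f in files:
        m = re.search(r'_(\d{6})\.avi$', f)
        if m is None:
            continue
        t = int(m.group(1))
        if t_from <= t <= t_to and os.path.getsize(f) >= min_bytes:
            out.append(f)
    return out

# Example: evening files of at least 500 kB.
# files = filter_files(glob.glob(path + '/*avi'), t_from=180000, min_bytes=500 * 1024)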
Testing CUDA and threading support
A version that supports multiple network cameras and can take advantage of CUDA (GPU) and threading.
import cv2
import numpy as np
import time
import datetime
import os
import sys
import threading
import queue

GPU = True
THREAD = True

# Pick a camera from the command line: 'A' (or no argument) selects the
# ATOM camera, anything else the ONVIF camera.
if len(sys.argv) > 1 and sys.argv[1] != 'A':
    cPATH = 'rtsp://admin:@192.168.68.128:554/1/h264major'
else:
    cPATH = 'rtsp://5173:6703@192.168.68.74/live'
PATH = "/home/mars/pWork/DATA"

class ThreadingVideoCapture:
    # Read frames on a background thread and hand them over via a queue.
    def __init__(self, src, max_queue_size=256):
        self.video = cv2.VideoCapture(src)
        self.q = queue.Queue(maxsize=max_queue_size)
        self.stopped = False

    def start(self):
        thread = threading.Thread(target=self.update, daemon=True)
        thread.start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return
            if not self.q.full():
                ok, frame = self.video.read()
                self.q.put((ok, frame))
                if not ok:
                    self.stop()
                    return

    def read(self):
        return self.q.get()

    def stop(self):
        self.stopped = True

    def release(self):
        self.stopped = True
        self.video.release()

    def isOpened(self):
        return self.video.isOpened()

    def get(self, i):
        return self.video.get(i)

def key(k):
    # Runtime tuning: 2/3 adjust the threshold, 4/5 the minimum contour
    # size, 't' toggles recording, 'r' toggles image inversion.
    global th, tc, track, reverse
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc - 5
    elif k == ord('5'):
        tc = tc + 5
    elif k == ord('t'):
        track = not track
    elif k == ord('r'):
        reverse = not reverse

fontFace = cv2.FONT_HERSHEY_SIMPLEX
track, reverse = False, False
avg = None
writer = None
th = 30
tc = 30
x, y = 0, 0
detect_counts = 0
red, blue, green = (0, 0, 255), (255, 0, 0), (0, 255, 0)
time_start = time.time()
frame = 0
log = PATH + '/metro.log'

if cPATH == 'rtsp://5173:6703@192.168.68.74/live':
    TITLE = "ATOM"
    HEAD = 'ATOM'
else:
    TITLE = "ONVIF"
    HEAD = 'ONVIF'

if THREAD:
    TITLE = TITLE + "-T"
    capture = ThreadingVideoCapture(cPATH)
    capture.start()
    if not capture.isOpened():
        raise RuntimeError
else:
    capture = cv2.VideoCapture(cPATH)

if GPU:
    TITLE = TITLE + "-G"
    # Allocate device memory only once, as memory allocation seems to take time...
    img_gpu_src = cv2.cuda_GpuMat()
    img_gpu_dst = cv2.cuda_GpuMat()
    img_gpu_gray = cv2.cuda_GpuMat()

W = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
H = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2 = int(W / 2)
H2 = int(H / 2)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
print('Camera:', cPATH)
print('Size:', W, H)

while True:
    ret, img = capture.read()
    if ret:
        org = img.copy()
        if GPU:
            # Resize, crop and grayscale on the GPU.
            img_gpu_src.upload(img)
            img_gpu_dst = cv2.cuda.resize(img_gpu_src, dsize=(W2, H2))
            # Drop the bottom 15% of the frame from detection.
            img_gpu_dst = cv2.cuda_GpuMat(img_gpu_dst, [0, int(H2 * 0.85)], [0, W2])
            img_gpu_gray = cv2.cuda.cvtColor(img_gpu_dst, cv2.COLOR_BGR2GRAY)
            org_img = img_gpu_dst.download()
            gray = img_gpu_gray.download()
        else:
            org_img = cv2.resize(img, dsize=(W2, H2))
            org_img = org_img[0:int(H2 * 0.85), 0:W2]
            gray = cv2.cvtColor(org_img, cv2.COLOR_BGR2GRAY)
        if reverse:
            gray = cv2.bitwise_not(gray)
            if avg is not None:
                avg = cv2.bitwise_not(avg)
        if avg is None:
            avg = gray.copy().astype("float")
            continue
        # Motion detection: running average -> frame delta -> threshold -> contours.
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        detect = False
        for i in range(0, len(contours)):
            if len(contours[i]) > 0:
                if cv2.contourArea(contours[i]) > tc:
                    detect = True
                    time_start = time.time()
                    if writer is None and track:
                        detect_counts = 0
                        now = datetime.datetime.today()
                        date = now.strftime("%Y%m%d")
                        cDIR = PATH + '/' + date
                        if not os.path.exists(cDIR):
                            os.mkdir(cDIR)
                        fname = cDIR + '/' + HEAD + now.strftime("%Y%m%d_%H%M%S") + ".avi"
                        writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
                    rect = contours[i]
                    x, y, w, h = cv2.boundingRect(rect)
                    cv2.rectangle(org_img, (x - w, y - h), (x + w * 2, y + h * 2), red, 3)
        if detect:
            detect_counts = detect_counts + 1
        if time.time() - time_start > 5:
            # No motion for 5 seconds: close the clip and embed the
            # detection count into the file name.
            if writer is not None:
                writer.release()
                new_name = fname.replace(HEAD, HEAD + '_' + f'{detect_counts:04}' + '_')
                os.rename(fname, new_name)
                frame = 0
                writer = None
        now = datetime.datetime.today()
        text = now.strftime("%Y%m%d %H%M%S") + ' No:' + str(frame) + ' ' + " TH:" + str(th) + " SZ:" + str(tc)
        org_img = cv2.putText(org_img, text, (30, 50), fontFace, 1, color=green)
        org = cv2.putText(org, text, (30, 50), fontFace, 1, color=green)
        text1 = "REC:" + str(track) + " reverse:" + str(reverse)
        if writer is not None:
            frame = frame + 1
            text1 = fname + ' ' + text1
        org_img = cv2.putText(org_img, text1, (30, 80), fontFace, 1, color=green)
        #cv2.imshow('thresh-level', thresh)
        cv2.imshow(TITLE, org_img)
        if writer is not None:
            writer.write(org)
    else:
        # Stream dropped: reconnect and reset the running average.
        now = datetime.datetime.today()
        date = now.strftime("%Y%m%d_%H%M%S")
        print('reconnect:', date)
        capture.release()
        avg = None
        if THREAD:
            capture = ThreadingVideoCapture(cPATH)
            capture.start()
        else:
            capture = cv2.VideoCapture(cPATH)
    k = cv2.waitKey(1) & 0xFF
    key(k)
    if k == ord('q'):
        break

capture.release()
if writer is not None:
    writer.release()
    new_name = fname.replace(HEAD, HEAD + '_' + f'{detect_counts:04}' + '_')
    os.rename(fname, new_name)
cv2.destroyAllWindows()
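Camera selection is driven by the first command-line argument: 'A' (or no argument at all) picks the ATOM camera, anything else falls through to the ONVIF camera. For example (the script name is only a placeholder):

$ python3 metro_cuda.py A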
moon
cv2 with CUDA enabled on Jetson Nano
Built and installed OpenCV from source.
Importing cv2 under python3 crashed with a core dump.
The fix: export OPENBLAS_CORETYPE=ARMV8
Solved by referring to this article: "Illegal instruction (core dumped)" in the Python3 environment on Jetson Nano.
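To make the workaround survive new shells, the export can be appended to the shell startup file:

$ echo 'export OPENBLAS_CORETYPE=ARMV8' >> ~/.bashrc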
Speed comparison (CPU vs. GPU resize): roughly 3x. The trailing "1" in each run is the CUDA device count printed at the end of the script.
$ sudo nvpmodel -m 0
$ sudo jetson_clocks
$ python3 opencv_cuda.py
CPU = 2.7655137538909913[msec]
GPU = 1.0501614570617677[msec]
1
$ python3 opencv_cuda.py
CPU = 2.7816075325012206[msec]
GPU = 0.9869620561599731[msec]
1
opencv_cuda.py
import sys
import time
import cv2

### VALUES
NUM_REPEAT = 10000

### Read source image
img_src = cv2.imread("resource/lena.jpg")
cv2.imshow('img_src', img_src)

### Run with CPU
time_start = time.time()
for i in range(NUM_REPEAT):
    img_dst = cv2.resize(img_src, (300, 300))
time_end = time.time()
print("CPU = {0}".format((time_end - time_start) * 1000 / NUM_REPEAT) + "[msec]")
cv2.imshow('CPU', img_dst)

### Run with GPU
img_gpu_src = cv2.cuda_GpuMat()  # Allocate device memory only once, as memory allocation seems to take time...
img_gpu_dst = cv2.cuda_GpuMat()
time_start = time.time()
for i in range(NUM_REPEAT):
    img_gpu_src.upload(img_src)
    img_gpu_dst = cv2.cuda.resize(img_gpu_src, (300, 300))
    img_dst = img_gpu_dst.download()
time_end = time.time()
print("GPU = {0}".format((time_end - time_start) * 1000 / NUM_REPEAT) + "[msec]")
cv2.imshow('GPU', img_dst)

key = cv2.waitKey(0)
cv2.destroyAllWindows()
print(cv2.cuda.getCudaEnabledDeviceCount())
Listing the functions available under cv2.cuda
import cv2
cv2.__version__
dir(cv2.cuda)
['ALPHA_ATOP', 'ALPHA_ATOP_PREMUL', 'ALPHA_IN', 'ALPHA_IN_PREMUL', 'ALPHA_OUT', 'ALPHA_OUT_PREMUL', 'ALPHA_OVER', 'ALPHA_OVER_PREMUL', 'ALPHA_PLUS', 'ALPHA_PLUS_PREMUL', 'ALPHA_PREMUL', 'ALPHA_XOR', 'ALPHA_XOR_PREMUL', 'BroxOpticalFlow_create', 'COLOR_BAYER_BG2BGR_MHT', 'COLOR_BAYER_BG2GRAY_MHT', 'COLOR_BAYER_BG2RGB_MHT', 'COLOR_BAYER_GB2BGR_MHT', 'COLOR_BAYER_GB2GRAY_MHT', 'COLOR_BAYER_GB2RGB_MHT', 'COLOR_BAYER_GR2BGR_MHT', 'COLOR_BAYER_GR2GRAY_MHT', 'COLOR_BAYER_GR2RGB_MHT', 'COLOR_BAYER_RG2BGR_MHT', 'COLOR_BAYER_RG2GRAY_MHT', 'COLOR_BAYER_RG2RGB_MHT', 'COLOR_BayerBG2BGR_MHT', 'COLOR_BayerBG2GRAY_MHT', 'COLOR_BayerBG2RGB_MHT', 'COLOR_BayerGB2BGR_MHT', 'COLOR_BayerGB2GRAY_MHT', 'COLOR_BayerGB2RGB_MHT', 'COLOR_BayerGR2BGR_MHT', 'COLOR_BayerGR2GRAY_MHT', 'COLOR_BayerGR2RGB_MHT', 'COLOR_BayerRG2BGR_MHT', 'COLOR_BayerRG2GRAY_MHT', 'COLOR_BayerRG2RGB_MHT', 'CascadeClassifier_create', 'DEVICE_INFO_COMPUTE_MODE_DEFAULT', 'DEVICE_INFO_COMPUTE_MODE_EXCLUSIVE', 'DEVICE_INFO_COMPUTE_MODE_EXCLUSIVE_PROCESS', 'DEVICE_INFO_COMPUTE_MODE_PROHIBITED', 'DYNAMIC_PARALLELISM', 'DensePyrLKOpticalFlow_create', 'DescriptorMatcher_createBFMatcher', 'DeviceInfo_ComputeModeDefault', 'DeviceInfo_ComputeModeExclusive', 'DeviceInfo_ComputeModeExclusiveProcess', 'DeviceInfo_ComputeModeProhibited', 'EVENT_BLOCKING_SYNC', 'EVENT_DEFAULT', 'EVENT_DISABLE_TIMING', 'EVENT_INTERPROCESS', 'Event_BLOCKING_SYNC', 'Event_DEFAULT', 'Event_DISABLE_TIMING', 'Event_INTERPROCESS', 'Event_elapsedTime', 'FEATURE_SET_COMPUTE_10', 'FEATURE_SET_COMPUTE_11', 'FEATURE_SET_COMPUTE_12', 'FEATURE_SET_COMPUTE_13', 'FEATURE_SET_COMPUTE_20', 'FEATURE_SET_COMPUTE_21', 'FEATURE_SET_COMPUTE_30', 'FEATURE_SET_COMPUTE_32', 'FEATURE_SET_COMPUTE_35', 'FEATURE_SET_COMPUTE_50', 'FarnebackOpticalFlow_create', 'FastFeatureDetector_create', 'GLOBAL_ATOMICS', 'GpuMat_defaultAllocator', 'GpuMat_setDefaultAllocator', 'HOG_create', 'HOST_MEM_PAGE_LOCKED', 'HOST_MEM_SHARED', 'HOST_MEM_WRITE_COMBINED', 'HostMem_PAGE_LOCKED', 'HostMem_SHARED', 'HostMem_WRITE_COMBINED', 'NATIVE_DOUBLE', 'NVIDIA_OPTICAL_FLOW_1_0_NV_OF_PERF_LEVEL_FAST', 'NVIDIA_OPTICAL_FLOW_1_0_NV_OF_PERF_LEVEL_MAX', 'NVIDIA_OPTICAL_FLOW_1_0_NV_OF_PERF_LEVEL_MEDIUM', 'NVIDIA_OPTICAL_FLOW_1_0_NV_OF_PERF_LEVEL_SLOW', 'NVIDIA_OPTICAL_FLOW_1_0_NV_OF_PERF_LEVEL_UNDEFINED', 'NvidiaOpticalFlow_1_0_NV_OF_PERF_LEVEL_FAST', 'NvidiaOpticalFlow_1_0_NV_OF_PERF_LEVEL_MAX', 'NvidiaOpticalFlow_1_0_NV_OF_PERF_LEVEL_MEDIUM', 'NvidiaOpticalFlow_1_0_NV_OF_PERF_LEVEL_SLOW', 'NvidiaOpticalFlow_1_0_NV_OF_PERF_LEVEL_UNDEFINED', 'NvidiaOpticalFlow_1_0_create', 'ORB_create', 'OpticalFlowDual_TVL1_create', 'SHARED_ATOMICS', 'SURF_CUDA_ANGLE_ROW', 'SURF_CUDA_HESSIAN_ROW', 'SURF_CUDA_LAPLACIAN_ROW', 'SURF_CUDA_OCTAVE_ROW', 'SURF_CUDA_ROWS_COUNT', 'SURF_CUDA_SIZE_ROW', 'SURF_CUDA_X_ROW', 'SURF_CUDA_Y_ROW', 'SparsePyrLKOpticalFlow_create', 'StereoBeliefPropagation_estimateRecommendedParams', 'StereoConstantSpaceBP_estimateRecommendedParams', 'Stream_Null', 'TargetArchs_has', 'TargetArchs_hasBin', 'TargetArchs_hasEqualOrGreater', 'TargetArchs_hasEqualOrGreaterBin', 'TargetArchs_hasEqualOrGreaterPtx', 'TargetArchs_hasEqualOrLessPtx', 'TargetArchs_hasPtx', 'WARP_SHUFFLE_FUNCTIONS', '__doc__', '__loader__', '__name__', '__package__', '__spec__', 'abs', 'absSum', 'absdiff', 'add', 'addWeighted', 'alphaComp', 'bilateralFilter', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blendLinear', 'buildWarpAffineMaps', 'buildWarpPerspectiveMaps', 'calcAbsSum', 'calcHist', 'calcNorm', 'calcNormDiff', 'calcSqrSum', 'calcSum', 
'cartToPolar', 'compare', 'copyMakeBorder', 'countNonZero', 'createBackgroundSubtractorMOG', 'createBackgroundSubtractorMOG2', 'createBoxFilter', 'createBoxMaxFilter', 'createBoxMinFilter', 'createCLAHE', 'createCannyEdgeDetector', 'createColumnSumFilter', 'createContinuous', 'createConvolution', 'createDFT', 'createDerivFilter', 'createDisparityBilateralFilter', 'createGaussianFilter', 'createGeneralizedHoughBallard', 'createGeneralizedHoughGuil', 'createGoodFeaturesToTrackDetector', 'createHarrisCorner', 'createHoughCirclesDetector', 'createHoughLinesDetector', 'createHoughSegmentDetector', 'createLaplacianFilter', 'createLinearFilter', 'createLookUpTable', 'createMedianFilter', 'createMinEigenValCorner', 'createMorphologyFilter', 'createRowSumFilter', 'createScharrFilter', 'createSeparableLinearFilter', 'createSobelFilter', 'createStereoBM', 'createStereoBeliefPropagation', 'createStereoConstantSpaceBP', 'createTemplateMatching', 'cvtColor', 'demosaicing', 'dft', 'divide', 'drawColorDisp', 'ensureSizeIsEnough', 'equalizeHist', 'evenLevels', 'exp', 'findMinMax', 'findMinMaxLoc', 'flip', 'gammaCorrection', 'gemm', 'getCudaEnabledDeviceCount', 'getDevice', 'histEven', 'histRange', 'integral', 'log', 'magnitude', 'magnitudeSqr', 'max', 'meanShiftFiltering', 'meanShiftProc', 'meanShiftSegmentation', 'meanStdDev', 'merge', 'min', 'minMax', 'minMaxLoc', 'mulAndScaleSpectrums', 'mulSpectrums', 'multiply', 'norm', 'normalize', 'phase', 'polarToCart', 'pow', 'printCudaDeviceInfo', 'printShortCudaDeviceInfo', 'pyrDown', 'pyrUp', 'rectStdDev', 'reduce', 'registerPageLocked', 'remap', 'reprojectImageTo3D', 'resetDevice', 'resize', 'rotate', 'setBufferPoolConfig', 'setBufferPoolUsage', 'setDevice', 'split', 'sqr', 'sqrIntegral', 'sqrSum', 'sqrt', 'subtract', 'sum', 'threshold', 'transpose', 'unregisterPageLocked', 'warpAffine', 'warpPerspective']
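As a quick sanity check that these entries are usable, a minimal sketch with one of the listed factory functions, createGaussianFilter; CUDA filter objects are created once and then applied to GpuMats (the random input image here is purely for illustration):

import cv2
import numpy as np

# 5x5 Gaussian filter for 8-bit, 3-channel images; create once, reuse.
gauss = cv2.cuda.createGaussianFilter(cv2.CV_8UC3, cv2.CV_8UC3, (5, 5), 1.5)

src = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
g_src = cv2.cuda_GpuMat()
g_src.upload(src)
g_dst = gauss.apply(g_src)   # executed on the GPU
dst = g_dst.download()
print(dst.shape)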
Recording meteors from the ATOMcam2 feed
The ATOMcam2 is mounted on the balcony and streams its video over RTSP.
The processing is a Python script (run in a Jupyter notebook) that uses OpenCV's motion-detection functions. Besides meteors it also records aircraft, artificial satellites, fast-moving clouds, birds and so on, so I would like to filter those out somehow (one further cut is sketched after the script below).
For now, a one-liner that deletes files below a given size (500 kB in this example):
find . -name "*.avi" -type 'f' -size -500k -delete
import cv2
import numpy as np
import time
import datetime
import os

capture = cv2.VideoCapture('rtsp://4190:2712@192.168.68.74/live')
#capture = cv2.VideoCapture(0)
PATH = "/media/mars/ff2880cc-1a99-40bd-88c1-5cdc86fe9eed/home/mars/DATA"

W = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
H = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2 = int(W / 2)
H2 = int(H / 2)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
print(W, H)

def key(k):
    # Runtime tuning: 2/3 adjust the threshold, 4/5 the minimum contour
    # size, 't' toggles recording, 'r' toggles image inversion.
    global th, tc, track, reverse
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc - 5
    elif k == ord('5'):
        tc = tc + 5
    elif k == ord('t'):
        track = not track
    elif k == ord('r'):
        reverse = not reverse

fontFace = cv2.FONT_HERSHEY_SIMPLEX
track, reverse = False, False
avg = None
th = 10
tc = 25
x, y = 0, 0
writer = None
time_start = time.time()
frame = 0

while True:
    ret, img = capture.read()
    if not ret:
        continue  # skip dropped frames
    org = img.copy()
    #img = cv2.resize(img, dsize=(W2, H2))
    img = img[0:int(H * 0.9), 0:int(W)]  # exclude the bottom 10% of the frame from detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if reverse:
        gray = cv2.bitwise_not(gray)
        if avg is not None:
            avg = cv2.bitwise_not(avg)
    if avg is None:
        avg = gray.copy().astype("float")
        continue
    # Motion detection: running average -> frame delta -> threshold -> contours.
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    detect = False
    for i in range(0, len(contours)):
        if len(contours[i]) > 0:
            if cv2.contourArea(contours[i]) > tc:
                detect = True
                time_start = time.time()
                if writer is None and track:
                    now = datetime.datetime.today()
                    date = now.strftime("%Y%m%d")
                    cDIR = PATH + '/' + date
                    if not os.path.exists(cDIR):
                        os.mkdir(cDIR)
                    fname = cDIR + '/' + 'E-' + now.strftime("%Y%m%d_%H:%M:%S") + ".avi"
                    writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
                rect = contours[i]
                x, y, w, h = cv2.boundingRect(rect)
                cv2.rectangle(img, (x - w, y - h), (x + w * 2, y + h * 2), (0, 0, 255), 2)
    if time.time() - time_start > 5:
        # No motion for 5 seconds: close the current clip.
        if writer is not None:
            writer.release()
            frame = 0
            writer = None
    now = datetime.datetime.today()
    text = now.strftime("%Y/%m/%d %H:%M:%S") + ' No:' + str(frame) + ' ' + " TH:" + str(th) + " SZ:" + str(tc)
    img = cv2.putText(img, text, (30, 50), fontFace, 1, color=(0, 255, 0))
    org = cv2.putText(org, text, (30, 50), fontFace, 1, color=(0, 255, 0))
    text1 = "REC:" + str(track) + " reverse:" + str(reverse)
    if writer is not None:
        frame = frame + 1
        text1 = fname + ' ' + text1
    img = cv2.putText(img, text1, (30, 80), fontFace, 1, color=(0, 255, 0))
    #cv2.imshow('thresh-level', thresh)
    cv2.imshow("IMAGE", img)
    if writer is not None:
        writer.write(org)
    k = cv2.waitKey(1) & 0xFF
    key(k)
    if k == ord('q'):
        break

capture.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()
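One possible extra cut beyond file size, as a sketch: meteors are brief, so clips that run much longer than a few seconds are more likely aircraft or drifting clouds. The path and the frame-count threshold here are assumptions of mine, not tested values:

import glob
import cv2

MAX_FRAMES = 300  # assumed cutoff: about 20 s at 15 fps

for f in glob.glob('/path/to/DATA/**/*.avi', recursive=True):
    cap = cv2.VideoCapture(f)
    n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    if n > MAX_FRAMES:
        print('too long, candidate for removal:', f, n)
        # os.remove(f)  # import os and uncomment to actually delete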
Use threading to pull frames on a separate thread and make the processing more efficient.
import cv2
import threading
import queue
import numpy as np
import time
import datetime
import os

cPATH = 'rtsp://4190:2712@192.168.68.74/live'
#cPATH = 'rtsp://admin:@192.168.68.128:554/1/h264major'
PATH = "/home/pi/DATA"
fourcc = cv2.VideoWriter_fourcc(*"XVID")

class ThreadingVideoCapture:
    # Read frames on a background thread and hand them over via a queue.
    def __init__(self, src, max_queue_size=256):
        self.video = cv2.VideoCapture(src)
        self.q = queue.Queue(maxsize=max_queue_size)
        self.stopped = False

    def start(self):
        thread = threading.Thread(target=self.update, daemon=True)
        thread.start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return
            if not self.q.full():
                ok, frame = self.video.read()
                self.q.put((ok, frame))
                if not ok:
                    self.stop()
                    return

    def read(self):
        return self.q.get()

    def stop(self):
        self.stopped = True

    def release(self):
        self.stopped = True
        self.video.release()

    def isOpened(self):
        return self.video.isOpened()

    def get(self, i):
        return self.video.get(i)

def key(k):
    # Runtime tuning: 2/3 adjust the threshold, 4/5 the minimum contour size,
    # 't' toggles recording, 'd' toggles display, 'r' toggles inversion.
    global th, tc, track, reverse, disp
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc - 5
    elif k == ord('5'):
        tc = tc + 5
    elif k == ord('t'):
        track = not track
    elif k == ord('d'):
        disp = not disp
    elif k == ord('r'):
        reverse = not reverse

def detect_mov(img, contours, detect):
    # Count contours larger than tc and mark them on the frame.
    # (img is passed explicitly here; the original relied on the global.)
    for i in range(0, len(contours)):
        if len(contours[i]) > 0:
            if cv2.contourArea(contours[i]) > tc:
                detect = detect + 1
                rect = contours[i]
                x, y, w, h = cv2.boundingRect(rect)
                cv2.rectangle(img, (x - w, y - h), (x + w * 2, y + h * 2), (0, 0, 255), 3)
    return img, detect

fontFace = cv2.FONT_HERSHEY_SIMPLEX
video = ThreadingVideoCapture(cPATH)
video.start()
if not video.isOpened():
    raise RuntimeError
W = video.get(cv2.CAP_PROP_FRAME_WIDTH)
H = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2, H2 = int(W / 2), int(H / 2)
cv2.namedWindow('ATOM', cv2.WINDOW_AUTOSIZE)

track, reverse, disp = False, False, False
avg = None
th = 10
tc = 25
x, y = 0, 0
writer = None
time_start = time.time()
frame = 0
fname = None
log = PATH + '/metro.log'
detect = 0

while True:
    ret, img = video.read()
    if ret:
        org = img.copy()
        img = cv2.resize(img, dsize=(W2, H2))
        img = img[0:int(H2 * 0.85), 0:int(W2)]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if reverse:
            gray = cv2.bitwise_not(gray)
            if avg is not None:
                avg = cv2.bitwise_not(avg)
        if avg is None:
            avg = gray.copy().astype("float")
            continue
        # Motion detection: running average -> frame delta -> threshold -> contours.
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        img, detect = detect_mov(img, contours, detect)
        if writer is None and track and detect != 0:
            time_start = time.time()
            now = datetime.datetime.today()
            date = now.strftime("%Y%m%d")
            cDIR = PATH + '/' + date
            if not os.path.exists(cDIR):
                os.mkdir(cDIR)
            fname = cDIR + '/' + 'E-' + now.strftime("%Y%m%d_%H%M%S") + ".avi"
            writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
        if time.time() - time_start > 5:
            # No motion for 5 seconds: close the current clip.
            if writer is not None:
                writer.release()
                frame = 0
                writer = None
        now = datetime.datetime.today()
        text = now.strftime("%Y%m%d %H%M%S") + ' No:' + str(frame) + ' ' + " TH:" + str(th) + " SZ:" + str(tc)
        img = cv2.putText(img, text, (30, 50), fontFace, 1, color=(0, 255, 0))
        org = cv2.putText(org, text, (30, 50), fontFace, 1, color=(0, 255, 0))
        text1 = "REC:" + str(track) + " reverse:" + str(reverse)
        if writer is not None:
            frame = frame + 1
            text1 = fname + ' ' + text1
        img = cv2.putText(img, text1, (30, 80), fontFace, 1, color=(0, 255, 0))
        #cv2.imshow('thresh-level', thresh)
        if disp:
            cv2.imshow("ATOM", img)
        if writer is not None:
            writer.write(org)
    else:
        # Stream dropped: reconnect and reset the running average.
        now = datetime.datetime.today()
        date = now.strftime("%Y%m%d_%H%M%S")
        print("disconnected:", date)
        video.release()
        avg = None
        video = ThreadingVideoCapture(cPATH)
        video.start()
    k = cv2.waitKey(int(1000 / 30)) & 0xFF
    key(k)
    if k == ord('q'):
        break

video.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()
print('Done.')
The Double Cluster
A pair of open clusters in Perseus, near the border with Cassiopeia and close to the centre line of the Milky Way. The name comes from the two open clusters lying side by side. In the NGC and Melotte catalogues, the western cluster is NGC 869 / Mel 13 and the eastern cluster NGC 884 / Mel 14.