Category archive: "Astronomy"

Testing CUDA and Thread support

A version that supports multiple network cameras and can take advantage of CUDA (GPU) and the Thread feature. The camera is selected with a command-line argument: A (or no argument) uses the ATOM camera's RTSP stream, and any other value uses the ONVIF camera.

import cv2
import numpy as np
import time
import datetime
import os
import sys
import threading   # used by ThreadingVideoCapture below
import queue       # used by ThreadingVideoCapture below
GPU=True
THREAD=True
if len(sys.argv)>1:
    if sys.argv[1]=='A':
        cPATH='rtsp://5173:6703@192.168.68.74/live'
    else:
        cPATH='rtsp://admin:@192.168.68.128:554/1/h264major'
else:
    cPATH='rtsp://5173:6703@192.168.68.74/live'
PATH="/home/mars/pWork/DATA"
#

class ThreadingVideoCapture:
    def __init__(self, src, max_queue_size=256):
        self.video = cv2.VideoCapture(src)
        self.q = queue.Queue(maxsize=max_queue_size)
        self.stopped = False

    def start(self):
        thread = threading.Thread(target=self.update, daemon=True)
        thread.start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return
            if not self.q.full():
                ok, frame = self.video.read()
                self.q.put((ok, frame))
                if not ok:
                    self.stop()
                    return

    def read(self):
        return self.q.get()

    def stop(self):
        self.stopped = True

    def release(self):
        self.stopped = True
        self.video.release()

    def isOpened(self):
        return self.video.isOpened()

    def get(self, i):
        return self.video.get(i)

def key(k):
    global th, tc,track,reverse
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc -5
    elif k == ord('5'):
        tc = tc +5
    elif k == ord('t'):
        track = not track
    elif k == ord('r'):
        reverse = not reverse

fontFace =cv2.FONT_HERSHEY_SIMPLEX
track, reverse = False,False
avg=None
writer = None
th = 30
tc = 30
x,y=0,0
detect_counts = 0
red,blue,green = (0,0,255),(255,0,0),(0,255,0)
time_start = time.time()
frame=0
log=PATH+'/metro.log'
if cPATH=='rtsp://5173:6703@192.168.68.74/live':
    TITLE="ATOM"
    HEAD ='ATOM'
else:
    TITLE="ONVIF"
    HEAD='ONVIF'

if THREAD:
    import threading
    import queue
    TITLE=TITLE+"-T"
    capture = ThreadingVideoCapture(cPATH)
    capture.start()
    if not capture.isOpened():
        raise RuntimeError
else:
    capture=cv2.VideoCapture(cPATH)

if GPU:
    TITLE=TITLE+"-G"
    img_gpu_src = cv2.cuda_GpuMat() # Allocate device memory only once, as memory allocation seems to take time...
    img_gpu_dst = cv2.cuda_GpuMat()
    img_gpu_gray= cv2.cuda_GpuMat()

W = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
H = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2=int(W/2)
H2=int(H/2)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
print('Camera:',cPATH)
print('Size:',W,H)
while(True):
    ret, img = capture.read()
    if ret:
        org = img.copy()
        if GPU:
            img_gpu_src.upload(img)
            img_gpu_dst = cv2.cuda.resize(img_gpu_src, dsize=(W2, H2))
            img_gpu_dst = cv2.cuda_GpuMat(img_gpu_dst,[0,int(H2*0.85)],[0,W2])   # ROI: exclude the bottom 15% of the half-size frame
            img_gpu_gray=cv2.cuda.cvtColor(img_gpu_dst,cv2.COLOR_BGR2GRAY)
            org_img=img_gpu_dst.download()
            gray = img_gpu_gray.download()
        else:
            org_img = cv2.resize(img, dsize=(W2, H2))
            org_img = org_img[0:int(H2*0.85), 0:W2]   # same ROI as the GPU path
            gray = cv2.cvtColor(org_img, cv2.COLOR_BGR2GRAY)
        if reverse:
            gray=cv2.bitwise_not(gray)
            if avg is not None:
                avg=cv2.bitwise_not(avg)
        if avg is None:
            avg = gray.copy().astype("float")
            continue

        #wtiter,fname = moving(img,avg)
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]

        contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        detect=False
        for i in range(0,len(contours)):
            if len(contours[i]) > 0:
                 if cv2.contourArea(contours[i]) > tc:
                    detect=True
                    time_start = time.time()
                    if writer is None and track:
                        detect_counts = 0
                        now=datetime.datetime.today()
                        date=now.strftime("%Y%m%d")
                        cDIR=PATH+'/'+date
                        if not(os.path.exists(cDIR)):
                            os.mkdir(cDIR)
                        fname=cDIR+'/'+ HEAD + now.strftime("%Y%m%d_%H%M%S")+".avi"
                        writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
                    rect = contours[i]
                    x, y, w, h = cv2.boundingRect(rect)
                    cv2.rectangle(org_img, (x-w, y-h), (x + w*2, y + h*2), red, 3)
        if detect:
            detect_counts=detect_counts + 1
        if time.time() -  time_start  > 5:
            if writer is not None:
                writer.release()
                new_name=fname.replace(HEAD,HEAD+'_' + f'{detect_counts:04}'+'_')
                os.rename(fname,new_name)
                frame=0
                writer = None
        now=datetime.datetime.today()
        text=now.strftime("%Y%m%d %H%M%S")+' No:'+str(frame)+ ' '+" TH:"+str(th)+" SZ:"+str(tc)
        org_img = cv2.putText(org_img, text, (30,50), fontFace,1,color=green)
        org = cv2.putText(org, text, (30,50), fontFace,1,color=green)
        text1="REC:"+str(track) + "  reverse:" + str(reverse)
        if writer is not None:
            frame=frame+1
            text1=fname+' '+text1
        org_img = cv2.putText(org_img, text1, (30,80), fontFace,1,color=green)
        #cv2.imshow('thresh-level',thresh)

        cv2.imshow(TITLE,org_img)

        if writer is not None:
            writer.write(org)
    else:
        now=datetime.datetime.today()
        date=now.strftime("%Y%m%d_%H%M%S")
        print('reconnect:',date)
        capture.release()
        avg=None
        if THREAD:
            capture = ThreadingVideoCapture(cPATH)
            capture.start()
        else:
            capture = cv2.VideoCapture(cPATH)
    k=cv2.waitKey(1) & 0xFF
    key(k)
    if k== ord('q'):
        break

capture.release()
if writer is not None:
    writer.release()
    new_name=fname.replace(HEAD,HEAD+'_' + f'{detect_counts:04}'+'_')
    os.rename(fname,new_name)
cv2.destroyAllWindows()
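
As a rough standalone check of how much the CUDA path helps, here is a minimal sketch (not part of the script above; it assumes an OpenCV build with CUDA support and uses an arbitrary 1080p dummy frame) that times plain cv2.resize against cv2.cuda.resize, keeping the GpuMat allocated outside the loop just as the script does:

import time
import cv2
import numpy as np

if cv2.cuda.getCudaEnabledDeviceCount() == 0:
    raise RuntimeError('no CUDA device or OpenCV was built without CUDA')

frame = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)  # dummy 1080p frame
N = 200

# CPU path
t0 = time.perf_counter()
for _ in range(N):
    small = cv2.resize(frame, (960, 540))
cpu = time.perf_counter() - t0

# GPU path: allocate the device buffer once, as in the script above
gpu_src = cv2.cuda_GpuMat()
t0 = time.perf_counter()
for _ in range(N):
    gpu_src.upload(frame)
    gpu_dst = cv2.cuda.resize(gpu_src, (960, 540))
    small = gpu_dst.download()
gpu = time.perf_counter() - t0

print(f'CPU resize: {cpu / N * 1000:.2f} ms/frame')
print(f'CUDA resize (incl. upload/download): {gpu / N * 1000:.2f} ms/frame')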

Recording meteors from ATOM Cam 2 video

The ATOM Cam 2 is set up on the balcony and streams video over RTSP.
A small fireball was caught (upper left). Enlarging the video, part of Ursa Major (the Big Dipper) can be seen near the top center of the frame.

The processing is a Python script (run in a Jupyter notebook) that uses the motion-detection functionality in cv2. Besides meteors it also records aircraft, satellites, fast-moving clouds, birds and so on, so I would like to filter those out somehow.
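
One conceivable filter (only a sketch of an idea, not something the script does): a meteor lasts a moment, while aircraft, clouds and birds tend to stay in frame much longer, so recordings that run well past a few seconds could be flagged automatically. The directory and the 10-second cutoff below are arbitrary assumptions.

import cv2
from pathlib import Path

MAX_SEC = 10                           # assumed cutoff: longer clips are probably not meteors
DATA = Path('/home/mars/pWork/DATA')   # adjust to the recording directory (PATH in the scripts)

for avi in DATA.rglob('*.avi'):
    cap = cv2.VideoCapture(str(avi))
    fps = cap.get(cv2.CAP_PROP_FPS) or 15          # the writer records at 15 fps
    frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    cap.release()
    if fps > 0 and frames / fps > MAX_SEC:
        print('candidate for deletion:', avi, f'({frames / fps:.1f}s)')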

For now, a one-liner that deletes files below a certain size (a 500 kB example):

find . -name "*.avi" -type 'f' -size -500k -delete
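
The same cleanup can also be done from Python; a minimal sketch assuming the same 500 kB threshold and the current directory:

from pathlib import Path

MIN_BYTES = 500 * 1024                 # same 500 kB threshold as the find command
for avi in Path('.').rglob('*.avi'):
    if avi.stat().st_size < MIN_BYTES:
        avi.unlink()                   # delete clips too small to contain a real event
        print('deleted:', avi)
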
import cv2
import numpy as np
import time
import datetime
import os

capture = cv2.VideoCapture('rtsp://4190:2712@192.168.68.74/live')
#capture=cv2.VideoCapture(0)
PATH="/media/mars/ff2880cc-1a99-40bd-88c1-5cdc86fe9eed/home/mars/DATA"
W = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
H = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2=int(W/2)
H2=int(H/2)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
print(W,H)

def key(k):
    global th, tc,track,reverse
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc -5
    elif k == ord('5'):
        tc = tc +5
    elif k == ord('t'):
        track = not track
    elif k == ord('r'):
        reverse = not reverse

fontFace =cv2.FONT_HERSHEY_SIMPLEX
track, reverse = False,False
avg=None
th = 10
tc = 25
x,y=0,0
writer = None
time_start = time.time()
frame=0
while(True):
    ret, img = capture.read()
    if not ret:
        break              # stop if the stream drops (later versions reconnect instead)
    org = img.copy()
    #img = cv2.resize(im, dsize=(W2, H2))
    img=img[0:int(H*0.9),0:int(W)]    # exclude the bottom 10% of the frame from detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if reverse:
        gray=cv2.bitwise_not(gray)
        avg=cv2.bitwise_not(avg)
    if avg is None:
        avg = gray.copy().astype("float")
        continue

    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]
    contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    detect=False
    for i in range(0,len(contours)):
        if len(contours[i]) > 0:
             if cv2.contourArea(contours[i]) > tc:
                detect=True
                time_start = time.time()
                if writer is None and track:
                    now=datetime.datetime.today()
                    date=now.strftime("%Y%m%d")
                    cDIR=PATH+'/'+date
                    if not(os.path.exists(cDIR)):
                        os.mkdir(cDIR)
                    fname=cDIR+'/'+'E-'+now.strftime("%Y%m%d_%H:%M:%S")+".avi"
                    writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
                rect = contours[i]
                x, y, w, h = cv2.boundingRect(rect)

                cv2.rectangle(img, (x-w, y-h), (x + w*2, y + h*2), (0, 0, 255), 2)
                
    if time.time() -  time_start  > 5:
        if writer is not None:
            writer.release()
            #f.close()
            frame=0
            writer = None
    now=datetime.datetime.today()    
    text=now.strftime("%Y/%m/%d %H:%M:%S")+' No:'+str(frame)+ ' '+" TH:"+str(th)+" SZ:"+str(tc)
    img = cv2.putText(img, text, (30,50), fontFace,1,color=(0, 255, 0))
    org = cv2.putText(org, text, (30,50), fontFace,1,color=(0, 255, 0))
    text1="REC:"+str(track) + "  reverse:" + str(reverse)
    if writer is not None:
        frame=frame+1
        text1=fname+' '+text1
    img = cv2.putText(img, text1, (30,80), fontFace,1,color=(0, 255, 0))
    #cv2.imshow('thresh-level',thresh)
    cv2.imshow("IMAGE",img)
    if writer is not None:
        writer.write(org)
        
    k=cv2.waitKey(1) & 0xFF
    key(k)
    if k== ord('q'):
        break

capture.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()

Using the Thread feature to make the processing more efficient: a reader thread keeps filling a queue with frames from the RTSP stream, so decoding no longer blocks the detection loop.

import cv2
import threading
import queue
import numpy as np
import time
import datetime
import os

cPATH='rtsp://4190:2712@192.168.68.74/live'
#cPATH= 'rtsp://admin:@192.168.68.128:554/1/h264major'
PATH="/home/pi/DATA"

fourcc = cv2.VideoWriter_fourcc(*"XVID")

class ThreadingVideoCapture:
    def __init__(self, src, max_queue_size=256):
        self.video = cv2.VideoCapture(src)
        self.q = queue.Queue(maxsize=max_queue_size)
        self.stopped = False

    def start(self):
        thread = threading.Thread(target=self.update, daemon=True)
        thread.start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return
            if not self.q.full():
                ok, frame = self.video.read()
                self.q.put((ok, frame))
                if not ok:
                    self.stop()
                    return

    def read(self):
        return self.q.get()

    def stop(self):
        self.stopped = True

    def release(self):
        self.stopped = True
        self.video.release()

    def isOpened(self):
        return self.video.isOpened()

    def get(self, i):
        return self.video.get(i)
    
def key(k):
    global th, tc,track,reverse,disp
    if k == ord('2'):
        th = th - 1
    elif k == ord('3'):
        th = th + 1
    elif k == ord('4'):
        tc = tc -5
    elif k == ord('5'):
        tc = tc +5
    elif k == ord('t'):
        track = not track
    elif k == ord('d'):
        disp= not disp
    elif k == ord('r'):
        reverse = not reverse

def detect_mov(img, contours, detect):
    # Draw a box around every contour larger than tc and count the detections.
    for cnt in contours:
        if len(cnt) > 0 and cv2.contourArea(cnt) > tc:
            detect = detect + 1
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(img, (x-w, y-h), (x + w*2, y + h*2), (0, 0, 255), 3)
    return img, detect
                    
fontFace =cv2.FONT_HERSHEY_SIMPLEX

video = ThreadingVideoCapture(cPATH)
video.start()
if not video.isOpened():
    raise RuntimeError

W = video.get(cv2.CAP_PROP_FRAME_WIDTH)
H = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
W2,H2=int(W/2),int(H/2)
cv2.namedWindow('ATOM', cv2.WINDOW_AUTOSIZE)

track, reverse,disp = False,False,False
avg=None
th = 10
tc = 25
x,y=0,0
writer = None
time_start = time.time()
frame=0
fname=None
log=PATH+'/metro.log'
detect=0
while(True):
    ret, img = video.read()
    if ret:
        org = img.copy()
        img = cv2.resize(img, dsize=(W2,H2))
        img=img[0:int(H2*0.85),0:int(W2)]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if reverse:
            gray=cv2.bitwise_not(gray)
            if avg is not None:
                avg=cv2.bitwise_not(avg)
        if avg is None:
            avg = gray.copy().astype("float")
            continue

        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, th, 255, cv2.THRESH_BINARY)[1]

        contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        img,detect=detect_mov(img,contours,detect)
        if writer is None and track and detect !=0:
            time_start = time.time()
            now=datetime.datetime.today()
            date=now.strftime("%Y%m%d")
            cDIR=PATH+'/'+date
            if not(os.path.exists(cDIR)):
                os.mkdir(cDIR)
            fname=cDIR+'/'+'E-'+now.strftime("%Y%m%d_%H%M%S")+".avi"
            writer = cv2.VideoWriter(fname, fourcc, 15, (int(W), int(H)))
            
        if time.time() -  time_start  > 5:
            if writer is not None:
                writer.release()
                #f.close()
                frame=0
                writer = None
        now=datetime.datetime.today()    
        text=now.strftime("%Y%m%d %H%M%S")+' No:'+str(frame)+ ' '+" TH:"+str(th)+" SZ:"+str(tc)
        img = cv2.putText(img, text, (30,50), fontFace,1,color=(0, 255, 0))
        org = cv2.putText(org, text, (30,50), fontFace,1,color=(0, 255, 0))
        text1="REC:"+str(track) + "  reverse:" + str(reverse)
        if writer is not None:
            frame=frame+1
            text1=fname+' '+text1
        img = cv2.putText(img, text1, (30,80), fontFace,1,color=(0, 255, 0))
        #cv2.imshow('thresh-level',thresh)
        if disp:
            cv2.imshow("ATOM",img)
        
        if writer is not None:
            writer.write(org)
    else:
        now=datetime.datetime.today()
        date=now.strftime("%Y%m%d_%H%M%S")
        print("disconected:",date)
        video.release()
        avg = None
        video = ThreadingVideoCapture(cPATH)
        video.start()
    k=cv2.waitKey(int(1000 / 30)) & 0xFF
    key(k)
    if k== ord('q'):
        break

video.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()
print('Done.')

The Double Cluster

A pair of open clusters in Perseus, located near the border with Cassiopeia, roughly in the middle of the Milky Way band. It is called the Double Cluster because the two open clusters lie close together. In the NGC and Melotte catalogues, the western cluster is NGC 869 / Mel 13 and the eastern cluster is NGC 884 / Mel 14.