Code: Select all
#!/usr/bin/python
# original script by brainflakes, improved by pageauc, peewee2 and Kesthal
# www.raspberrypi.org/phpBB3/viewtopic.php?f=43&t=45235
# You need to install PIL to run this script
# type "sudo apt-get install python-imaging-tk" in an terminal window to do this
# play with it KLL
# /home/pi/python_cam/picam4.py
# rev b with dropbox, ftp, email options
import StringIO
import subprocess
import os
import time
from datetime import datetime
from PIL import Image
# Import smtplib to provide email functions
import smtplib
#from email.mime.text import MIMEText
# Motion detection settings:
# Threshold - how much a pixel has to change by to be marked as "changed"
# Sensitivity - how many changed pixels before capturing an image, needs to be higher if noisy view
# ForceCapture - whether to force an image to be captured every forceCaptureTime seconds, values True or False
# filepath - location of folder to save photos
# filenamePrefix - string that prefixes the file name for easier identification of files.
# diskSpaceToReserve - Delete oldest images to avoid filling disk. How much byte to keep free on disk.
# cameraSettings - "" = no extra settings; "-hf" = Set horizontal flip of image; "-vf" = Set vertical flip; "-hf -vf" = both horizontal and vertical flip
threshold = 10 # per-pixel green-channel change needed to mark a pixel "changed"
sensitivity = 140 # number of changed pixels needed to trigger a capture
rotation = 180 # KLL camera mounting (passed to raspistill -rot)
forceCapture = True # also capture every forceCaptureTime seconds without motion
forceCaptureTime = 1 * 60 # every minute (for webserver); use 60 * 60 for once an hour
# info by print
info_print = True # print progress/status messages to stdout
# store image files to a temp (RAM) filesystem
filepath = "/run/shm/"
filenamePrefix = "RPICAM"
file_typ = ".jpg"
prg_msg = "boot" # tag printed with each capture: "boot", "motion" or "force"
# option: the newest picture is referred to by a linkfile (symlink) in the same dir
link_tolastpicture = True # KLL
lfile = "last.jpg" # KLL make it as symlink
# option: send file to DROPBOX, ! very long API procedure !
send_dropbox = False # KLL test files in drop_box and to PC
# option: send ( or move ) file to a FTP server
send_ftp = False # KLL FTP
ftp_remotepath = "/usb1_1/rpi/" # a USB stick
ftp_account = "kll-ftp:*****@192.168.1.1:2121" # in my router: USER:[email protected]:PORT
#wput_option = " -R" # opt "-R" for move file
wput_option = " "
# option: email notification
send_email_enable = False
# Define email addresses to use
addr_to = '[email protected]'
addr_from = '[email protected]'
# Define SMTP email server details
GMAIL_USER = '[email protected]'
GMAIL_PASS = '*****'
SMTP_SERVER = 'smtp.gmail.com:587'
# email rate limiting
emaildeltaTime = 1 * 60 * 60 # send mail again only after this many seconds (1 hour)
last_send = time.time() - emaildeltaTime # so the very first picture (at start/boot) also sends an email
# temp fs is ~100MB; keep 10MB free for other programs to use
diskSpaceToReserve = 10 * 1024 * 1024 # Keep 10 mb free on disk
# NOTE(review): observed behaviour of the cleanup at other reserve values:
# with 95*1024*1024 == 99614720 it deleted all files;
# with 90*1024*1024 == 94371840 it deleted (only) the oldest each time.
cameraSettings = "" # extra raspistill flags: "" none; "-hf" horizontal flip; "-vf" vertical flip; "-hf -vf" both
# settings of the photos to save
saveWidth = 1296
saveHeight = 972
saveQuality = 15 # Set jpeg quality (0 to 100)
# Test-Image settings (small frame used only for motion detection)
testWidth = 100
testHeight = 75
# this is the default setting, if the whole image should be scanned for changed pixels
testAreaCount = 1
testBorders = [ [[1,testWidth],[1,testHeight]] ] # [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
# testBorders are NOT zero-based, the first pixel is 1 and the last pixel is testWidth or testHeight
# with "testBorders" you can define areas where the script should scan for changed pixels
# for example, if your picture looks like this:
#
# ....XXXX
# ........
# ........
#
# "." is a street or a house, "X" are trees which move around like crazy when the wind is blowing
# because of the wind in the trees, photos would be taken all the time; to prevent this, your setting might look like this:
# testAreaCount = 2
# testBorders = [ [[1,50],[1,75]], [[51,100],[26,75]] ] # area y=1 to 25 not scanned in x=51 to 100
# even more complex example
# testAreaCount = 4
# testBorders = [ [[1,39],[1,75]], [[40,67],[43,75]], [[68,85],[48,75]], [[86,100],[41,75]] ]
# in debug mode, a file debug.bmp is written to disk with changed pixels marked green and scan-area borders marked blue
# debug mode should only be turned on while testing the parameters above
debugMode = False # False or True
debug_bmp = "debug.bmp"
def send_email(recipient, subject, text):
    """Send a plain-text notification mail via the configured SMTP server.

    recipient -- destination address
    subject   -- subject line
    text      -- message body
    Uses the module-level GMAIL_USER / GMAIL_PASS / SMTP_SERVER settings.
    """
    smtpserver = smtplib.SMTP(SMTP_SERVER)
    smtpserver.ehlo()
    smtpserver.starttls()
    # Re-identify to the server after STARTTLS as the protocol requires.
    # (The original had 'smtpserver.ehlo' without parentheses - a no-op
    # attribute access, so this EHLO was never actually sent.)
    smtpserver.ehlo()
    smtpserver.login(GMAIL_USER, GMAIL_PASS)
    header = 'To:' + recipient + '\n' + 'From: ' + GMAIL_USER
    header = header + '\n' + 'Subject:' + subject + '\n'
    msg = header + '\n' + text + ' \n\n'
    smtpserver.sendmail(GMAIL_USER, recipient, msg)
    smtpserver.close()
# Capture a small test image (for motion detection)
def captureTestImage(settings, width, height):
    """Grab a small BMP from raspistill for motion comparison.

    Returns (PIL Image, pixel-access object) for the captured frame.
    """
    command = "raspistill %s -w %s -h %s -t 200 -e bmp -n -o -" % (settings, width, height)
    raw = subprocess.check_output(command, shell=True)
    stream = StringIO.StringIO()
    stream.write(raw)
    stream.seek(0)
    picture = Image.open(stream)
    pixels = picture.load()
    stream.close()
    return picture, pixels
# Save a full size image to disk
def saveImage(settings, width, height, quality, diskSpaceToReserve):
global last_send #it is read and set here
keepDiskSpaceFree(diskSpaceToReserve)
#time = datetime.now() # KLL bad to call this variable time !
t_now = datetime.now()
s_now = "-%04d%02d%02d-%02d%02d%02d" % (t_now.year, t_now.month, t_now.day, t_now.hour, t_now.minute, t_now.second)
filename = filenamePrefix + s_now + file_typ
fullfilename = filepath + filename
lastfilename = filepath + lfile
subprocess.call("raspistill %s -w %s -h %s -t 200 -e jpg -q %s -n -rot %s -o %s" % (settings, width, height, quality, rotation, fullfilename), shell=True)
if info_print :
print " %s captured %s" % (prg_msg,fullfilename)
if link_tolastpicture : # tested ok
try:
os.remove(lastfilename)
except:
pass # not exist at first start
os.symlink(fullfilename,lastfilename)
# os.chmod(lastfilename,stat.S_IXOTH) #stat.S_IRWXG
pass
if send_dropbox : # tested ok
subprocess.call("/home/pi/python_cam/dropbox_uploader.sh upload %s /RPICAM1/" % (fullfilename), shell=True)
if info_print :
print "upload dropbox"
pass
if send_ftp : # only works from that dir! # tested ok
if info_print :
wput_opt = wput_option
pass
else :
wput_opt = wput_option+" -q" # quiet
pass
os.chdir(filepath)
subprocess.call("wput %s %s ftp://%s%s" % (wput_opt,filename,ftp_account,ftp_remotepath), shell=True)
os.chdir("/")
if info_print :
print "upload ftp"
pass
if send_email_enable :
#print 'now: %s' % (str( time.time()))
#print 'last_send: %s' % (str(last_send))
#print 'emaildeltaTime: %s' % (str( emaildeltaTime ))
if time.time() - last_send > emaildeltaTime :
emailsubject = 'from RPI CAM1: '
emailtext = 'motion detect: ' + fullfilename
send_email(addr_to, emailsubject , emailtext)
last_send = time.time()
if info_print :
print "send email"
pass
pass
# Keep free space above given level
def keepDiskSpaceFree(bytesToReserve):
    # Delete the oldest matching images until at least bytesToReserve
    # bytes are free on the filesystem holding filepath.
    if (getFreeSpace() < bytesToReserve):
        os.chdir(filepath) #KLL now works better - remove by bare filename from inside the image dir
        # sorted() puts the timestamped names in age order, oldest first
        for dfilename in sorted(os.listdir(filepath)):
            if dfilename.startswith(filenamePrefix) and dfilename.endswith(file_typ):
                print "Deleted %s to avoid filling disk" % (dfilename)
                os.remove(dfilename)
                if (getFreeSpace() > bytesToReserve):
                    os.chdir("/") # KLL restore working directory before leaving
                    return
                    # NOTE(review): if the loop exhausts all files without
                    # reaching the target, the cwd is left at filepath - confirm intended
# Get available disk space
def getFreeSpace():
    """Return the bytes available to unprivileged users on filepath's filesystem."""
    stats = os.statvfs(filepath)
    return stats.f_bavail * stats.f_frsize
#_________ main ________________________________________________________________________________________
# Get first image
if info_print :
    print('get first image')
image1, buffer1 = captureTestImage(cameraSettings, testWidth, testHeight)
# Reset last capture time
lastCapture = time.time()
if info_print :
    print('start loop') # and take a very first picture to see start time ...
saveImage(cameraSettings, saveWidth, saveHeight, saveQuality, diskSpaceToReserve)
prg_msg = "motion"
while (True):
    # Get comparison image
    image2, buffer2 = captureTestImage(cameraSettings, testWidth, testHeight)
    # Count changed pixels
    changedPixels = 0
    takePicture = False
    if (debugMode): # in debug mode, save a bitmap-file with marked changed pixels and with visible testarea-borders
        debugimage = Image.new("RGB",(testWidth, testHeight))
        debugim = debugimage.load()
    for z in xrange(0, testAreaCount): # with defaults this is xrange(0,1): one scan-area = whole picture
        for x in xrange(testBorders[z][0][0]-1, testBorders[z][0][1]): # = xrange(0,100) with default-values
            for y in xrange(testBorders[z][1][0]-1, testBorders[z][1][1]): # = xrange(0,75) with default-values; testBorders are NOT zero-based, buffer1[x,y] are zero-based (0,0 is top left, testWidth-1,testHeight-1 is bottom right)
                if (debugMode):
                    debugim[x,y] = buffer2[x,y]
                    if ((x == testBorders[z][0][0]-1) or (x == testBorders[z][0][1]-1) or (y == testBorders[z][1][0]-1) or (y == testBorders[z][1][1]-1)):
                        # print "Border %s %s" % (x,y)
                        debugim[x,y] = (0, 0, 255) # in debug mode, mark all border pixels blue
                # Just check green channel as it's the highest quality channel
                pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
                if pixdiff > threshold:
                    changedPixels += 1
                    if (debugMode):
                        debugim[x,y] = (0, 255, 0) # in debug mode, mark all changed pixels green
                # Save an image if pixels changed
                if (changedPixels > sensitivity):
                    takePicture = True # will shoot the photo later
                # outside debug mode, stop scanning as soon as motion is certain
                if ((debugMode == False) and (changedPixels > sensitivity)):
                    break # break the y loop
            if ((debugMode == False) and (changedPixels > sensitivity)):
                break # break the x loop
        if ((debugMode == False) and (changedPixels > sensitivity)):
            break # break the z loop
    if (debugMode):
        debugimage.save(filepath + debug_bmp) # save debug image as bmp
        print "debug.bmp saved, %s changed pixel" % changedPixels
    # else:
    # print "%s changed pixel" % changedPixels
    # Check force capture: take a photo anyway if forceCaptureTime elapsed
    if forceCapture:
        if time.time() - lastCapture > forceCaptureTime:
            takePicture = True
            prg_msg = "force"
    if takePicture:
        lastCapture = time.time()
        saveImage(cameraSettings, saveWidth, saveHeight, saveQuality, diskSpaceToReserve)
        prg_msg = "motion"
    # Swap comparison buffers
    image1 = image2
    buffer1 = buffer2
Code: Select all
imageData.write(subprocess.check_output(command, shell=True))
Code: Select all
sudo ./sync.sh
Code: Select all
# Download and unpack pageauc's pi-motion package into ~/picam, then run its installer.
cd ~
mkdir picam
cd picam
wget https://raw.github.com/pageauc/pi-motion-grive/master/pimotion.tar
tar -pxvf pimotion.tar
./setup.sh
Code: Select all
# Minimal PushBullet example: push a note to the first registered device.
from pushbullet import PushBullet
from pushbullet import device
apik="myapikey" # PushBullet API access token
pb=PushBullet(apik)
de=pb.devices[0] # NOTE(review): raises IndexError when no device is registered (see traceback below)
success, push = de.push_note("adsadasd","asdasdasd asd asd")
Code: Select all
Traceback (most recent call last):
File "gggg.py", line 6, in <module>
de = pb.devices[0]
IndexError: list index out of range
Code: Select all
#!/bin/bash
# Push a note through the PushBullet v2 REST API.
# -u passes the access token as the HTTP basic-auth user (trailing colon = empty password).
curl https://api.pushbullet.com/v2/pushes \
 -u XXXXACCESS TOKENXXXX: \
 -d device_iden="XXXXXXXdevive_idenXXXXXXX" \
 -d type="note" \
 -d title="Test" \
 -d body="test test test" \
 -X POST
Code: Select all
import time
import picamera
import picamera.array
width = 100      # capture width in pixels (small frame, motion test only)
height = 75      # capture height in pixels
threshold = 25   # per-pixel green-channel difference to count a pixel as changed
sensitivity = 25 # number of changed pixels that signals motion
def takepic():
    # Capture one RGB frame at (width, height) and return it as a numpy array.
    with picamera.PiCamera() as camera:
        time.sleep(1) # let exposure / white balance settle
        camera.resolution = (width, height)
        with picamera.array.PiRGBArray(camera) as stream:
            camera.capture(stream, format='rgb')
            return stream.array
if __name__ == '__main__':
print 'Taking first pic'
data1 = takepic()
time.sleep(10)
print 'Taking second pic'
data2 = takepic()
print 'Diffing'
diffCount = 0L;
for w in range(0, width):
for h in range(0, height):
# get the diff of the pixel. Conversion to int
# is required to avoid unsigned short overflow.
diff = abs(int(data1[h][w][1]) - int(data2[h][w][1]))
if diff > threshold:
diffCount += 1
if diffCount > sensitivity:
break; # break inner loop
if diffCount > sensitivity:
break; #break outer loop.
if diffCount > threshold:
print 'Motion Detected'
Code: Select all
#!/usr/bin/python
# This script implements a motion capture surveillace cam for raspery pi using picam.
# It uses the motion vecors magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
import os
import subprocess
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
#seup filepath for motion capure data
filepath = '/home/pi/motion/video'
# setup pre and post video recording around motion event
video_preseconds = 3
video_postseconds = 3
#setup video/snapshot resolution
video_width = 640#1280
video_height = 480#720
#setup video rotation (0, 90, 180, 270)
video_rotation = 180
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
motion_threshold = 60
# setup motion detection sensitivity, i.e number of motion blocks that trigger a motion detection
motion_sensitivity = 10
# do not change code behind that line
#--------------------------------------
motion_detected = False
motion_timestamp = time.time()
#call back handler for motion output data from h264 hw encoder
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Callback for H.264 motion-vector output: sets the global
    motion_detected flag when enough macro blocks show large motion."""
    def analyse(self, a):
        global motion_detected, motion_timestamp
        # calculate length of motion vectors of mpeg macro blocks
        # (plain float instead of np.float: that alias was removed in NumPy 1.24)
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # If there're more than motion_sensitivity vectors with a magnitude
        # greater than motion_threshold, then say we've detected motion
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic: trigger on motion, clear only after
        # video_postseconds of inactivity
        if th:
            motion_timestamp = now
        if motion_detected:
            if (now - motion_timestamp) >= video_postseconds:
                motion_detected = False
        elif th:
            motion_detected = True
def write_video(stream):
    # Dump the circular buffer (video captured *before* the motion event)
    # to '<motion_filename>-before.h264'.
    # Write the entire content of the circular buffer to disk. No need to
    # lock the stream here as we're definitely not writing to it
    # simultaneously
    global motion_filename
    with io.open(motion_filename + '-before.h264', 'wb') as output:
        # start at the first SPS header so the written stream is decodable
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        while True:
            buf = stream.read1()
            if not buf:
                break
            output.write(buf)
    # Wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
os.system('clear')
print "Motion Detection"
print "----------------"
print " "
with picamera.PiCamera() as camera:
    camera.resolution = (video_width, video_height)
    camera.framerate = 25
    camera.rotation = video_rotation
    camera.video_stabilization = True
    camera.annotate_background = True
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # low resolution motion vector analysis from splitter port 2
    camera.start_recording('/dev/null', splitter_port=2, resize=(340,240) ,format='h264', motion_output=MyMotionDetector(camera, size=(340,240)))
    # wait some seconds for stable video data
    camera.wait_recording(1, splitter_port=1)
    motion_detected = False
    print "Motion Capture ready!"
    try:
        while True:
            # refresh the timestamp annotation; MyMotionDetector.analyse
            # raises the motion_detected flag asynchronously
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
                print "Motion detected: " , dt.datetime.now()
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.gmtime(motion_timestamp))
                # divert the live recording away from the circular buffer
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                camera.capture(motion_filename + '.jpg', use_video_port=True)
                # save circular buffer before motion event
                write_video(stream)
                #wait for end of motion event here
                while motion_detected:
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                #split video recording back in to circular buffer
                camera.split_recording(stream, splitter_port=1)
                # join the before/after pieces into one .h264 and drop the partials
                subprocess.call("cat %s %s > %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".h264", motion_filename + "-*.h264"), shell=True)
                print "Motion stopped:" , dt.datetime.now()
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
Probably the simplest way to restrict motion detection to a particular area is to crop the numpy array in MyMotionDetector.analyse (near the top) before doing anything else with it, e.g. to restrict it to the top-left quadrant of the capture area.

HerrJemineh wrote: Hey Greg,
nice code. Is there a possibility to restrict motion detection to a smaller area?
Regards,
Simon
Code: Select all
...
def analyse(self, a):
a = a[:240, :320]
...
Okay, here is killagreg's script copied and pasted with an extra line inserted at the start of analyse. The trick is to remember that the numpy array is organized as rows, cols, so the y limits appear first, then the x limits (and that Python slices are half-open ranges, so the upper limit needs to be +1).

HerrJemineh wrote: Hi,
thank you for the fast reply!
Unfortunately I did not understand how to adjust the script for my needs.
Could you tell me how I have to change the code to detect motion in this particular area (see appendix)?
That would be very nice!
Best regards,
Simon
Code: Select all
#!/usr/bin/python
# This script implements a motion capture surveillace cam for raspery pi using picam.
# It uses the motion vecors magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
import os
import subprocess
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
#seup filepath for motion capure data
filepath = '/home/pi/motion/video'
# setup pre and post video recording around motion event
video_preseconds = 3
video_postseconds = 3
#setup video/snapshot resolution
video_width = 640#1280
video_height = 480#720
#setup video rotation (0, 90, 180, 270)
video_rotation = 180
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
motion_threshold = 60
# setup motion detection sensitivity, i.e number of motion blocks that trigger a motion detection
motion_sensitivity = 10
# do not change code behind that line
#--------------------------------------
motion_detected = False
motion_timestamp = time.time()
#call back handler for motion output data from h264 hw encoder
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Motion-vector callback restricted to a cropped sub-region."""
    def analyse(self, a):
        # restrict analysis to a sub-region of the motion-vector array
        # NOTE(review): these look like pixel coordinates, but this array has
        # one cell per 16x16 macro block (roughly rows = height/16,
        # cols = width/16 + 1), so 145:285 / 96:272 may select an empty
        # region - confirm against the intended detection area.
        a = a[145:285, 96:272]
        global motion_detected, motion_timestamp
        # calculate length of motion vectors of mpeg macro blocks
        # (plain float instead of np.float: that alias was removed in NumPy 1.24)
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # If there're more than motion_sensitivity vectors with a magnitude
        # greater than motion_threshold, then say we've detected motion
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic: trigger on motion, clear after video_postseconds of inactivity
        if th:
            motion_timestamp = now
        if motion_detected:
            if (now - motion_timestamp) >= video_postseconds:
                motion_detected = False
        elif th:
            motion_detected = True
def write_video(stream):
    # Dump the circular buffer (video captured *before* the motion event)
    # to '<motion_filename>-before.h264'.
    # Write the entire content of the circular buffer to disk. No need to
    # lock the stream here as we're definitely not writing to it
    # simultaneously
    global motion_filename
    with io.open(motion_filename + '-before.h264', 'wb') as output:
        # start at the first SPS header so the written stream is decodable
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        while True:
            buf = stream.read1()
            if not buf:
                break
            output.write(buf)
    # Wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
os.system('clear')
print "Motion Detection"
print "----------------"
print " "
with picamera.PiCamera() as camera:
    camera.resolution = (video_width, video_height)
    camera.framerate = 25
    camera.rotation = video_rotation
    camera.video_stabilization = True
    camera.annotate_background = True
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # low resolution motion vector analysis from splitter port 2
    camera.start_recording('/dev/null', splitter_port=2, resize=(340,240) ,format='h264', motion_output=MyMotionDetector(camera, size=(340,240)))
    # wait some seconds for stable video data
    camera.wait_recording(1, splitter_port=1)
    motion_detected = False
    print "Motion Capture ready!"
    try:
        while True:
            # refresh the timestamp annotation; MyMotionDetector.analyse
            # raises the motion_detected flag asynchronously
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
                print "Motion detected: " , dt.datetime.now()
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.gmtime(motion_timestamp))
                # divert the live recording away from the circular buffer
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                camera.capture(motion_filename + '.jpg', use_video_port=True)
                # save circular buffer before motion event
                write_video(stream)
                #wait for end of motion event here
                while motion_detected:
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                #split video recording back in to circular buffer
                camera.split_recording(stream, splitter_port=1)
                # join the before/after pieces into one .h264 and drop the partials
                subprocess.call("cat %s %s > %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".h264", motion_filename + "-*.h264"), shell=True)
                print "Motion stopped:" , dt.datetime.now()
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
Code: Select all
#!/usr/bin/python
# This script implements a motion capture surveillace cam for raspery pi using picam.
# It uses the motion vecors magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
import os
import subprocess
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
from PIL import Image
#debug mode?
debug = 1
#seup filepath for motion capure data
filepath = '/home/pi/motion/video'
# setup pre and post video recording around motion event
video_preseconds = 3
video_postseconds = 3
#setup video/snapshot resolution
video_width = 640 #1280
video_height = 480 #720
#setup video rotation (0, 180)
video_rotation = 180
# setup motion detection resolution, equal or smaller than video resolution
motion_width = 640
motion_height = 480
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
motion_threshold = 30
# setup motion detection sensitivity, i.e number of motion blocks that trigger a motion detection
motion_sensitivity = 6
# motion masks define areas within the motion analysis picture that are used for motion analysis
# [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
# default this is the whole image frame
#motion_mask_count = 1
#motion_masks = [ [[1,motion_width],[1,motion_height]] ]
# another example
motion_mask_count = 1
motion_masks = [ [[270,370],[190,290]] ]
# exaple for 2 mask areas
#motion_mask_count = 2
#motion_masks = [ [[1,320],[1,240]], [[400,500],[300,400]] ]
# do not change code behind that line
#--------------------------------------
motion_detected = False
motion_timestamp = time.time()
# motion vector grid: one cell per 16x16 macro block, plus one extra column
motion_cols = (motion_width + 15) // 16 + 1
motion_rows = (motion_height + 15) // 16
motion_array = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
# create motion mask: 1 inside the configured areas, 0 elsewhere
motion_array_mask = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
for count in xrange(0, motion_mask_count):
    # convert 1-based pixel coordinates from motion_masks into macro-block indices
    for col in xrange( (motion_masks[count][0][0]-1)//16, (motion_masks[count][0][1]-1+15)//16 ):
        for row in xrange( (motion_masks[count][1][0]-1)//16, (motion_masks[count][1][1]-1+15)//16 ):
            motion_array_mask[row][col] = 1
#motion_array_mask[4:8, 3:9] = 255
#call back handler for motion output data from h264 hw encoder
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Motion-vector callback with mask areas and optional debug capture."""
    def analyse(self, a):
        global motion_detected, motion_timestamp, motion_array, motion_array_mask
        # calculate length of motion vectors of mpeg macro blocks
        # (plain float instead of np.float: that alias was removed in NumPy 1.24)
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # zero out blocks outside the configured mask areas
        a = a * motion_array_mask
        # If there're more than 'sensitivity' vectors with a magnitude greater
        # than 'threshold', then say we've detected motion
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic, trigger on motion and stop after inactivity
        if th:
            motion_timestamp = now
        if motion_detected:
            if (now - motion_timestamp) >= video_postseconds:
                motion_detected = False
        elif th:
            motion_detected = True
        if debug:
            # keep a copy with triggering blocks saturated for the debug image
            idx = a > motion_threshold
            a[idx] = 255
            motion_array = a
def write_video(stream):
    # Dump the circular buffer (video captured *before* the motion event)
    # to '<motion_filename>-before.h264'.
    # Write the entire content of the circular buffer to disk. No need to
    # lock the stream here as we're definitely not writing to it
    # simultaneously
    global motion_filename
    with io.open(motion_filename + '-before.h264', 'wb') as output:
        # start at the first SPS header so the written stream is decodable
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        while True:
            buf = stream.read1()
            if not buf:
                break
            output.write(buf)
    # Wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
os.system('clear')
print "Motion Detection"
print "----------------"
print " "
with picamera.PiCamera() as camera:
    camera.resolution = (video_width, video_height)
    camera.framerate = 25
    camera.rotation = video_rotation
    camera.video_stabilization = True
    camera.annotate_background = True
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # low resolution motion vector analysis from splitter port 2
    camera.start_recording('/dev/null', splitter_port=2, resize=(motion_width,motion_height) ,format='h264', motion_output=MyMotionDetector(camera, size=(motion_width,motion_height)))
    # wait some seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    motion_detected = False
    print "Motion Capture ready!"
    try:
        while True:
            # refresh the timestamp annotation; MyMotionDetector.analyse
            # raises the motion_detected flag asynchronously
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
                print "Motion detected: " , dt.datetime.now()
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.gmtime(motion_timestamp))
                # divert the live recording away from the circular buffer
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                camera.capture_sequence([motion_filename + '.jpg'], use_video_port=True, splitter_port=0)
                # dump motion array as image
                if debug:
                    img = Image.fromarray(motion_array)
                    img.save(motion_filename + "-motion.png")
                # save circular buffer before motion event
                write_video(stream)
                #wait for end of motion event here
                while motion_detected:
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                #split video recording back in to circular buffer
                camera.split_recording(stream, splitter_port=1)
                # join the before/after pieces into one .h264 and drop the partials
                subprocess.call("cat %s %s > %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".h264", motion_filename + "-*.h264"), shell=True)
                print "Motion stopped:" , dt.datetime.now()
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
Code: Select all
wget https://raw.github.com/pageauc/pi-motion-lite/master/pi-motion-lite_2.py
Code: Select all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# to fix scripts, turning them from MS-DOS format to unix format
# to get rid of MSDOS format do this to this file: sudo sed -i s/\\r//g ./filename
# This script was originally created by by killagreg ¯ Thu Dec 18, 2014 7:53 am
# and by killagreg ¯ Fri Dec 19, 2014 7:09 pm
# see http://www.raspberrypi.org/forums/viewtopic.php?p=656881#p656881
#
# This script implements a motion capture surveillace cam for raspbery pi using picam.
# It uses the "motion vectors" magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
#
# APPARENTLY INSPIRED BY PICAMERA 1.8 TECHNIQUES documented at
# http://picamera.readthedocs.org/en/release-1.8/recipes2.html#rapid-capture-and-processing
# where the PICAMERA code uses efficient underlying mmal access and numpy code
#
# "the original and the best" script code by "killagreg" was at
# http://www.raspberrypi.org/forums/viewtopic.php?p=656881#p656881
#
# Modifications:
# 2014.12.26
# modified slightly for the boundary case of no motion detection "windows" - avoid performing the masking step
#
# notes:
# 1. it likely uses 100% cpu since it loops around infinitely in a "while true" condition until motion is detected
# 2. maybe a programmer could look at it and do something different also remembering not to delay start of capture ... feel free
# 3. the output video streams files are raw h264, NOT repeat NOT mpeg4 video files, so youll have to convert them to .mp4 yourself
# 4. To prepare for using this python script (yes, yes, 777, roll your own if you object)
# sudo apt-get update
# sudo apt-get upgrade
# sudo apt-get install python-picamera python-imaging-tk
# sudo mkdir /var/pymotiondetector
# sudo chmod 777 /var/pymotiondetector
#
# licensing:
# this being a derivative, whatever killagreg had (acknowledging killagreg code looks to be substantially from examples in
# the picamera documentation http://picamera.readthedocs.org/en/release-1.8/recipes2.html#rapid-capture-and-processing
#
#
# Example to convery h264 tp mp4
# sudo apt-get update
# sudo apt-get install gpac
# sudo MP4Box -fps <use capture framerate> -add raw_video.h264 -isma -new wrapped_video.mp4
#
# on windows:
# "C:\ffmpeg\bin\ffmpeg.exe" -f h264 -r <use capture framerate> -i "raw_video.h264" -c:v copy -an -movflags +faststart -y "wrapped_video.mp4"
# REM if necessary add -bsf:v h264_mp4toannexb before "-r"
# or
# "C:\MP4box\MP4Box.exe" -fps <use capture framerate> -add "raw_video.h264" -isma -new "wrapped_video.mp4"
#
import os
import subprocess
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
from PIL import Image
# ----------------------------------------------------------------------------------------------------------------
# in this section are parameters you can fiddle with
# debug mode? when True, the motion-vector array is dumped as a png next to each capture
debug = False # False
# setup filepath for motion capture data (which is in raw h264 format) plus the start-of-motion jpeg.
# sudo mkdir /var/pymotiondetector
# sudo chmod 777 /var/pymotiondetector
filepath = '/var/pymotiondetector'
# setup pre and post video recording around motion events (seconds)
video_preseconds = 3 # minimum 1
video_postseconds = 6 # minimum 1
# setup the main video/snapshot camera resolution
# see this link for a full discussion on how to choose a valid resolution that will work
# http://picamera.readthedocs.org/en/latest/fov.html
video_width = 640
#video_width = 1280
video_height = 480
#video_height = 720
# setup video rotation (0, 90, 180, 270)
video_rotation = 0
# setup the camera video framerate, PAL is 25, let's go for 5 instead
#video_framerate = 25
video_framerate = 5
# setup the camera to perform video stabilization
video_stabilization = True
# setup the camera to put a black background on the annotation (in our case, for date/time)
#video_annotate_background = True
video_annotate_background = False
# setup the camera to put frame number in the annotation
video_annotate_frame_num = True
# setup motion detection video resolution, equal or smaller than capture video resolution
# smaller = less cpu needed thus "better" and less likely to lose frames etc
motion_width = 320 #640
motion_height = 240 #480
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
#motion_threshold = 60
motion_threshold = 30
# setup motion detection sensitivity, i.e. number of motion blocks that trigger a motion detection
#motion_sensitivity = 10
motion_sensitivity = 6
# motion masks define areas within the motion analysis picture that are used for motion analysis
# [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
#
# default to no motion masking, ie use the "full area" of the lower-resolution-capture "motion vectors"
motion_mask_count = 0
# this is the whole "motion detection image frame"
#motion_mask_count = 1
#motion_masks = [ [[1,motion_width],[1,motion_height]] ]
# another example, one motion detection mask area
#motion_mask_count = 1
#motion_masks = [ [[270,370],[190,290]] ]
# example for 2 mask areas
#motion_mask_count = 2
#motion_masks = [ [[1,320],[1,240]], [[400,500],[300,400]] ]
# ----------------------------------------------------------------------------------------------------------------
# do not change code below the line
#-----------------------------------
motion_detected = False
motion_timestamp = time.time()
# the motion-vector array delivered by the encoder is one column wider than the
# 16x16 macro-block grid (a spare column is appended), hence the "+ 1"
if (motion_mask_count > 0) or (debug):
    motion_cols = (motion_width + 15) // 16 + 1
    motion_rows = (motion_height + 15) // 16
    motion_array = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
# create a zero "AND" motion mask of masked areas
# and then fill 1's into the mask areas of interest which we specified above
# (pixel coordinates are converted to macro-block coordinates by dividing by 16)
if motion_mask_count > 0:
    motion_array_mask = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
    for count in xrange(0, motion_mask_count):
        for col in xrange( (motion_masks[count][0][0]-1)//16, (motion_masks[count][0][1]-1+15)//16 ):
            for row in xrange( (motion_masks[count][1][0]-1)//16, (motion_masks[count][1][1]-1+15)//16 ):
                motion_array_mask[row][col] = 1
#motion_array_mask[4:8, 3:9] = 255
#call back handler for motion output data from h264 hw encoder
#this processes the motion ventors from the low resolution splitted capture
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Motion-vector analysis callback driven by the h264 hardware encoder.

    For every frame of the low-resolution split, the encoder hands over a
    structured array of macro-block motion vectors; analyse() derives their
    magnitudes and raises/clears the module-level `motion_detected` flag,
    which the main loop polls.
    """

    def analyse(self, a):
        """Inspect one frame of motion vectors and update detection state.

        a -- structured numpy array with 'x' and 'y' vector components,
             one entry per 16x16 macro block (plus one spare column).
        """
        global motion_detected, motion_timestamp, motion_array, motion_array_mask
        # calculate length (magnitude) of motion vectors of mpeg macro blocks.
        # FIX: np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24 -- use float directly (identical behavior).
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # zero out (mask out) anything outside our specified areas of interest, if we have a mask
        if motion_mask_count > 0:
            a = a * motion_array_mask
        # If there are more than 'sensitivity' vectors with a magnitude greater
        # than 'threshold', then say we've detected motion on this frame
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic: trigger on motion, stay triggered until
        # video_postseconds of inactivity have elapsed
        if th:
            motion_timestamp = now
        if motion_detected:
            if (now - motion_timestamp) >= video_postseconds:
                motion_detected = False
        elif th:
            motion_detected = True
        if debug:
            # highlight the blocks above threshold for the debug png dump
            idx = a > motion_threshold
            a[idx] = 255
            motion_array = a
def write_video(stream):
    """Dump the circular pre-motion buffer to '<motion_filename>-before.h264'.

    The stream is rewound to the first SPS header so the file begins on a
    decodable frame, copied out chunk by chunk, then emptied for reuse.
    No locking is needed: nothing else is writing to the stream here.
    """
    global motion_filename
    outfile = motion_filename + '-before.h264'
    with io.open(outfile, 'wb') as sink:
        # position the stream at the first SPS header
        for frm in stream.frames:
            if frm.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frm.position)
                break
        # drain everything from that point into the file
        piece = stream.read1()
        while piece:
            sink.write(piece)
            piece = stream.read1()
    # reset the circular buffer so it can fill again
    stream.seek(0)
    stream.truncate()
os.system('clear')
print "Motion Detection"
print "----------------"
print " "
with picamera.PiCamera() as camera:
    # main camera configuration (values come from the settings section above)
    camera.resolution = (video_width, video_height)
    camera.framerate = video_framerate
    camera.rotation = video_rotation
    camera.video_stabilization = video_stabilization
    camera.annotate_background = video_annotate_background
    camera.annotate_frame_num = video_annotate_frame_num
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # 1. split the hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # 2. split the low resolution motion vector analysis from splitter port 2, throw away the actual video
    camera.start_recording('/dev/null', splitter_port=2, resize=(motion_width,motion_height) ,format='h264', motion_output=MyMotionDetector(camera, size=(motion_width,motion_height)))
    # wait some seconds for stable video data to be available
    camera.wait_recording(2, splitter_port=1)
    motion_detected = False
    print "Motion Capture ready!"
    try:
        while True:
            # the callback "MyMotionDetector" has been setup above using the low resolution split;
            # it runs on the encoder callback and sets motion_detected, which this
            # loop polls as fast as it can (hence the high cpu use noted in the header)
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
                print "Motion detected: " , dt.datetime.now()
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.gmtime(motion_timestamp))
                # split the high res video stream to a file instead of to the internal circular buffer
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                camera.capture_sequence([motion_filename + '.jpg'], use_video_port=True, splitter_port=0)
                # if we want to see debug motion stuff, dump motion array as a png image
                if debug:
                    img = Image.fromarray(motion_array)
                    img.save(motion_filename + "-motion.png")
                # save circular buffer (the video from before the motion event) to a file
                write_video(stream)
                # wait for end of motion event here, in one second increments
                while motion_detected:
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                # split video recording back in to circular buffer
                camera.split_recording(stream, splitter_port=1)
                # join the -before/-after h264 parts into one file, then delete the parts
                subprocess.call("cat %s %s > %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".h264", motion_filename + "-*.h264"), shell=True)
                print "Motion stopped:" , dt.datetime.now()
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
Code: Select all
#!/usr/bin/python
# This script implements a motion capture surveillance cam for raspberry pi using picam.
# It uses the motion vectors magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
import os, logging
import subprocess
import threading
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
from PIL import Image
# debug mode? when nonzero, the motion-vector array is dumped as a png next to each capture
debug = 0
# setup filepath for motion and capture data output
filepath = '/var/www/motion'
# setup pre and post video recording around motion event (seconds)
video_preseconds = 3
video_postseconds = 3
# setup video resolution
video_width = 1280
video_height = 720
video_framerate = 25
# setup cam rotation (0, 180)
cam_rotation = 180
# setup motion detection resolution (smaller than capture resolution = less cpu)
motion_width = 320
motion_height = 240
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
motion_threshold = 60
# setup motion detection sensitivity, i.e. number of motion blocks that trigger a motion detection
motion_sensitivity = 6
# ranges of interest define areas within which the motion analysis is done
# [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
# default is the whole image frame
motion_roi_count = 1
motion_roi = [ [[1,motion_width], [1,motion_height]] ]
# another example
#motion_roi_count = 1
#motion_roi = [ [[270,370], [190,290]] ]
# example for 2 mask areas
#motion_roi_count = 2
#motion_roi = [ [[1,320],[1,240]], [[400,500],[300,400]] ]
# setup capture interval: seconds between webcam-mode snapshots (0/falsy disables)
capture_interval = 10
capture_filename = "snapshot"
# do not change code behind that line
#--------------------------------------
# event used to signal motion between the encoder callback and the main loop
motion_event = threading.Event()
motion_timestamp = time.time()
# the motion-vector array delivered by the encoder is one column wider than the
# 16x16 macro-block grid (a spare column is appended), hence the "+ 1"
if(motion_roi_count > 0) or (debug):
    motion_cols = (motion_width + 15) // 16 + 1
    motion_rows = (motion_height + 15) // 16
    motion_array = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
# create motion mask: 1 inside the regions of interest, 0 elsewhere
# (pixel coordinates are converted to macro-block coordinates by dividing by 16)
if motion_roi_count > 0:
    motion_array_mask = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
    for count in xrange(0, motion_roi_count):
        for col in xrange( (motion_roi[count][0][0]-1)//16, (motion_roi[count][0][1]-1+15)//16 ):
            for row in xrange( (motion_roi[count][1][0]-1)//16, (motion_roi[count][1][1]-1+15)//16 ):
                motion_array_mask[row][col] = 1
capture_timestamp = time.time()
#call back handler for motion output data from h264 hw encoder
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Motion-vector analysis callback driven by the h264 hardware encoder.

    For every frame of the low-resolution split, analyse() computes the
    macro-block motion-vector magnitudes and sets/clears the module-level
    `motion_event` threading.Event that the main loop waits on.
    """

    def analyse(self, a):
        """Inspect one frame of motion vectors and update `motion_event`.

        a -- structured numpy array with 'x' and 'y' vector components,
             one entry per 16x16 macro block (plus one spare column).
        """
        global motion_event, motion_timestamp, motion_array, motion_array_mask, motion_roi_count
        # calculate length (magnitude) of motion vectors of mpeg macro blocks.
        # FIX: np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24 -- use float directly (identical behavior).
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # mask out anything outside the configured regions of interest
        if motion_roi_count > 0:
            a = a * motion_array_mask
        # If there are more than 'sensitivity' vectors with a magnitude greater
        # than 'threshold', then say we've detected motion on this frame
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic: trigger on motion, stay triggered until
        # video_postseconds seconds of inactivity have elapsed
        if th:
            motion_timestamp = now
        if motion_event.is_set():
            if (now - motion_timestamp) >= video_postseconds:
                motion_event.clear()
        elif th:
            motion_event.set()
        if debug:
            # highlight the blocks above threshold for the debug png dump
            idx = a > motion_threshold
            a[idx] = 255
            motion_array = a
def write_video(stream):
    """Flush the circular pre-motion buffer to '<motion_filename>-before.h264'.

    Seeks back to the first SPS header so the resulting file starts on a
    decodable frame, streams the remainder out, then truncates the buffer.
    No stream locking is required: we are definitely not writing to it now.
    """
    global motion_filename
    target = motion_filename + '-before.h264'
    with io.open(target, 'wb') as dest:
        # rewind the stream to the first SPS header
        for video_frame in stream.frames:
            if video_frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(video_frame.position)
                break
        # copy the remainder of the buffer in read1-sized chunks
        chunk = stream.read1()
        while chunk:
            dest.write(chunk)
            chunk = stream.read1()
    # wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
# create the application logger
logger = logging.getLogger('PiCam')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('picam.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.info('PiCam has been started')
os.system('clear')
print "Motion Detection Application"
print "----------------------------"
print " "
print "Capture videos with %dx%d resolution" % (video_width, video_height)
print "Analyze motion with %dx%d resolution" % (motion_width, motion_height)
print "  resulting in %dx%d motion blocks" % (motion_cols, motion_rows)
with picamera.PiCamera() as camera:
    # main camera configuration (values come from the settings section above)
    camera.resolution = (video_width, video_height)
    camera.framerate = video_framerate
    camera.rotation = cam_rotation
    camera.video_stabilization = True
    camera.annotate_background = True
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    # low resolution motion vector analysis from splitter port 2
    camera.start_recording('/dev/null', splitter_port=2, resize=(motion_width,motion_height) ,format='h264', motion_output=MyMotionDetector(camera, size=(motion_width,motion_height)))
    # wait some seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    motion_event.clear()
    logger.info('waiting for motion')
    print "Waiting for Motion!"
    try:
        while True:
            # a motion event (set by MyMotionDetector on the encoder callback) triggers the capture below;
            # wait(1) doubles as the 1-second tick for the webcam-mode snapshot branch
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_event.wait(1):
                logger.info('motion detected')
                print "Motion detected: " , dt.datetime.now()
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.gmtime(motion_timestamp))
                # split the high res video stream to a file instead of to the circular buffer
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                camera.capture_sequence([motion_filename + '.jpg'], use_video_port=True, splitter_port=0)
                # dump motion array as image
                if debug:
                    img = Image.fromarray(motion_array)
                    img.save(motion_filename + "-motion.png")
                # save circular buffer (the video from before the motion event)
                write_video(stream)
                # wait for end of motion event here
                while motion_event.is_set():
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                # split video recording back in to circular buffer
                camera.split_recording(stream, splitter_port=1)
                # join the -before/-after parts into an mp4 via MP4Box, then delete the h264 parts
                subprocess.call("MP4Box -cat %s -cat %s %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".mp4", motion_filename + "-*.h264"), shell=True)
                logger.info('motion stopped')
                print "Motion stopped:" , dt.datetime.now()
            else:
                # webcam mode, capture images on a regular interval (same file is overwritten)
                if capture_interval:
                    if(time.time() > (capture_timestamp + capture_interval) ):
                        capture_timestamp = time.time()
                        print "Capture Snapshot:", dt.datetime.now()
                        camera.capture_sequence([filepath + "/" + capture_filename + ".jpg"], use_video_port=True, splitter_port=0)
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
Code: Select all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# to fix scripts, turning them from MS-DOS format to unix format
# to get rid of MSDOS format do this to this file: sudo sed -i s/\\r//g ./filename
# This script was originally created by by killagreg ¯ Thu Dec 18, 2014 7:53 am
# and by killagreg ¯ Fri Dec 19, 2014 7:09 pm
# see http://www.raspberrypi.org/forums/viewtopic.php?p=656881#p656881
#
# This script implements a motion capture surveillance cam for raspberry pi using picam.
# It uses the "motion vectors" magnitude of the h264 hw-encoder to detect motion activity.
# At the time of motion detection a jpg snapshot is saved together with a h264 video stream
# some seconds before, during and after motion activity to the 'filepath' directory.
#
# APPARENTLY INSPIRED BY PICAMERA 1.8 TECHNIQUES documented at
# http://picamera.readthedocs.org/en/release-1.8/recipes2.html#rapid-capture-and-processing
# where the PICAMERA code uses efficient underlying mmal access and numpy code
#
# "the original and the best" script code by "killagreg" was at
# http://www.raspberrypi.org/forums/viewtopic.php?p=656881#p656881
#
# Modifications:
# 2014.12.26
# - modified slightly for the boundary case of no motion detection "windows" - avoid performing the masking step
# 2014.12.28 (hey "killagreg", really nice updates)
# - incorporate latest changes by killagreg over christmas 2014
# from http://www.raspberrypi.org/forums/viewtopic.php?p=660572#p660572
# - added/changed "mp4 mode" to be optional and not the default (also added some MP4box flags)
# - repositioned a small bit of code to avoid a possible "initial conditions" bug
# - modified (webcam like) snapshot capture interval processing slightly
# - added extra logging
# - made use of localtime instead of GMT, for use in filenames
# - removed "print" commands and instead rely on logging
# - added circular file logging and specified the path of the log file
#
# notes:
# 1. it likely uses 100% cpu since it loops around infinitely in a "while true" condition until motion is detected
# 2. maybe a programmer could look at it and do something different also remembering not to delay start of capture ... feel free
# 3. the output video streams files are raw h264, NOT repeat NOT mpeg4 video files, so youll have to convert them to .mp4 yourself
# 4. To prepare for using this python script (yes, yes, 777, roll your own if you object)
# sudo apt-get install -y rpi-update
# sudo rpi-update
# sudo apt-get update
# sudo apt-get upgrade
# sudo apt-get install python-picamera python-imaging-tk gpac -y
# sudo mkdir /var/pymotiondetector
# sudo chmod 777 /var/pymotiondetector
#
# licensing:
# this being a derivative, whatever killagreg had (acknowledging killagreg code looks to be substantially from examples in
# the picamera documentation http://picamera.readthedocs.org/en/release-1.8/recipes2.html#rapid-capture-and-processing
# i.e. free for any and all use I guess
#
# Example to separately and externally convert h264 files to mp4, on the Pi (using MP4box from gpac)
# sudo MP4Box -fps <use capture framerate> -add raw_video.h264 -isma -new wrapped_video.mp4
#
# Example to separately and externally convert h264 files to mp4, on the Windows
# "C:\ffmpeg\bin\ffmpeg.exe" -f h264 -r <use capture framerate> -i "raw_video.h264" -c:v copy -an -movflags +faststart -y "wrapped_video.mp4"
# REM if necessary add -bsf:v h264_mp4toannexb before "-r"
# or
# "C:\MP4box\MP4Box.exe" -fps <use capture framerate> -add "raw_video.h264" -isma -new "wrapped_video.mp4"
#
import os
import logging
import logging.handlers
import subprocess
import io
import picamera
import picamera.array
import numpy as np
import datetime as dt
import time
from PIL import Image
# ----------------------------------------------------------------------------------------------------------------
# in this section are parameters you can fiddle with
# debug mode? dumps extra debug info (motion-vector png per capture)
debug = False # False
# mp4 mode ?
# if we set mp4_mode,
# then the h264 files are converted to an mp4 when the motion capture is completed, using MP4box (part of gpac)
# warning, warning, danger will robinson ...
# mp4 mode consumes a lot of CPU and elapsed time and it is almost certain that we will *lose frames*
# after a detection has finished, if a new movement occurs during that conversion
# For that reason I don't use mp4 mode
# and instead use the original quicker "cat" and separately convert the .h264 files later, if I want to.
mp4_mode = False
# setup filepath for motion capture data (which is in raw h264 format) plus the start-of-motion jpeg.
# sudo mkdir /var/pymotiondetector
# sudo chmod 777 /var/pymotiondetector
filepath = '/var/pymotiondetector'
logger_filename = filepath + '/pymotiondetector.log'
#logger_filename = 'pymotiondetector.log'
# setup pre and post video recording around motion events (seconds)
video_preseconds = 5 # minimum 1
video_postseconds = 10 # minimum 1
# setup the main video/snapshot camera resolution
# see this link for a full discussion on how to choose a valid resolution that will work
# http://picamera.readthedocs.org/en/latest/fov.html
video_width = 640
#video_width = 1280
video_height = 480
#video_height = 720
# setup the camera video framerate, PAL is 25, let's go for 5 instead
#video_framerate = 25
video_framerate = 5
# setup video rotation (0, 90, 180, 270)
video_rotation = 0
# setup the camera to perform video stabilization
video_stabilization = True
# setup the camera to put a black background on the annotation (in our case, for date/time)
#video_annotate_background = True
video_annotate_background = False
# setup the camera to put frame number in the annotation
video_annotate_frame_num = True
# we could setup a webcam mode, to capture images on a regular interval in between motion recordings
# setup jpeg capture snapshot interval (seconds, 0 disables) and filename prefix
snapshot_capture_interval = 0
#snapshot_capture_interval = 300
snapshot_capture_filename = "snapshot"
#--- now for the motion detection parameters
# define motion detection video resolution, equal or smaller than capture video resolution
# smaller = less cpu needed thus "better" and less likely to lose frames etc
motion_width = 320 #640
motion_height = 240 #480
# setup motion detection threshold, i.e. magnitude of a motion block to count as motion
motion_threshold = 60
#motion_threshold = 30
# setup motion detection sensitivity, i.e. number of motion blocks that trigger a motion detection
#motion_sensitivity = 10
motion_sensitivity = 6
# Ranges Of Interest define areas within which the motion analysis is done, within the smaller "motion detection video resolution"
# ie define areas within the motion analysis picture that are used for motion analysis
# [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
#
# default to no motion masking, ("0")
# ie use the "whole image frame" of the lower-resolution-capture "motion vectors"
# and avoid CPU/memory overheads of doing the masking
motion_roi_count = 0
# this is the whole "motion detection image frame"
#motion_roi_count = 1
#motion_roi = [ [[1,motion_width],[1,motion_height]] ]
# another example, one motion detection mask area
#motion_roi_count = 1
#motion_roi = [ [[270,370],[190,290]] ]
# example for 2 mask areas
#motion_roi_count = 2
#motion_roi = [ [[1,320],[1,240]], [[400,500],[300,400]] ]
# ----------------------------------------------------------------------------------------------------------------
# do not change code below the line
#-----------------------------------
# pre-initialise variables in case they're used later
motion_detected = False
motion_timestamp = time.time()
snapshot_capture_timestamp = time.time()
# the motion-vector array delivered by the encoder is one column wider than the
# 16x16 macro-block grid (a spare column is appended), hence the "+ 1"
motion_cols = (motion_width + 15) // 16 + 1
motion_rows = (motion_height + 15) // 16
if (motion_roi_count > 0) or (debug):
    motion_array = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
# create a zero "AND" motion mask of masked areas
# and then fill 1's into the mask areas of interest which we specified above
# (pixel coordinates are converted to macro-block coordinates by dividing by 16)
if motion_roi_count > 0:
    motion_array_mask = np.zeros((motion_rows, motion_cols), dtype = np.uint8)
    for count in xrange(0, motion_roi_count):
        for col in xrange( (motion_roi[count][0][0]-1)//16, (motion_roi[count][0][1]-1+15)//16 ):
            for row in xrange( (motion_roi[count][1][0]-1)//16, (motion_roi[count][1][1]-1+15)//16 ):
                motion_array_mask[row][col] = 1
#call back handler for motion output data from h264 hw encoder
#this processes the motion ventors from the low resolution splitted capture
class MyMotionDetector(picamera.array.PiMotionAnalysis):
    """Motion-vector analysis callback driven by the h264 hardware encoder.

    For every frame of the low-resolution split, analyse() derives the
    macro-block motion-vector magnitudes and raises/clears the module-level
    `motion_detected` flag, which the main loop polls.
    """

    def analyse(self, a):
        """Inspect one frame of motion vectors and update detection state.

        a -- structured numpy array with 'x' and 'y' vector components,
             one entry per 16x16 macro block (plus one spare column).
        """
        global motion_detected, motion_timestamp, motion_array, motion_array_mask, motion_roi_count
        # calculate length (magnitude) of motion vectors of mpeg macro blocks.
        # FIX: np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24 -- use float directly (identical behavior).
        a = np.sqrt(
            np.square(a['x'].astype(float)) +
            np.square(a['y'].astype(float))
        ).clip(0, 255).astype(np.uint8)
        # zero out (mask out) anything outside our specified areas of interest, if we have a mask
        if motion_roi_count > 0:
            a = a * motion_array_mask
        # If there are more than 'sensitivity' vectors with a magnitude greater
        # than 'threshold', then say we've detected motion on this frame
        th = ((a > motion_threshold).sum() > motion_sensitivity)
        now = time.time()
        # motion logic: trigger on motion, stay triggered until
        # video_postseconds of inactivity have elapsed
        if th:
            motion_timestamp = now
        if motion_detected:
            if (now - motion_timestamp) >= video_postseconds:
                motion_detected = False
        elif th:
            motion_detected = True
        if debug:
            # highlight the blocks above threshold for the debug png dump
            idx = a > motion_threshold
            a[idx] = 255
            motion_array = a
def write_video(stream):
    """Persist the pre-motion circular buffer as '<motion_filename>-before.h264'.

    The buffer is rewound to the first SPS header (so the file begins on a
    decodable frame), drained into the output file, and then truncated so it
    is ready for the next event. No locking needed -- nothing writes to the
    stream while this runs.
    """
    global motion_filename
    with io.open(motion_filename + '-before.h264', 'wb') as out_fh:
        # seek the stream to the first SPS header, if one is present
        for entry in stream.frames:
            if entry.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(entry.position)
                break
        # stream the rest of the buffer out to the file
        data = stream.read1()
        while data:
            out_fh.write(data)
            data = stream.read1()
    # empty the circular stream once we're done
    stream.seek(0)
    stream.truncate()
#-----------------------------------------
# logging setup: rotating file log (5 backups x 2MB) in the capture directory
#
logger = logging.getLogger('pymotiondetector')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.handlers.RotatingFileHandler(logger_filename, mode='a', maxBytes=(1024*1000 * 2), backupCount=5, delay=0)
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.info('---------------------------------')
logger.info('pymotiondetector has been started')
logger.info('---------------------------------')
msg = "Capture videos with %dx%d resolution" % (video_width, video_height)
logger.info(msg)
msg = "Analyze motion with %dx%d resolution" % (motion_width, motion_height)
logger.info(msg)
msg = "  resulting in %dx%d motion blocks" % (motion_cols, motion_rows)
logger.info(msg)
with picamera.PiCamera() as camera:
    # main camera configuration (values come from the settings section above)
    camera.resolution = (video_width, video_height)
    camera.framerate = video_framerate
    camera.rotation = video_rotation
    camera.video_stabilization = video_stabilization
    camera.annotate_background = video_annotate_background
    camera.annotate_frame_num = video_annotate_frame_num
    # setup a circular buffer holding the last video_preseconds of video
    stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # 1. split the hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    # 2. split the low resolution motion vector analysis from splitter port 2, throw away the actual video
    camera.start_recording('/dev/null', splitter_port=2, resize=(motion_width,motion_height) ,format='h264', motion_output=MyMotionDetector(camera, size=(motion_width,motion_height)))
    # wait some seconds for stable video data to be available
    camera.wait_recording(2, splitter_port=1)
    motion_detected = False
    logger.info('pymotiondetector has been started')
    logger.info('OK. Waiting for first motion to be detected')
    try:
        while True:
            # the callback "MyMotionDetector" (setup above on the low resolution split)
            # sets motion_detected; this loop polls that flag as fast as it can
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
                logger.info('detected motion')
                motion_filename = filepath + "/" + time.strftime("%Y%m%d-%H%M%S", time.localtime(motion_timestamp))
                # split the high res video stream to a file instead of to the internal circular buffer
                logger.info('splitting video from circular IO buffer to after-motion-detected h264 file ')
                camera.split_recording(motion_filename + '-after.h264', splitter_port=1)
                # catch an image as video preview during video recording (uses splitter port 0) at time of the motion event
                msg = "started capture jpeg image file %s" % (motion_filename + ".jpg")
                logger.info(msg)
                camera.capture_sequence([motion_filename + '.jpg'], use_video_port=True, splitter_port=0)
                msg = "finished capture jpeg image file %s" % (motion_filename + ".jpg")
                logger.info(msg)
                # if we want to see debug motion stuff, dump motion array as a png image
                if debug:
                    logger.info('saving debug motion vectors')
                    img = Image.fromarray(motion_array)
                    img.save(motion_filename + "-motion.png")
                # save circular buffer containing "before motion" event video, ie write it to a file
                logger.info('started saving before-motion circular buffer')
                write_video(stream)
                logger.info('finished saving before-motion circular IO buffer')
                #---- wait for the end of motion event here, in one second increments
                logger.info('start waiting to detect end of motion')
                while motion_detected:
                    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    camera.wait_recording(1, splitter_port=1)
                #---- end of motion event detected
                logger.info('detected end of motion')
                # split video recording back in to circular buffer
                logger.info('splitting video back into the circular IO buffer')
                camera.split_recording(stream, splitter_port=1)
                if mp4_mode:
                    # expensive: wrap the two h264 parts into an mp4 (may lose frames if new motion occurs meanwhile)
                    msg = "started copying h264 into mp4 file %s" % (motion_filename + ".mp4")
                    logger.info(msg)
                    msg = "MP4Box -fps %d -cat %s -cat %s -isma -new %s && rm -f %s && rm -f %s" % (video_framerate, motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".mp4", motion_filename + "-before.h264", motion_filename + "-after.h264")
                    logger.info(msg)
                    subprocess.call(msg, shell=True)
                    msg = "finished copying h264 into mp4 file %s" % (motion_filename + ".mp4")
                    logger.info(msg)
                else:
                    # cheap: concatenate the raw h264 parts; convert to mp4 later if wanted
                    msg = "started concatenating h264 files into %s" % (motion_filename + ".h264")
                    logger.info(msg)
                    msg = "cat %s %s > %s && rm -f %s && rm -f %s" % (motion_filename + "-before.h264", motion_filename + "-after.h264", motion_filename + ".h264", motion_filename + "-before.h264", motion_filename + "-after.h264")
                    logger.info(msg)
                    subprocess.call(msg, shell=True)
                    msg = "finished concatenating h264 files into %s" % (motion_filename + ".h264")
                    logger.info(msg)
                msg = "Finished capture processing, entering constant loop state awaiting next motion detection by class MyMotionDetector ..."
                logger.info(msg)
                snapshot_capture_timestamp = time.time()
            else:
                # no motion detected or in progress - if webcam mode, capture images on a regular interval
                if (snapshot_capture_interval > 0):
                    if(time.time() > (snapshot_capture_timestamp + snapshot_capture_interval) ):
                        # BUG FIX: name the snapshot after the *current* time.
                        # The original used motion_timestamp (the time of the last
                        # detected motion), so every snapshot taken between motion
                        # events reused the same stale name and overwrote the
                        # previous snapshot.
                        snapf = filepath + "/" + snapshot_capture_filename + "-" + time.strftime("%Y%m%d-%H%M%S", time.localtime())
                        camera.capture_sequence([snapf + ".jpg"], use_video_port=True, splitter_port=0)
                        snapshot_capture_timestamp = time.time()
                        logger.info("Captured snapshot")
    finally:
        camera.stop_recording(splitter_port=1)
        camera.stop_recording(splitter_port=2)
See here:
2014-12-28 15:54:39,904 - pymotiondetector - INFO - splitting video from circular IO buffer to after-motion-detected h264 file
2014-12-28 15:54:50,184 - pymotiondetector - INFO - started capture jpeg image file /var/pymotiondetector/20141228-155439.jpg
Code: Select all
2014-12-28 15:54:36,898 - pymotiondetector - INFO - ---------------------------------
2014-12-28 15:54:36,903 - pymotiondetector - INFO - pymotiondetector has been started
2014-12-28 15:54:36,906 - pymotiondetector - INFO - ---------------------------------
2014-12-28 15:54:36,909 - pymotiondetector - INFO - Capture videos with 640x480 resolution
2014-12-28 15:54:36,911 - pymotiondetector - INFO - Analyze motion with 320x240 resolution
2014-12-28 15:54:36,914 - pymotiondetector - INFO - resulting in 21x15 motion blocks
2014-12-28 15:54:39,763 - pymotiondetector - INFO - pymotiondetector has been started
2014-12-28 15:54:39,765 - pymotiondetector - INFO - OK. Waiting for first motion to be detected
2014-12-28 15:54:39,901 - pymotiondetector - INFO - detected motion
2014-12-28 15:54:39,904 - pymotiondetector - INFO - splitting video from circular IO buffer to after-motion-detected h264 file
2014-12-28 15:54:50,184 - pymotiondetector - INFO - started capture jpeg image file /var/pymotiondetector/20141228-155439.jpg
2014-12-28 15:54:50,358 - pymotiondetector - INFO - finished capture jpeg image file /var/pymotiondetector/20141228-155439.jpg
2014-12-28 15:54:50,361 - pymotiondetector - INFO - started saving before-motion circular buffer
2014-12-28 15:54:50,433 - pymotiondetector - INFO - finished saving before-motion circular IO buffer
2014-12-28 15:54:50,436 - pymotiondetector - INFO - start waiting to detect end of motion
2014-12-28 15:55:00,466 - pymotiondetector - INFO - detected end of motion
2014-12-28 15:55:00,469 - pymotiondetector - INFO - splitting video back into the circular IO buffer
2014-12-28 15:55:01,999 - pymotiondetector - INFO - started concatenating h264 files into /var/pymotiondetector/20141228-155439.h264
2014-12-28 15:55:02,001 - pymotiondetector - INFO - cat /var/pymotiondetector/20141228-155439-before.h264 /var/pymotiondetector/20141228-155439-after.h264 > /var/pymotiondetector/20141228-155439.h264 && rm -f /var/pymotiondetector/20141228-155439-before.h264 && rm -f /var/pymotiondetector/20141228-155439-after.h264
2014-12-28 15:55:02,078 - pymotiondetector - INFO - finished concatenating h264 files into /var/pymotiondetector/20141228-155439.h264
2014-12-28 15:55:02,081 - pymotiondetector - INFO - Finished capture processing, entering constant loop state awaiting next motion detection by class MyMotionDetector ...
We do not lose video there, because switching from the circular buffer to the "after" file happens within one frame. As for speed, logging suggests there is at least one potential choke point (pending checking for a bug) where we lose around 10 seconds of motion detection and recording — around the time of splitting from the circular IO memory buffer into an "after" h264 file.