Page 1 of 1

how to receive data from mqtt and open cv video stream at the same time

Posted: Thu Jun 25, 2020 12:51 pm
by Vincentyu
My project needs to receive MQTT data and video-stream data at the same time. However, when my Raspberry Pi is receiving video data through OpenCV, it can't receive IoT data at the same time. (Right now I can only do each one separately :| )

My Raspberry Pi needs to check whether IoT sensor data is present; if it is, the video will not turn on and receive data.
So the priority is:
IoT sensor detects something -----> video will not turn on. (case 1)
IoT sensor detects nothing -------> video turns on and receives further image data. (case 2)

Here is how I get the IoT sensor data over MQTT

Code: Select all

# Main template for our paho.mqtt.client code.
# Obtains the potentiometer reading from the ESP8266 by
# listening on topic "/esp/pot".
import paho.mqtt.client as mqtt


def on_connect(client, userdata, flags, rc):
    """Callback fired when the client receives a CONNACK response from the broker."""
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("/esp/pot")  # remember this topic to put inside ESP code later


def on_message(client, userdata, msg):
    """Callback fired for every PUBLISH message received from the broker.

    msg.payload arrives as raw bytes.  A malformed (non-integer) payload
    would raise ValueError and kill the network loop, so it is guarded.
    """
    try:
        cX = int(msg.payload)
    except ValueError:
        print("Ignoring non-integer payload: %r" % (msg.payload,))
        return
    print(cX)


client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

client.connect("localhost", 1883, 60)  # localhost is the Raspberry Pi itself

# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
Here is how my Raspberry Pi gets the video-stream data with OpenCV

Code: Select all

 # USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import serial
import struct

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=0,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (25, 96, 49)
greenUpper = (39, 255, 255)

# NOTE(review): with the default --buffer of 0 this deque discards every
# point appended to it — confirm that is intentional.
pts = deque(maxlen=args["buffer"])

ser = serial.Serial('/dev/ttyACM0', 9600)

# if a video path was not supplied, grab the reference to the webcam;
# otherwise grab a reference to the video file
if not args.get("video", False):
    vs = VideoStream(src=0).start()
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=248)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then erode/dilate to
    # remove small blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use it to compute
        # the minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # BUG FIX: guard against a zero moment (degenerate contour),
        # which would raise ZeroDivisionError
        if M["m00"] > 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            center = (cX, cY)
            # only proceed if the radius meets a minimum size
            if radius > 2:
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                print(cX)
    else:
        # BUG FIX: the original `if len(cnts) == 0:` test was nested inside
        # the `len(cnts) > 0` branch and therefore unreachable dead code;
        # the sentinel value 249 must be emitted when NO contour is found.
        cX = 249
        print(cX)

    # show the frame to our screen
    cv2.imshow("Frame", frame)

    chkKey = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if chkKey == ord("q"):
        cX = 0
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the video file pointer
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()

I want to keep the exit behavior where pressing the "q" key on my keyboard stops the program.

Re: how to receive data from mqtt and open cv video stream at the same time

Posted: Mon Jun 29, 2020 3:46 pm
by Vincentyu
[Update 1] I tried to combine the code as shown below, but I keep getting the value 247; the program never enters the while loop because the cX != 247 condition is never satisfied.

What I want is for the code to check, each time, whether cX = 247 has been received: if not, run the camera and track the object; if it has, execute the other action.

Code: Select all

# USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py

# import the necessary packages
import argparse
import logging
import struct
import time
from collections import deque

import cv2
import imutils
import numpy as np
import serial
import paho.mqtt.client as mqtt
from imutils.video import VideoStream

def on_connect(client, userdata, flags, rc):
    """Subscribe to the sensor topic once the broker acknowledges the connection.

    Subscribing here (rather than once at startup) means the subscription is
    automatically renewed after a reconnect.
    """
    topic = "/esp/pot"
    client.subscribe(topic)
    
def on_message(client, userdata, msg):
    """Handle a PUBLISH from the broker and update the module-level flag.

    BUG FIX: the original assigned `cX` as a *local* variable, so the
    module-level `cX` tested by the main loop's `while (cX != 247)` was
    never updated — the loop could never start.  Declaring it global makes
    the sensor reading visible to the video loop.
    """
    global cX
    try:
        # payload is raw bytes; a non-integer payload would otherwise raise
        # ValueError inside the network thread
        cX = int(msg.payload) * 247
    except ValueError:
        return
    print(cX)
    
def on_disconnect(client, userdata, rc=0):
    """Log the disconnect result code and stop the background network loop.

    BUG FIX: this function used `logging` without it ever being imported,
    so any disconnect raised NameError; `import logging` is added at the
    top of the file.
    NOTE(review): this callback is never registered via
    `client.on_disconnect = on_disconnect` — add that assignment if
    disconnect handling is actually wanted.
    """
    logging.debug("DisConnected result code " + str(rc))
    client.loop_stop()
    
# --- MQTT client wiring --------------------------------------------------
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# NOTE(review): on_disconnect is defined above but never registered;
# add `client.on_disconnect = on_disconnect` if it is meant to be used.

client.connect("localhost", 1883, 60) # localhost is the Raspberry Pi itself


# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=0,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV colorspace, then initialize the
# list of tracked points
greenLower = (25, 96, 49)
greenUpper = (39, 255, 255)
redLower = (0,132,153)
redUpper = (15, 255, 255)

# NOTE(review): with the default --buffer of 0 this deque has maxlen=0 and
# silently discards everything appended to it — confirm that is intended.
pts = deque(maxlen=args["buffer"])

ser = serial.Serial('/dev/ttyUSB0',9600)
            

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)
# NOTE(review): cX is initialized to 247 and the loop below is
# `while (cX != 247)`, so the loop body can never execute unless
# on_message updates the *global* cX to some other value before this
# line runs — this is the bug the poster describes.  Consider
# `while True:` with an `if cX != 247:` check inside instead.
cX=247
# start the MQTT network loop in a background thread so message
# callbacks run concurrently with the video loop below
client.loop_start()

# keep looping
while (cX!=247):
    # grab the current frame
    frame = vs.read()
    :
    :
    :
The rest of the code is the same. Could anyone offer help, please?