Data is not appearing in dashboard

Hello Losant team. I'm doing a university project on sound detection combined with object detection, and I want to show the image captured from my Raspberry Pi and the detected sound level (dB) on Losant. The issue I am facing is that the captured image and the detected sound level won't show up in my Losant dashboard. I have already installed losant-mqtt on my Raspberry Pi 4 and created a device and a dashboard in Losant. I used a Gauge block and an Image block to display/visualize the data coming from my Pi. When I tried running the script in Thonny/the terminal, I got the error "Exception: Invalid Losant credentials - error code 5". Also, how can I use an Indicator block to show when the detected sound is greater than the set threshold and a human is detected? Do I need a workflow for that even though my script already handles the threshold and the object detection? What should I do? I am having a hard time figuring it out. This is my first time using Losant.

Hi @Sam_Li , and welcome to the Losant Forums.

Based on your explanation and what I am seeing in your account, you have not yet successfully connected your Raspberry Pi to Losant’s MQTT broker. I see you have a device and an access key set up on the Losant side, so perhaps the issue is related to utilizing those credentials in the losant-mqtt client.

Can you share a code snippet for what you have running on your Pi to establish the connection? You can send it to me in a DM or, if you place the snippet in this thread, make sure to remove the access key and secret first.

Here is my code

import RPi.GPIO as GPIO
import cv2
import time
import numpy
from losantmqtt import Device

# Create the Losant MQTT client. The three placeholders must be replaced
# with the real device ID, access key, and access secret from the Losant
# application (see the reply below for where to find each one).
# FIX: the original paste used curly "smart" quotes (“ ”) around the string
# literals, which is a SyntaxError in Python — plain ASCII quotes required.
device = Device("my-device-id", "my-app-access-key", "my-app-access-secret")

# Block until the MQTT connection is established; invalid credentials raise
# "Invalid Losant credentials - error code 5" here.
device.connect(blocking=True)

#GPIO SETUP
# BCM pin number wired to the sound sensor's digital output.
channel = 17
# Intended sound-detection threshold (sensor units).
# NOTE(review): confirm this value is actually applied anywhere — the
# detection logic below never reads it.
threshold = 500
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setup(channel, GPIO.IN)  # read the sound sensor as a digital input
GPIO.setwarnings(False)  # silence "channel already in use" warnings

def callback(channel):
    """GPIO edge callback: when the sound-sensor pin reads HIGH, capture one
    camera frame, run person detection on it, and save an annotated photo.

    Fixes relative to the pasted original:
    - restored the indentation lost in the paste (the first three body lines
      were at column 0 — a SyntaxError);
    - replaced curly quotes in the print() call with ASCII quotes;
    - removed the no-op expression `threshold > 500` (it compared and
      discarded the result);
    - the inner `while True` capture loop never returned, which froze the
      GPIO event thread after the first sound; one frame is captured instead;
    - the camera handle is now released after use.
    """
    if not GPIO.input(channel):
        # Falling edge (sound stopped) — nothing to do.
        return

    print("Sound Detected!")

    # --- Detection model setup -------------------------------------------
    # NOTE(review): loading the model on every sound event is slow; consider
    # hoisting this to module level once the pipeline works end to end.
    classFile = "/home/pi/Desktop/Object_Detection_Files/coco.names"
    with open(classFile, "rt") as f:
        classNames = f.read().rstrip("\n").split("\n")

    configPath = "/home/pi/Desktop/Object_Detection_Files/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
    weightsPath = "/home/pi/Desktop/Object_Detection_Files/frozen_inference_graph.pb"

    net = cv2.dnn_DetectionModel(weightsPath, configPath)
    net.setInputSize(320, 320)
    net.setInputScale(1.0 / 127.5)
    net.setInputMean((127.5, 127.5, 127.5))
    net.setInputSwapRB(True)

    def getObjects(img, thres, nms, draw=True, objects=None):
        """Run detection on img; return (img, [[box, className], ...]).

        Only classes listed in `objects` are kept (all classes when None /
        empty). When draw is True the frame is annotated and written to disk.
        FIX: `objects` defaulted to a mutable [] (shared across calls), and
        the `nms` parameter was ignored in favor of a hard-coded 0.2.
        """
        if not objects:
            objects = classNames
        classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nms)
        objectInfo = []
        if len(classIds) != 0:
            for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
                className = classNames[classId - 1]
                if className not in objects:
                    continue
                objectInfo.append([box, className])
                if draw:
                    cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
                    cv2.putText(img, className.upper(), (box[0] + 10, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    # Timestamped filename for the saved annotated photo.
                    t = time.strftime("%Y-%m-%d_%H-%M-%S")
                    file = '/home/pi/Desktop/FINALS/photos/' + t + '.jpg'
                    cv2.imwrite(file, img)
        return img, objectInfo

    # --- Capture and process a single frame ------------------------------
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # frame width
    cap.set(4, 480)  # frame height
    try:
        success, img = cap.read()
        if not success:
            return  # camera unavailable; nothing to process
        result, objectInfo = getObjects(img, 0.50, 0.2, objects=['person'])
        cv2.imshow("Output", img)
        cv2.waitKey(1)
    finally:
        cap.release()  # don't hold the camera between events

# Register the sound-sensor pin for edge detection: GPIO.BOTH fires the
# callback on both rising and falling edges; bouncetime debounces (ms).
GPIO.add_event_detect(channel, GPIO.BOTH, bouncetime=300) # let us know when the pin goes HIGH or LOW
GPIO.add_event_callback(channel, callback) # assign function to GPIO PIN, Run function on change
def img():
    """Placeholder stub.

    FIX: an empty `def` body is a SyntaxError in Python, so the pasted
    script could not run at all. TODO: make this return the most recent
    saved photo (e.g. its file path) so the main loop can report it.
    """
    pass


def servo1():
    """Placeholder stub (empty body was a SyntaxError).

    NOTE(review): the module-level `servo1 = GPIO.PWM(12, 50)` below rebinds
    this name, so this function is shadowed as written — use distinct names.
    """
    pass
# --- Servo sweep (runs once at import/startup) -------------------------
GPIO.setmode(GPIO.BCM)

GPIO.setup(12,GPIO.OUT)
# NOTE(review): this rebinds the name `servo1`, shadowing the `def servo1():`
# stub above — pick distinct names.
servo1 = GPIO.PWM(12,50) # BCM pin 12, 50 = 50Hz pulse

#start PWM running, but with value of 0 (pulse off)
servo1.start(0)
time.sleep(2)

# Define variable duty
duty = 2

# Loop for duty values from 2 to 12 (0 to 180 degrees)
while duty <= 12:
    servo1.ChangeDutyCycle(duty)
    time.sleep(1)
    duty = duty + 1

# Hold at the end of the sweep, then move to mid position (duty 7).
time.sleep(2)
servo1.ChangeDutyCycle(7)
time.sleep(2)

# Return to the start position, then stop driving (duty 0 = no pulse).
servo1.ChangeDutyCycle(2)
time.sleep(0.5)
servo1.ChangeDutyCycle(0)

# Main reporting loop: service the MQTT connection and push state to Losant
# once per second.
# FIXES relative to the pasted original: the loop body lost its indentation
# in the paste (SyntaxError), and the state-attribute dicts used curly
# quotes (“soundlevel”), another SyntaxError.
while True:
    device.loop()  # keep the MQTT connection serviced
    if device.is_connected():
        # NOTE(review): callback() is the GPIO event handler and returns no
        # value, so both of these are None and the dashboard will have
        # nothing to chart. Have the callback store the measured sound level
        # and the saved image path, and report those values here instead.
        sound = callback(channel)
        image = callback(channel)
        device.send_state({"soundlevel": sound})
        device.send_state({"images": image})

    time.sleep(1)

The fact that the error you are getting back is about invalid credentials leads me to believe that you are not putting the right values in here for your device ID, access key, and access secret. Can you confirm that you are pulling these values from the correct spots?

The device ID can be retrieved by clicking the “Copy” button next to the device name when viewing its detail page. It is also available in the URL when viewing the device detail page, as well as on the full list of your application’s devices.

The access key and access secret are available in the modal displayed after creating an access key that is configured to allow connection for your device.

More information can be found here -