hsapi's package structure is as follows:
hsapi
├── __init__.py
├── core
│   ├── __init__.py
│   ├── base.py
│   ├── device.py
│   └── graph.py
├── high
│   ├── __init__.py
│   ├── net.py
│   └── task.py
└── easy
    ├── __init__.py
    └── prefab.py
core
    Low-Level Interface.
high
    High-Level Interface. Abstracts a neural network base class to simplify the workflow.
easy
    Provides simple classes and methods that use the built-in neural networks.

# Import libs
import numpy
import hsapi as hs
# Get Device
device_list = hs.EnumerateDevices() # Get all connected devices
device = hs.Device(device_list[0])
# Open Device
device.OpenDevice()
"""
# [Optional, depending on business needs]
# Read an image from the Horned Sungem's built-in camera
image = device.GetImage(zoomMode=True)  # zoomMode: True (640x360), False (1920x1080)
"""
# Manage neural network resources, process data, implement business logic, etc.
...
# Close the device and the Horned Sungem will automatically reset
device.CloseDevice()
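Putting the skeleton above together, here is a minimal runnable sketch that previews frames from the built-in camera and then closes the device. It only uses the calls shown above and assumes, as the later examples do, that the returned image can be displayed directly with cv2.imshow:

import cv2
import hsapi as hs

device_list = hs.EnumerateDevices()
device = hs.Device(device_list[0])
device.OpenDevice()
try:
    while True:
        image = device.GetImage(zoomMode=True)  # 640x360 frame from the built-in camera
        cv2.imshow("horned-sungem preview", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):   # Press 'q' to stop previewing
            break
finally:
    device.CloseDevice()  # The Horned Sungem resets automatically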
- Single-device, Multi-model: the same Device instance loads multiple graph files, yielding multiple Graph instances (a sketch follows the walkthrough below).
- Multi-device, Multi-model: different Device instances each load their own graph file, yielding the corresponding Graph instances.
# Load a graph file and get a Graph instance
with open('Graph filePath', mode='rb') as f:
    data = f.read()
graph = device.AllocateGraph(data, scale, mean)  # scale/mean: image preprocessing parameters
# Feed an image into the neural network; there are two ways (choose one)
# 1. Load an external image; here a USB camera is used as an example
"""
import cv2
capture = cv2.VideoCapture(0)
_, image = capture.read()
image = image_preprocess(image)  # Preprocess the image into the format and size required by the network
graph.LoadTensor(image.astype(numpy.float16), None)  # Load the image as the network input
"""
# 2. Use the Horned Sungem's built-in camera as input
image = graph.GetImage(True)  # Returns the camera image; zoomMode: True (640x360), False (1920x1080)
# Read neural network output
output, _ = graph.GetResult()
# Analyze neural network output to implement business logic
...
# Release neural network resources
graph.DeallocateGraph()
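A minimal sketch of the single-device, multi-model pattern listed above, reusing the device, scale, and mean from the walkthrough. The two graph file paths are placeholders, and it is assumed here that both models share the same scale/mean preprocessing parameters:

# Single-device Multi-model (low-level sketch)
graphs = []
for path in ['face graph filePath', 'object graph filePath']:  # Placeholder paths
    with open(path, mode='rb') as f:
        data = f.read()
    graphs.append(device.AllocateGraph(data, scale, mean))  # One Graph instance per model
face_graph, obj_graph = graphs
# ... feed input and read output from each Graph as shown above ...
for g in graphs:
    g.DeallocateGraph()  # Release every Graph before closing the device
device.CloseDevice()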
# Single-device Single-model
import cv2, numpy
import hsapi as hs  # import libs

scale = 0.007843  # Image preprocessing parameter
mean = -1.0       # Image preprocessing parameter

device_list = hs.EnumerateDevices()  # Get all connected devices
device = hs.Device(device_list[0])   # Get a Device instance
device.OpenDevice()                  # Open the device

with open('Graph filePath', mode='rb') as f:
    data = f.read()
graph = device.AllocateGraph(data, scale, mean)  # Get a Graph instance

try:
    while True:
        # Use the built-in camera of the Horned Sungem as input
        image = graph.GetImage(True)   # Returns the camera image
        output, _ = graph.GetResult()  # Read the neural network output
        print(output)
        cv2.imshow("horned-sungem", image)
        cv2.waitKey(1)
finally:
    graph.DeallocateGraph()  # Release neural network resources
    device.CloseDevice()     # Close the device
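The try/finally pattern above is deliberate: DeallocateGraph() and CloseDevice() still run when the display loop is interrupted (for example with Ctrl-C), so the graph resources are released and the Horned Sungem resets cleanly.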
Net is an abstract class that encapsulates some simple device operations.
Based on this class, you can implement your own neural network class; refer to the preset model classes in the easy.prefab module.
# Single-device Single-model
import cv2, numpy
from hsapi import FaceDetector  # import libs

# The model path can be specified via the 'graphPath' argument; the default path is under 'examples/graphs'.
net = FaceDetector(graphPath="./graph_face_SSD", zoom=True, thresh=0.55)  # Create a face detection network

try:
    while True:
        # Use the built-in camera of the Horned Sungem as input
        result = net.run(image=None)  # image=None: the built-in camera supplies the input image
        image = net.plot(result)      # Draw the detections on the image
        cv2.imshow("FaceDetector", image)
        cv2.waitKey(1)
finally:
    net.quit()  # Exit the neural network, release resources, and reset the device
# Single-device Multi-model
import cv2
import hsapi as hs

device_list = hs.EnumerateDevices()  # Get all connected devices
device = hs.Device(device_list[0])   # Get a Device instance
face_net = hs.FaceDetector(device=device, zoom=True, thresh=0.55)   # Create a face detection network
obj_net = hs.ObjectDetector(device=device, zoom=True, thresh=0.55)  # Create an object detection network
"""
Multi-device Multi-model: use a different Device instance when initializing each Net
"""
capture = cv2.VideoCapture(0)  # Open a USB camera

try:
    while True:
        # Use a USB camera as input
        _, image = capture.read()          # Read a frame from the USB camera
        face_result = face_net.run(image)  # Feed the image into the face detection network
        obj_result = obj_net.run(image)    # Feed the image into the object detection network
        # Draw the face detection results on the image
        image = face_net.overlay(image, face_result[1])
        # Draw the object detection results on the image
        image = obj_net.overlay(image, obj_result[1])
        cv2.imshow("Face/Obj Detector", image)
        cv2.waitKey(1)
finally:
    face_net.quit()  # Reset the device; networks sharing one device only need one of them to quit
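For the multi-device, multi-model case mentioned in the comment above, the only change is to create one Device instance per Horned Sungem and pass a different instance to each Net. A minimal sketch, assuming at least two devices are connected:

# Multi-device Multi-model (sketch)
import hsapi as hs

device_list = hs.EnumerateDevices()      # Expects at least two connected devices
face_device = hs.Device(device_list[0])  # First Horned Sungem
obj_device = hs.Device(device_list[1])   # Second Horned Sungem
face_net = hs.FaceDetector(device=face_device, zoom=True, thresh=0.55)
obj_net = hs.ObjectDetector(device=obj_device, zoom=True, thresh=0.55)
# ... run both networks on the same frame as in the example above ...
face_net.quit()  # Each device hosts its own network, so quit both
obj_net.quit()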
The image must be preprocessed to match the requirements of the neural network before it is used as input.
When the Horned Sungem's built-in camera is used as the network input, only a uniform preprocessing operation applied to all RGB channels of the image is currently supported.
Both internally on the Horned Sungem and in the Python API, images are preprocessed with a multiply-add operation:
image *= scale
image += mean
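For example, with the values used in this document (scale = 0.007843, roughly 1/127.5, and mean = -1.0), a uint8 pixel value of 0 maps to -1.0 and 255 maps to about 1.0, so the input is normalized to approximately the range [-1, 1].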
If all three channels of the image are processed uniformly, or the preprocessing requirements are not strict, the average of the per-channel means can be passed as the mean parameter.
mean = -1.0 # [-1.0, -1.0, -1.0]
scale = 0.007843
graph = device.AllocateGraph(data, scale, mean) # scale & mean
# The built-in camera image has already been preprocessed and loaded into the neural network;
# the returned image is the unprocessed original.
image = graph.GetImage(True)
output, _ = graph.GetResult() # Read neural network output
If grayscale input is required, or different channels need different preprocessing, you can preprocess the image yourself and load it into the Horned Sungem:
image = preprocess(image) # preprocess by yourself
graph.LoadTensor(image.astype(numpy.float16), None)
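As a concrete illustration of such a do-it-yourself preprocess step, the sketch below resizes an OpenCV frame, converts the channel order, and applies a per-channel multiply-add before handing the result to LoadTensor. The 300x300 input size and the per-channel mean values are placeholders that depend on your particular network, not values taken from this document:

import cv2
import numpy

def preprocess(image, size=(300, 300), scale=0.007843, mean=(-1.0, -1.0, -1.0)):
    # Example-only preprocessing: resize, reorder channels, then multiply-add per channel
    image = cv2.resize(image, size)                  # Resize to the network's input size (placeholder)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # OpenCV frames are BGR; convert if the network expects RGB
    image = image.astype(numpy.float32) * scale      # Scale pixel values
    image += numpy.array(mean, dtype=numpy.float32)  # Add a (possibly different) mean per channel
    return image

# Then load the manually preprocessed image into the Horned Sungem:
# graph.LoadTensor(preprocess(image).astype(numpy.float16), None)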