Merge pull request #1043 from luxonis/develop
DepthAI SDK 1.11.0
daniilpastukhov authored Jun 5, 2023
2 parents 251296f + 492501e commit a001a2a
Showing 42 changed files with 1,128 additions and 342 deletions.
8 changes: 8 additions & 0 deletions .github/workflows/python-publish.yml
@@ -43,3 +43,11 @@ jobs:
           user: __token__
           password: ${{ secrets.PYPI_API_TOKEN }}
           packages_dir: depthai_sdk/dist/
+
+      - name: Repository Dispatch
+        uses: peter-evans/repository-dispatch@v2
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          event-type: robothub-oak-release
+          repository: luxonis/robothub-images
+          client-payload: '{"ref": "${{ github.ref }}"}'
3 changes: 0 additions & 3 deletions .gitmodules
@@ -1,6 +1,3 @@
[submodule "depthai_sdk/src/depthai_sdk/integrations/depthai_pipeline_graph"]
path = depthai_sdk/src/depthai_sdk/integrations/depthai_pipeline_graph
url = https://github.com/luxonis/depthai_pipeline_graph
[submodule "resources/depthai_boards"]
path = resources/depthai_boards
url = https://github.com/luxonis/depthai-boards
2 changes: 1 addition & 1 deletion depthai_sdk/docs/source/conf.py
@@ -22,7 +22,7 @@
 author = 'Luxonis'
 
 # The full version, including alpha/beta/rc tags
-release = '1.10.1'
+release = '1.11.0'
 
 
 # -- General configuration ---------------------------------------------------
35 changes: 35 additions & 0 deletions depthai_sdk/docs/source/features/auto_ir.rst
@@ -0,0 +1,35 @@
Automatic IR power control
==========================

.. note:: This feature is only available on OAK devices with IR lights.

.. note:: This feature is experimental; please report any issues you encounter
   to the Luxonis team.

**Automatic IR power control** is a feature that allows the device to automatically
adjust the IR power based on the scene. This is useful for applications where
the scene is not always the same, for example when the camera is used in an
outdoor environment.

To enable automatic IR power control, use the :meth:`set_auto_ir <depthai_sdk.StereoComponent.set_auto_ir>` method,
which accepts two parameters:

- ``auto_mode`` - ``True`` to enable automatic IR power control, ``False`` to disable it.
- ``continuous_mode`` - ``True`` to enable continuous mode, ``False`` otherwise. Requires ``auto_mode`` to be enabled.

When **automatic mode** is enabled, the device will automatically adjust the IR power after startup.
The disparity map will be analyzed with different dot projector and illumination settings,
and once the best settings are found, the device will use them for the rest of the session.
The whole process takes around **25 seconds**.

If **continuous mode** is enabled, the device will continue to search for better settings.
In case the scene changes and disparity map quality drops below a certain threshold,
the device will automatically adjust the IR power again.

Usage
-----

The following example shows how to enable automatic IR power control in continuous mode:

.. literalinclude:: ../../../examples/StereoComponent/stereo_auto_ir.py
   :language: python
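
For quick reference, the included example is equivalent to this minimal sketch (assuming an OAK device with ``left`` and ``right`` mono cameras):

.. code-block:: python

    from depthai_sdk import OakCamera

    with OakCamera() as oak:
        left = oak.create_camera('left')
        right = oak.create_camera('right')
        stereo = oak.create_stereo(left=left, right=right)

        # Estimate the best IR brightness at startup, then keep adjusting it
        stereo.set_auto_ir(auto_mode=True, continuous_mode=True)

        oak.visualize([stereo.out.disparity, left])
        oak.start(blocking=True)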
5 changes: 5 additions & 0 deletions depthai_sdk/docs/source/features/replaying.rst
@@ -110,6 +110,11 @@ The following table lists all available recordings:
      - ``color.mp4``
      - 5.3 MB
      - Top-down view at an angle, source video `here <https://pixabay.com/videos/people-commerce-shop-busy-mall-6387/>`__
+   * - ``people-tracking-above-05``
+     - ``CAM_B.mp4``, ``CAM_C.mp4``, ``calib.json``
+     - 12 MB (35 sec)
+     - Top-down view, left+right stereo cameras, `demo usage at replay.py <https://github.com/luxonis/depthai-experiments/tree/master/gen2-record-replay>`__
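
A recording from this table can be replayed by passing its name to ``OakCamera`` — a minimal sketch (not part of this diff; it assumes the SDK fetches the named recording on first use, as described above):

.. code-block:: python

    from depthai_sdk import OakCamera

    # Replay the recorded streams instead of using a live camera
    with OakCamera(replay='people-tracking-above-05') as oak:
        stereo = oak.create_stereo()  # depth is recomputed from the recorded stereo pair
        oak.visualize(stereo.out.depth)
        oak.start(blocking=True)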



..
30 changes: 30 additions & 0 deletions depthai_sdk/examples/IMUComponent/imu_rerun.py
@@ -0,0 +1,30 @@
from depthai_sdk import OakCamera
from depthai_sdk.classes.packets import IMUPacket
import rerun as rr
import subprocess
import depthai as dai

def callback(packet: IMUPacket):
    for d in packet.data:
        gyro: dai.IMUReportGyroscope = d.gyroscope
        accel: dai.IMUReportAccelerometer = d.acceleroMeter
        mag: dai.IMUReportMagneticField = d.magneticField
        rot: dai.IMUReportRotationVectorWAcc = d.rotationVector
        print(accel.x, accel.y, accel.z)
        rr.log_scalar('world/accel_x', accel.x, color=(255, 0, 0))
        rr.log_scalar('world/accel_y', accel.y, color=(0, 255, 0))
        rr.log_scalar('world/accel_z', accel.z, color=(0, 0, 255))


with OakCamera() as oak:
    subprocess.Popen(["rerun", "--memory-limit", "200MB"])  # spawn the Rerun viewer
    rr.init("Rerun", spawn=False)
    rr.connect()

    imu = oak.create_imu()
    imu.config_imu(report_rate=10, batch_report_threshold=2)
    print(oak.device.getConnectedIMU())
    oak.callback(imu, callback=callback)
    oak.start(blocking=True)

18 changes: 18 additions & 0 deletions depthai_sdk/examples/PointcloudComponent/pointcloud.py
@@ -0,0 +1,18 @@
import cv2
from depthai_sdk import OakCamera
from depthai_sdk.classes.packets import PointcloudPacket, FramePacket
import rerun as rr
import subprocess

subprocess.Popen(["rerun", "--memory-limit", "200MB"])  # spawn the Rerun viewer
rr.init("Rerun", spawn=False)
rr.connect()

def callback(packet: PointcloudPacket):
    colors = packet.color_frame.getCvFrame()[..., ::-1]  # BGR to RGB
    rr.log_image('Color Image', colors)
    points = packet.points.reshape(-1, 3)
    rr.log_points("Pointcloud", points, colors=colors.reshape(-1, 3))

with OakCamera() as oak:
    pcl = oak.create_pointcloud()
    oak.callback(pcl, callback=callback)
    oak.start(blocking=True)
2 changes: 1 addition & 1 deletion depthai_sdk/examples/StereoComponent/stereo.py
@@ -11,5 +11,5 @@
     stereo.config_postprocessing(colorize=StereoColor.RGBD, colormap=cv2.COLORMAP_MAGMA)
     stereo.config_wls(wls_level=WLSLevel.MEDIUM)  # WLS filtering, use for smoother results
 
-    oak.visualize(stereo.out.disparity)
+    oak.visualize(stereo.out.depth)
     oak.start(blocking=True)
12 changes: 12 additions & 0 deletions depthai_sdk/examples/StereoComponent/stereo_auto_ir.py
@@ -0,0 +1,12 @@
from depthai_sdk import OakCamera

with OakCamera() as oak:
    left = oak.create_camera('left')
    right = oak.create_camera('right')
    stereo = oak.create_stereo(left=left, right=right)

    # Automatically estimate IR brightness and adjust it continuously
    stereo.set_auto_ir(auto_mode=True, continuous_mode=True)

    oak.visualize([stereo.out.disparity, left])
    oak.start(blocking=True)
84 changes: 84 additions & 0 deletions depthai_sdk/examples/mixed/collision_avoidance.py
@@ -0,0 +1,84 @@
from depthai_sdk import OakCamera
from depthai_sdk.visualize.configs import StereoColor
from depthai_sdk.classes.packets import DepthPacket
import math
import depthai as dai
import cv2

# User-defined constants
WARNING = 1000  # 1 m, drawn orange
CRITICAL = 500  # 50 cm, drawn red

slc_data = []

def cb(packet: DepthPacket):
    global slc_data
    fontType = cv2.FONT_HERSHEY_TRIPLEX

    depthFrameColor = packet.visualizer.draw(packet.frame)

    for depthData in slc_data:
        roi = depthData.config.roi
        roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0])

        xmin = int(roi.topLeft().x)
        ymin = int(roi.topLeft().y)
        xmax = int(roi.bottomRight().x)
        ymax = int(roi.bottomRight().y)

        coords = depthData.spatialCoordinates
        distance = math.sqrt(coords.x ** 2 + coords.y ** 2 + coords.z ** 2)

        if distance == 0:  # Invalid
            continue

        if distance < CRITICAL:
            color = (0, 0, 255)
            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, thickness=4)
            cv2.putText(depthFrameColor, "{:.1f}m".format(distance / 1000), (xmin + 10, ymin + 20), fontType, 0.5, color)
        elif distance < WARNING:
            color = (0, 140, 255)
            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, thickness=2)
            cv2.putText(depthFrameColor, "{:.1f}m".format(distance / 1000), (xmin + 10, ymin + 20), fontType, 0.5, color)

    cv2.imshow('0_depth', depthFrameColor)

with OakCamera() as oak:
    stereo = oak.create_stereo('720p')
    # We don't need a high fill rate, just very accurate depth; that's why we enable some filters
    # and set the confidence threshold to 50
    config = stereo.node.initialConfig.get()
    config.postProcessing.brightnessFilter.minBrightness = 0
    config.postProcessing.brightnessFilter.maxBrightness = 255
    stereo.node.initialConfig.set(config)
    stereo.config_postprocessing(colorize=StereoColor.RGBD, colormap=cv2.COLORMAP_BONE)
    stereo.config_stereo(confidence=50, lr_check=True, extended=True)

    oak.visualize([stereo], fps=True, callback=cb)

    oak.build()

    slc = oak.pipeline.create(dai.node.SpatialLocationCalculator)
    for x in range(15):  # 15x9 grid of ROIs covering the frame
        for y in range(9):
            config = dai.SpatialLocationCalculatorConfigData()
            config.depthThresholds.lowerThreshold = 200
            config.depthThresholds.upperThreshold = 10000
            config.roi = dai.Rect(dai.Point2f((x + 0.5) * 0.0625, (y + 0.5) * 0.1), dai.Point2f((x + 1.5) * 0.0625, (y + 1.5) * 0.1))
            # TODO: change from median to 10th percentile once supported
            config.calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN
            slc.initialConfig.addROI(config)

    stereo.depth.link(slc.inputDepth)

    slc_out = oak.pipeline.create(dai.node.XLinkOut)
    slc_out.setStreamName('slc')
    slc.out.link(slc_out.input)

    oak.start()  # Start the pipeline (upload it to the OAK)

    q = oak.device.getOutputQueue('slc')  # Create output queue after calling start()
    while oak.running():
        if q.has():
            slc_data = q.get().getSpatialLocations()
        oak.poll()
3 changes: 2 additions & 1 deletion depthai_sdk/examples/mixed/speed_calculation.py
@@ -17,8 +17,9 @@ def callback(packet):
     stereo.config_stereo(subpixel=False, lr_check=True)
 
     nn = oak.create_nn('face-detection-retail-0004', color, spatial=stereo, tracker=True)
+    nn.config_tracker(calculate_speed=True)
 
     visualizer = oak.visualize(nn.out.tracker, callback=callback, fps=True)
-    visualizer.tracking(speed=True).text(auto_scale=True)
+    visualizer.tracking(show_speed=True).text(auto_scale=True)
 
     oak.start(blocking=True)
3 changes: 2 additions & 1 deletion depthai_sdk/requirements.txt
@@ -8,4 +8,5 @@ depthai==2.21.2
 PyTurboJPEG==1.6.4
 marshmallow==3.17.0
 xmltodict
-sentry-sdk==1.21.0
\ No newline at end of file
+sentry-sdk==1.21.0
+depthai-pipeline-graph==0.0.5
2 changes: 1 addition & 1 deletion depthai_sdk/setup.py
@@ -9,7 +9,7 @@

 setup(
     name='depthai-sdk',
-    version='1.10.1',
+    version='1.11.0',
     description='This package provides an abstraction of the DepthAI API library.',
     long_description=io.open("README.md", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
4 changes: 2 additions & 2 deletions depthai_sdk/src/depthai_sdk/__init__.py
@@ -10,7 +10,7 @@
 from depthai_sdk.utils import _create_config, get_config_field
 from depthai_sdk.visualize import *
 
-__version__ = '1.10.1'
+__version__ = '1.11.0'
 
 
 def __import_sentry(sentry_dsn: str) -> None:
@@ -20,7 +20,7 @@ def __import_sentry(sentry_dsn: str) -> None:
         sentry_sdk.init(
             dsn=sentry_dsn,
             traces_sample_rate=1.0,
-            release='depthai_sdk@1.9.5',
+            release=f'depthai_sdk@{__version__}',
             with_locals=False,
         )
     except:
32 changes: 23 additions & 9 deletions depthai_sdk/src/depthai_sdk/classes/packets.py
@@ -55,10 +55,6 @@ class FramePacket:
     Contains only dai.ImgFrame message and cv2 frame, which is used by visualization logic.
     """
 
-    name: str  # ImgFrame stream name
-    msg: dai.ImgFrame  # Original depthai message
-    frame: Optional[np.ndarray]  # cv2 frame for visualization
-
     def __init__(self,
                  name: str,
                  msg: dai.ImgFrame,
@@ -67,23 +63,41 @@ def __init__(self,
         self.name = name
         self.msg = msg
         self.frame = frame
 
         self.visualizer = visualizer
 
 
+class PointcloudPacket:
+    def __init__(self,
+                 name: str,
+                 points: np.ndarray,
+                 depth_map: dai.ImgFrame,
+                 color_frame: Optional[np.ndarray],
+                 visualizer: 'Visualizer' = None):
+        self.name = name
+        self.points = points
+        self.depth_imgFrame = depth_map
+        self.color_frame = color_frame
+        self.visualizer = visualizer
+
+
 class DepthPacket(FramePacket):
-    mono_frame: dai.ImgFrame
-
     def __init__(self,
                  name: str,
-                 disparity_frame: dai.ImgFrame,
-                 mono_frame: dai.ImgFrame,
+                 img_frame: dai.ImgFrame,
+                 mono_frame: Optional[dai.ImgFrame],
+                 depth_map: Optional[np.ndarray] = None,
                  visualizer: 'Visualizer' = None):
         super().__init__(name=name,
-                         msg=disparity_frame,
-                         frame=disparity_frame.getCvFrame() if cv2 else None,
+                         msg=img_frame,
+                         frame=img_frame.getCvFrame() if cv2 else None,
                          visualizer=visualizer)
-        self.mono_frame = mono_frame
+
+        if mono_frame is not None:
+            self.mono_frame = mono_frame
+
+        self.depth_map = depth_map
 
 
 class SpatialBbMappingPacket(FramePacket):
     """
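
To show how the reworked DepthPacket is consumed, here is a minimal sketch (not part of the commit; it assumes a stereo-capable OAK and exercises the new depth_map attribute, which may be None if the producing output does not fill it in):

from depthai_sdk import OakCamera
from depthai_sdk.classes.packets import DepthPacket

def on_depth(packet: DepthPacket):
    # depth_map is the new optional attribute introduced above
    if packet.depth_map is not None:
        print('Depth frame shape:', packet.depth_map.shape)

with OakCamera() as oak:
    stereo = oak.create_stereo('400p')
    oak.callback(stereo.out.depth, callback=on_depth)
    oak.start(blocking=True)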
19 changes: 9 additions & 10 deletions depthai_sdk/src/depthai_sdk/components/camera_component.py
@@ -109,16 +109,7 @@ def __init__(self,
             socket = parse_camera_socket(source)
             sensor = [f for f in device.getConnectedCameraFeatures() if f.socket == socket][0]
 
-            if node_type is not None:  # User specified camera type
-                if node_type == dai.node.ColorCamera and dai.CameraSensorType.COLOR not in sensor.supportedTypes:
-                    raise Exception(
-                        f"User specified {node_type} node, but {sensor.sensorName} sensor doesn't support COLOR mode!"
-                    )
-                if node_type == dai.node.MonoCamera and dai.CameraSensorType.MONO not in sensor.supportedTypes:
-                    raise Exception(
-                        f"User specified {node_type} node, but {sensor.sensorName} sensor doesn't support MONO mode!"
-                    )
-            else:
+            if node_type is None:  # User didn't specify the camera type; derive it from the sensor
                 type = sensor.supportedTypes[0]
                 if type == dai.CameraSensorType.COLOR:
                     node_type = dai.node.ColorCamera
@@ -206,6 +197,7 @@ def __init__(self,
         self._control_xlink_in = pipeline.create(dai.node.XLinkIn)
         self._control_xlink_in.setStreamName(f"{self.node.id}_inputControl")
         self._control_xlink_in.out.link(self.node.inputControl)
+        self._control_xlink_in.setMaxDataSize(1)  # CameraControl message doesn't use any additional data (only metadata)
 
     def on_pipeline_started(self, device: dai.Device):
         if self._control_xlink_in is not None:
@@ -331,6 +323,12 @@ def config_color_camera(self,
         if isp_scale:
             self._resolution_forced = True
             self.node.setIspScale(*isp_scale)
+
+            self.node.setPreviewSize(*self.node.getIspSize())
+            self.node.setVideoSize(*self.node.getIspSize())
+            self.stream_size = self.node.getIspSize()
+            self.stream = self.node.preview
+
         if sharpness is not None: self.node.initialControl.setSharpness(sharpness)
         if luma_denoise is not None: self.node.initialControl.setLumaDenoise(luma_denoise)
         if chroma_denoise is not None: self.node.initialControl.setChromaDenoise(chroma_denoise)
@@ -404,6 +402,7 @@ def get_stream_xout(self) -> StreamXout:
             return StreamXout(self.node.id, self.stream, name=self.name)
         else:  # ColorCamera
             self.node.setVideoNumFramesPool(self._num_frames_pool)
+            self.node.setPreviewNumFramesPool(self._preview_num_frames_pool)
             # node.video instead of preview (self.stream) was used to reduce bandwidth
             # consumption by 2 (3bytes/pixel vs 1.5bytes/pixel)
             return StreamXout(self.node.id, self.node.video, name=self.name)
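
The bandwidth figure in the comment above is easy to sanity-check — a back-of-the-envelope sketch (hypothetical 1080p stream at 30 FPS; not part of the commit):

width, height, fps = 1920, 1080, 30
preview_bps = width * height * 3.0 * fps  # interleaved BGR: 3 bytes/pixel
video_bps = width * height * 1.5 * fps    # NV12: 1.5 bytes/pixel
print(preview_bps / 1e6, 'MB/s')  # ~186.6 MB/s
print(video_bps / 1e6, 'MB/s')    # ~93.3 MB/s, i.e. half the bandwidth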