Commit 7e743b72 authored by Larkin Heintzman's avatar Larkin Heintzman

image pipeline added

parent 08933b6f
Pipeline #73 canceled with stages
image_pipeline @ c4527fdf
Subproject commit c4527fdf0de81241f6809ee2694bb3b3b7b49e07
image_pipeline
==============
[![](https://github.com/ros-perception/image_pipeline/workflows/Basic%20Build%20Workflow/badge.svg?branch=noetic)](https://github.com/ros-perception/image_pipeline/actions)
This package fills the gap between getting raw images from a camera driver and higher-level vision processing.
For more information on this metapackage and underlying packages, please see [the ROS wiki entry](http://wiki.ros.org/image_pipeline).
For examples, see the [image_pipeline tutorials entry](http://wiki.ros.org/image_pipeline/Tutorials) on the ROS Wiki.
This diff is collapsed.
cmake_minimum_required(VERSION 3.0.2)
project(camera_calibration)

# Pure-Python catkin package: no compiled targets. Just register the package,
# hook up the Python setup.py, and install the executable scripts.
find_package(catkin REQUIRED)
catkin_package()
catkin_python_setup()

if(CATKIN_ENABLE_TESTING)
  # Unit test of calibrator.py
  catkin_add_nosetests(test/directed.py)

  # Tests simple calibration dataset
  # (fixtures are fetched on demand from download.ros.org and MD5-checked)
  catkin_download_test_data(camera_calibration.tar.gz http://download.ros.org/data/camera_calibration/camera_calibration.tar.gz
    DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/tests
    MD5 6da43ea314640a4c15dd7a90cbc3aee0
  )
  # Tests multiple checkerboards
  catkin_download_test_data(multi_board_calibration.tar.gz http://download.ros.org/data/camera_calibration/multi_board_calibration.tar.gz
    DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/tests
    MD5 ddc0f69582d140e33f9d3bfb681956bb
  )
  catkin_add_nosetests(test/multiple_boards.py)
endif()

# Install the node/script entry points through catkin's Python install helper.
catkin_install_python(PROGRAMS nodes/cameracalibrator.py
  nodes/cameracheck.py
  scripts/tarfile_calibration.py
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
# -*- coding: utf-8 -*-
#
# camera_calibration documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 1 14:21:53 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): 'sphinx.ext.pngmath' was dropped in newer Sphinx releases in
# favor of 'sphinx.ext.imgmath' — confirm the Sphinx version used for this
# docs build before upgrading.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'camera_calibration'
copyright = '2009, Willow Garage, Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'camera_calibrationdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'camera_calibration.tex', 'stereo\\_utils Documentation',
   'James Bowman', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'http://docs.python.org/': None,
    'http://docs.scipy.org/doc/numpy' : None,
    'http://www.ros.org/doc/api/tf/html/python/' : None
}
camera_calibration
==================
The camera_calibration package contains a user-friendly calibration tool,
cameracalibrator. This tool uses the following Python classes, which
conveniently hide some of the complexities of using OpenCV's calibration
process and chessboard detection, and the details of constructing a ROS
CameraInfo message. These classes are documented here for people who
need to extend or make a new calibration tool.
For details on the camera model and camera calibration process, see
http://docs.opencv.org/master/d9/d0c/group__calib3d.html
.. autoclass:: camera_calibration.calibrator.MonoCalibrator
:members:
.. autoclass:: camera_calibration.calibrator.StereoCalibrator
:members:
/**
\mainpage
\htmlinclude manifest.html
\b The camera_calibration package contains tools for calibrating monocular and stereo cameras.
\section codeapi Code API
camera_calibration does not have a code API.
\section rosapi ROS API
List of nodes:
- \b calibrationnode
<!-- START: copy for each node -->
<hr>
\subsection node_name calibrationnode
calibrationnode subscribes to ROS raw image topics, and presents a
calibration window. It can run in both monocular and stereo modes.
The calibration window shows the current images from the cameras,
highlighting the checkerboard. When the user presses the "CALIBRATE"
button, the node computes the camera calibration parameters. When the
user clicks "UPLOAD", the node uploads these new calibration parameters
to the camera driver using a service call.
\subsubsection Usage
\verbatim
$ node_type1 [standard ROS args]
\endverbatim
\par Example
\verbatim
$ rosrun camera_calibration cal.py right:=/my_stereo/right/image_raw left:=/my_stereo/left/image_raw left_camera:=/my_stereo/left right_camera:=/my_stereo/right
\endverbatim
\subsubsection topics ROS topics
Subscribes to:
- \b "left": [sensor_msgs/Image] left raw image topic, for stereo cameras
- \b "right": [sensor_msgs/Image] right raw image topic, for stereo cameras
- \b "image": [sensor_msgs/Image] raw image topic, for monocular cameras
Makes service calls to:
\subsubsection services ROS services
- \b "foo_service": [std_srvs/FooType] description of foo_service
- \b "camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the camera for monocular
- \b "left_camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the left stereo camera
- \b "right_camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the right stereo camera
<!-- END: copy for each node -->
*/
This diff is collapsed.
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
from camera_calibration.camera_checker import CameraCheckerNode
def main():
    """Parse command-line options and spin up the camera checker node."""
    from optparse import OptionParser

    rospy.init_node('cameracheck')

    parser = OptionParser()
    parser.add_option("-s", "--size", default="8x6",
                      help="specify chessboard size as nxm [default: %default]")
    parser.add_option("-q", "--square", default=".108",
                      help="specify chessboard square size in meters [default: %default]")
    parser.add_option("--approximate", type="float", default=0.0,
                      help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras")
    options, _ = parser.parse_args()

    # "8x6" -> (8, 6): interior corner counts of the checkerboard.
    board_size = tuple(int(part) for part in options.size.split('x'))
    square_dim = float(options.square)
    slop = float(options.approximate)

    CameraCheckerNode(board_size, square_dim, slop)
    rospy.spin()


if __name__ == "__main__":
    main()
<package>
<name>camera_calibration</name>
<version>1.16.0</version>
<description>
camera_calibration allows easy calibration of monocular or stereo
cameras using a checkerboard calibration target.
</description>
<author>James Bowman</author>
<author>Patrick Mihelich</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<maintainer email="software@autonomoustuff.com">Autonomoustuff team</maintainer>
<buildtool_depend version_gte="0.5.68">catkin</buildtool_depend>
<test_depend>rostest</test_depend>
<run_depend>cv_bridge</run_depend>
<run_depend>image_geometry</run_depend>
<run_depend>message_filters</run_depend>
<run_depend>rospy</run_depend>
<run_depend>std_srvs</run_depend>
<run_depend>sensor_msgs</run_depend>
<license>BSD</license>
<url>http://www.ros.org/wiki/camera_calibration</url>
<export>
<rosdoc config="rosdoc.yaml" />
<architecture_independent/>
</export>
</package>
- builder: sphinx
name: Python API
output_dir: python
sphinx_root_dir: doc
#!/usr/bin/env python

from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup

# Standard catkin setup.py: metadata comes from generate_distutils_setup()
# (presumably read from package.xml — verify against catkin_pkg docs);
# here we only declare the package name and its source directory.
d = generate_distutils_setup()
d['packages'] = ['camera_calibration']
d['package_dir'] = {'':'src'}

setup(**d)
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cv2
import cv_bridge
import functools
import message_filters
import numpy
import rospy
import sensor_msgs.msg
import sensor_msgs.srv
import threading
from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo
from message_filters import ApproximateTimeSynchronizer
try:
from queue import Queue
except ImportError:
from Queue import Queue
def mean(seq):
    """Arithmetic mean of a sequence (true division; raises on empty input)."""
    total = 0
    for value in seq:
        total += value
    return total / len(seq)
def lmin(seq1, seq2):
    """Element-wise minimum of two sequences (truncated to the shorter one)."""
    return list(map(min, seq1, seq2))
def lmax(seq1, seq2):
    """Element-wise maximum of two sequences (truncated to the shorter one)."""
    return list(map(max, seq1, seq2))
class ConsumerThread(threading.Thread):
    """Worker thread that feeds queued messages to a callback, always
    processing only the most recent message available."""

    def __init__(self, queue, function):
        # queue: thread-safe Queue the synchronizer callbacks push into
        # function: callback invoked with each consumed message tuple
        threading.Thread.__init__(self)
        self.queue = queue
        self.function = function

    def run(self):
        while not rospy.is_shutdown():
            # The inner loop intentionally drains the queue: block for at
            # least one message, then keep pulling until the queue is empty,
            # so 'm' ends up holding the newest message and stale backlog
            # is discarded before calling the (potentially slow) handler.
            while not rospy.is_shutdown():
                m = self.queue.get()
                if self.queue.empty():
                    break
            self.function(m)
class CameraCheckerNode:
    """Subscribes to rectified mono and stereo image/camera_info topics and
    continuously prints calibration-quality metrics: linearity and
    reprojection RMS error (mono), epipolar error and measured checkerboard
    dimension (stereo)."""

    def __init__(self, chess_size, dim, approximate=0):
        """chess_size: (cols, rows) interior-corner counts of the board.
        dim: checkerboard square size in meters.
        approximate: pairing slop in seconds; <= 0 means exact time sync."""
        self.board = ChessboardInfo()
        self.board.n_cols = chess_size[0]
        self.board.n_rows = chess_size[1]
        self.board.dim = dim

        # make sure n_cols is not smaller than n_rows, otherwise error computation will be off
        if self.board.n_cols < self.board.n_rows:
            self.board.n_cols, self.board.n_rows = self.board.n_rows, self.board.n_cols

        image_topic = rospy.resolve_name("monocular") + "/image_rect"
        camera_topic = rospy.resolve_name("monocular") + "/camera_info"

        tosync_mono = [
            (image_topic, sensor_msgs.msg.Image),
            (camera_topic, sensor_msgs.msg.CameraInfo),
        ]

        # Exact synchronizer by default; approximate with the given slop otherwise.
        if approximate <= 0:
            sync = message_filters.TimeSynchronizer
        else:
            sync = functools.partial(ApproximateTimeSynchronizer, slop=approximate)

        tsm = sync([message_filters.Subscriber(topic, type) for (topic, type) in tosync_mono], 10)
        tsm.registerCallback(self.queue_monocular)

        left_topic = rospy.resolve_name("stereo") + "/left/image_rect"
        left_camera_topic = rospy.resolve_name("stereo") + "/left/camera_info"
        right_topic = rospy.resolve_name("stereo") + "/right/image_rect"
        right_camera_topic = rospy.resolve_name("stereo") + "/right/camera_info"

        tosync_stereo = [
            (left_topic, sensor_msgs.msg.Image),
            (left_camera_topic, sensor_msgs.msg.CameraInfo),
            (right_topic, sensor_msgs.msg.Image),
            (right_camera_topic, sensor_msgs.msg.CameraInfo)
        ]

        tss = sync([message_filters.Subscriber(topic, type) for (topic, type) in tosync_stereo], 10)
        tss.registerCallback(self.queue_stereo)

        self.br = cv_bridge.CvBridge()
        self.q_mono = Queue()
        self.q_stereo = Queue()

        # Consumer threads drain the queues so subscriber callbacks never block.
        mth = ConsumerThread(self.q_mono, self.handle_monocular)
        mth.daemon = True  # setDaemon() is deprecated; assign the attribute instead
        mth.start()
        sth = ConsumerThread(self.q_stereo, self.handle_stereo)
        sth.daemon = True
        sth.start()

        self.mc = MonoCalibrator([self.board])
        self.sc = StereoCalibrator([self.board])

    def queue_monocular(self, msg, cmsg):
        # Synchronized (image, camera_info) pair from the mono subscribers.
        self.q_mono.put((msg, cmsg))

    def queue_stereo(self, lmsg, lcmsg, rmsg, rcmsg):
        # Synchronized left/right (image, camera_info) pairs.
        self.q_stereo.put((lmsg, lcmsg, rmsg, rcmsg))

    def mkgray(self, msg):
        """Convert a sensor_msgs/Image to grayscale via the mono calibrator."""
        return self.mc.mkgray(msg)

    def image_corners(self, im):
        """Detect checkerboard corners in a grayscale image.

        Returns (corners, ids) on success and (None, None) on failure.
        BUGFIX: this previously returned a bare None on failure, which made
        the tuple-unpacking callers (handle_monocular / handle_stereo) raise
        TypeError on every frame without a visible chessboard.
        """
        (ok, corners, ids, b) = self.mc.get_corners(im)
        if ok:
            return corners, ids
        else:
            return None, None

    def handle_monocular(self, msg):
        """Compute and print linearity and reprojection RMS error (pixels)
        for one synchronized (image, camera_info) pair."""
        (image, camera) = msg
        gray = self.mkgray(image)
        C, ids = self.image_corners(gray)
        if C is not None:
            linearity_rms = self.mc.linear_error(C, ids, self.board)

            # Add in reprojection check
            image_points = C
            object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
            dist_coeffs = numpy.zeros((4, 1))
            # 3x3 intrinsics extracted from the row-major 3x4 projection matrix P.
            camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2] ],
                                           [ camera.P[4], camera.P[5], camera.P[6] ],
                                           [ camera.P[8], camera.P[9], camera.P[10] ] ] )
            ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3, _ = cv2.Rodrigues(rot)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans)
            reprojected_h = camera_matrix * object_points_world
            reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points.squeeze().T - reprojected
            # numpy.product was removed in NumPy 2.0; numpy.prod is the supported spelling.
            reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape))

            # Print the results
            print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
        else:
            print('no chessboard')

    def handle_stereo(self, msg):
        """Compute and print epipolar error (pixels) and measured checkerboard
        dimension (meters) for one synchronized stereo tuple."""
        (lmsg, lcmsg, rmsg, rcmsg) = msg
        lgray = self.mkgray(lmsg)
        rgray = self.mkgray(rmsg)

        L, _ = self.image_corners(lgray)
        R, _ = self.image_corners(rgray)
        if L is not None and R is not None:
            epipolar = self.sc.epipolar_error(L, R)
            dimension = self.sc.chessboard_size(L, R, self.board, msg=(lcmsg, rcmsg))

            print("epipolar error: %f pixels dimension: %f m" % (epipolar, dimension))
        else:
            print("no chessboard")
This diff is collapsed.
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import roslib
import rostest
import rospy
import unittest
import tarfile
import copy
import os, sys
from camera_calibration.calibrator import StereoCalibrator, ChessboardInfo, image_from_archive
# Large board used for PR2 calibration
# 7x6 interior corners, 0.108 m squares; shared by all tests in this module.
board = ChessboardInfo()
board.n_cols = 7
board.n_rows = 6
board.dim = 0.108
class TestMultipleBoards(unittest.TestCase):
    """Regression test: stereo calibration from a tarball containing two
    checkerboards of different sizes, checking epipolar error for both."""

    def test_multiple_boards(self):
        # Small secondary board present in the same dataset.
        small_board = ChessboardInfo()
        small_board.n_cols = 5
        small_board.n_rows = 4
        small_board.dim = 0.025

        stereo_cal = StereoCalibrator([board, small_board])
        my_archive_name = roslib.packages.find_resource('camera_calibration', 'multi_board_calibration.tar.gz')[0]
        stereo_cal.do_tarfile_calibration(my_archive_name)

        stereo_cal.report()
        stereo_cal.ost()

        # Context manager so the archive is closed even on assertion failure
        # (the original left the tarfile open).
        with tarfile.open(my_archive_name) as archive:
            # Check error for big image
            l1_big = image_from_archive(archive, "left-0000.png")
            r1_big = image_from_archive(archive, "right-0000.png")
            epi_big = stereo_cal.epipolar_error_from_images(l1_big, r1_big)
            # self.assert_ was removed in Python 3.12; assertLess also gives
            # a clearer failure message.
            self.assertLess(epi_big, 1.0, "Epipolar error for large checkerboard > 1.0. Error: %.2f" % epi_big)

            # Small checkerboard has larger error threshold for now
            l1_sm = image_from_archive(archive, "left-0012-sm.png")
            r1_sm = image_from_archive(archive, "right-0012-sm.png")
            epi_sm = stereo_cal.epipolar_error_from_images(l1_sm, r1_sm)
            self.assertLess(epi_sm, 2.0, "Epipolar error for small checkerboard > 2.0. Error: %.2f" % epi_sm)
if __name__ == '__main__':
    # The manual TestSuite runner previously hidden behind "if 1: ... else:"
    # was unreachable dead code; invoke rostest directly.
    rostest.unitrun('camera_calibration', 'test_multi_board_cal', TestMultipleBoards)
1.16.0 (2021-11-12)
-------------------
* Fix includes
In the following commit in vision_opencv, the include
opencv2/calib3d/calib3d.hpp was removed from pinhole_camera_model.h :
https://github.com/ros-perception/vision_opencv/commit/51ca54354a8353fc728fcc8bd8ead7d2b6cf7444
Since we indirectly depended on this include, we now have to add it
directly.
* support rgba8 and bgra8 encodings by skipping alpha channel
* functional xyzrgb radial nodelet
* add xyzrgb radial nodelet
* Support MONO16 image encodings.
* Add missing cstdint, vector, cmath includes.
* Contributors: Avinash Thakur, Evan Flynn, Martin Günther, Mike Purvis
1.15.3 (2020-12-11)
-------------------
* remove email blasts from steve macenski (`#595 <https://github.com/ros-perception/image_pipeline/issues/595>`_)
* Contributors: Steve Macenski
1.15.2 (2020-05-19)
-------------------
1.15.1 (2020-05-18)
-------------------
1.15.0 (2020-05-14)
-------------------
* Python 3 compatibility (`#530 <https://github.com/ros-perception/image_pipeline/issues/530>`_)
* cmake_minimum_required to 3.0.2
* Adapted to OpenCV4
* import setup from setuptools instead of distutils-core
* updated install locations for better portability. (`#500 <https://github.com/ros-perception/image_pipeline/issues/500>`_)
* Contributors: Joshua Whitley, Sean Yen
1.14.0 (2020-01-12)
-------------------
* Merge pull request `#478 <https://github.com/ros-perception/image_pipeline/issues/478>`_ from ros-perception/steve_main
added option to fill the sparse areas with nearest neighbor depth va…
* Merge pull request `#336 <https://github.com/ros-perception/image_pipeline/issues/336>`_ from madsherlock/indigo
depth_image_proc/point_cloud_xyzi_radial Add intensity conversion (copy) for float
* depth_image_proc: fix support for mono16 intensity encoding in point_cloud_xyzi node (`#352 <https://github.com/ros-perception/image_pipeline/issues/352>`_)
* added option to fill the sparse areas with nearest neighbor depth values on upsampling operations in depth_image_proc/register
* point_cloud_xyzi Add intensity conversion for float
* Add intensity conversion (copy) for float
This commit enables the generation of xyzi point clouds from 32-bit floating point intensity images.
The destination data type for intensity storage is 32-bit float, so all that is required is a data copy.
The change in this commit is simply an extension of the if-else statement to include the TYPE_32FC1 type and apply the usual convert_intensity() method.
* Contributors: Mikael Westermann, Richard Bormann, Steven Macenski, Stewart Jamieson, Tim Übelhör
1.13.0 (2019-06-12)
-------------------
* Merge pull request `#395 <https://github.com/ros-perception/image_pipeline/issues/395>`_ from ros-perception/steve_maintain
* adding autonomoustuff maintainer
* adding stevemacenski as maintainer to get emails
* Contributors: Joshua Whitley, Yoshito Okada, stevemacenski
1.12.23 (2018-05-10)
--------------------
1.12.22 (2017-12-08)
--------------------
1.12.21 (2017-11-05)
--------------------
* Fix C++11 compilation
This fixes `#292 <https://github.com/ros-perception/image_pipeline/issues/292>`_ and `#291 <https://github.com/ros-perception/image_pipeline/issues/291>`_
* Contributors: Vincent Rabaud
1.12.20 (2017-04-30)
--------------------
* Fix CMake warnings about Eigen.
* Convert depth image metric from [m] to [mm]
* address gcc6 build error
With gcc6, compiling fails with `stdlib.h: No such file or directory`,
as including '-isystem /usr/include' breaks with gcc6, cf.,
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129.
This commit addresses this issue for this package in the same way
it was addressed in various other ROS packages. A list of related
commits and pull requests is at:
https://github.com/ros/rosdistro/issues/12783
Signed-off-by: Lukas Bulwahn <lukas.bulwahn@oss.bmw-carit.de>
* Contributors: Kentaro Wada, Lukas Bulwahn, Vincent Rabaud
1.12.19 (2016-07-24)
--------------------
1.12.18 (2016-07-12)
--------------------
1.12.17 (2016-07-11)
--------------------
1.12.16 (2016-03-19)
--------------------
* check number of channels before the process
* search minimum value with OpenCV
* Use OpenCV to be faster
* Add a feature for a depth image to crop foremost image
* Contributors: Kenta Yonekura
1.12.15 (2016-01-17)
--------------------
* Add option for exact time sync for point_cloud_xyzrgb
* simplify OpenCV3 conversion
* Contributors: Kentaro Wada, Vincent Rabaud
1.12.14 (2015-07-22)
--------------------
1.12.13 (2015-04-06)
--------------------
* Add radial point cloud processors
* Contributors: Hunter Laux
1.12.12 (2014-12-31)
--------------------
* adds range_max
* exports depth_conversions
with convert for xyz PC only
* exports DepthTraits
* Contributors: enriquefernandez
1.12.11 (2014-10-26)
--------------------
1.12.10 (2014-09-28)
--------------------
1.12.9 (2014-09-21)
-------------------
* get code to compile with OpenCV3
fixes `#96 <https://github.com/ros-perception/image_pipeline/issues/96>`_
* Contributors: Vincent Rabaud
1.12.8 (2014-08-19)
-------------------
1.12.6 (2014-07-27)
-------------------
* Add point_cloud_xyzi nodelet
This is for cameras that output depth and intensity images.
It's based on the point_cloud_xyzrgb nodelet.
* Missing runtime dependency - eigen_conversions
`libdepth_image_proc` is missing this dependency at runtime
```
> ldd libdepth_image_proc.so | grep eigen
libeigen_conversions.so => not found
```
Which causes the following error on loading depth_image_proc:
```
[ INFO] [1402564815.530736554]: /camera/rgb/camera_info -> /camera/rgb/camera_info
[ERROR] [1402564815.727176562]: Failed to load nodelet [/camera/depth_metric_rect] of type
[depth_image_proc/convert_metric]: Failed to load library /opt/ros/indigo/lib//libdepth_image_proc.so.
Make sure that you are calling the PLUGINLIB_EXPORT_CLASS macro in the library code, and that
names are consistent between this macro and your XML. Error string: Could not load library (Poco
exception = libeigen_conversions.so: cannot open shared object file: No such file or directory)
[FATAL] [1402564815.727410623]: Service call failed!
```
* Contributors: Daniel Stonier, Hunter Laux
1.12.4 (2014-04-28)
-------------------
* depth_image_proc: fix missing symbols in nodelets
* Contributors: Michael Ferguson
1.12.3 (2014-04-12)
-------------------
1.12.2 (2014-04-08)
-------------------
1.12.1 (2014-04-06)
-------------------
* replace tf usage by tf2 usage
1.12.0 (2014-04-04)
-------------------
* remove PCL dependency
cmake_minimum_required(VERSION 3.0.2)
project(depth_image_proc)

# Catkin components this package builds against; COMPONENTS keyword made
# explicit (the keyword-less form relies on implicit find_package parsing).
find_package(catkin REQUIRED COMPONENTS
  cmake_modules
  cv_bridge
  eigen_conversions
  image_geometry
  image_transport
  message_filters
  nodelet
  sensor_msgs
  stereo_msgs
  tf2
  tf2_ros
)
catkin_package(
  INCLUDE_DIRS include
  CATKIN_DEPENDS image_geometry sensor_msgs
  LIBRARIES ${PROJECT_NAME}
)

find_package(Boost REQUIRED)
# Prefer the Eigen3 package; fall back to the legacy FindEigen module and
# map its result variable onto the Eigen3 name used below.
find_package(Eigen3 QUIET)
if(NOT EIGEN3_FOUND)
  find_package(Eigen REQUIRED)
  set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS})
endif()
find_package(OpenCV REQUIRED)

# BUGFIX: was ${BOOST_INCLUDE_DIRS}, which FindBoost never defines; the
# correct (case-sensitive) result variable is Boost_INCLUDE_DIRS.
include_directories(include ${Boost_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS})

# One shared library containing all depth_image_proc nodelets.
add_library(${PROJECT_NAME} src/nodelets/convert_metric.cpp
  src/nodelets/crop_foremost.cpp
  src/nodelets/disparity.cpp
  src/nodelets/point_cloud_xyz.cpp
  src/nodelets/point_cloud_xyzrgb.cpp
  src/nodelets/point_cloud_xyzi.cpp
  src/nodelets/point_cloud_xyz_radial.cpp
  src/nodelets/point_cloud_xyzi_radial.cpp
  src/nodelets/point_cloud_xyzrgb_radial.cpp
  src/nodelets/register.cpp
)
target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})

install(DIRECTORY include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
  FILES_MATCHING PATTERN "*.h"
)
install(TARGETS ${PROJECT_NAME}
  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
)
install(FILES nodelet_plugins.xml
  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
)
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef DEPTH_IMAGE_PROC_DEPTH_CONVERSIONS
#define DEPTH_IMAGE_PROC_DEPTH_CONVERSIONS
#include <sensor_msgs/Image.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#include <image_geometry/pinhole_camera_model.h>
#include <depth_image_proc/depth_traits.h>
#include <limits>
namespace depth_image_proc {
typedef sensor_msgs::PointCloud2 PointCloud;
// Handles float or uint16 depths
// Back-projects a depth image into the pre-sized organized XYZ cloud.
// T is the pixel type (uint16_t in mm or float in m), bridged by DepthTraits.
// If range_max is non-zero, invalid depth samples are replaced by that range
// instead of producing NaN points.
template<typename T>
void convert(
    const sensor_msgs::ImageConstPtr& depth_msg,
    PointCloud::Ptr& cloud_msg,
    const image_geometry::PinholeCameraModel& model,
    double range_max = 0.0)
{
  // Principal point from the calibration.
  const float cx = model.cx();
  const float cy = model.cy();

  // Fold the unit conversion (mm->m for uint16) into the inverse focal
  // lengths used when computing (X, Y).
  const double to_meters = DepthTraits<T>::toMeters( T(1) );
  const float inv_fx = to_meters / model.fx();
  const float inv_fy = to_meters / model.fy();
  const float invalid_point = std::numeric_limits<float>::quiet_NaN();

  sensor_msgs::PointCloud2Iterator<float> out_x(*cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> out_y(*cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> out_z(*cloud_msg, "z");

  const T* row_ptr = reinterpret_cast<const T*>(&depth_msg->data[0]);
  const int row_stride = depth_msg->step / sizeof(T);
  for (int v = 0; v < (int)cloud_msg->height; ++v, row_ptr += row_stride)
  {
    for (int u = 0; u < (int)cloud_msg->width; ++u, ++out_x, ++out_y, ++out_z)
    {
      T depth = row_ptr[u];
      if (!DepthTraits<T>::valid(depth))
      {
        if (range_max == 0.0)
        {
          // No fallback range configured: emit a NaN point for this pixel.
          *out_x = *out_y = *out_z = invalid_point;
          continue;
        }
        // Substitute the configured maximum range for the missing sample.
        depth = DepthTraits<T>::fromMeters(range_max);
      }
      // Pinhole back-projection of pixel (u, v) at this depth.
      *out_x = (u - cx) * depth * inv_fx;
      *out_y = (v - cy) * depth * inv_fy;
      *out_z = DepthTraits<T>::toMeters(depth);
    }
  }
}
} // namespace depth_image_proc
#endif
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef DEPTH_IMAGE_PROC_DEPTH_TRAITS
#define DEPTH_IMAGE_PROC_DEPTH_TRAITS
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>
namespace depth_image_proc {

// Traits abstracting over the two wire encodings of depth images:
// uint16_t (millimetres, 0 = invalid) and float (metres, NaN/inf = invalid).
template<typename T> struct DepthTraits {};

template<>
struct DepthTraits<uint16_t>
{
  // A zero sample marks a missing measurement.
  static inline bool valid(uint16_t depth) { return depth != 0; }
  // Millimetres -> metres.
  static inline float toMeters(uint16_t depth) { return depth * 0.001f; }
  // Metres -> millimetres, rounded to nearest integer.
  static inline uint16_t fromMeters(float depth) { return (depth * 1000.0f) + 0.5f; }
  // Buffers arrive zero-filled and 0 already means "invalid": nothing to do.
  static inline void initializeBuffer(std::vector<uint8_t>& buffer) {}
};

template<>
struct DepthTraits<float>
{
  // Finite values are valid; NaN and +/-inf mark missing measurements.
  static inline bool valid(float depth) { return std::isfinite(depth); }
  // Already metres; identity conversions.
  static inline float toMeters(float depth) { return depth; }
  static inline float fromMeters(float depth) { return depth; }
  // Mark every pixel invalid by filling the raw byte buffer with quiet NaNs.
  static inline void initializeBuffer(std::vector<uint8_t>& buffer)
  {
    float* first = reinterpret_cast<float*>(&buffer[0]);
    float* last  = reinterpret_cast<float*>(&buffer[0] + buffer.size());
    std::fill(first, last, std::numeric_limits<float>::quiet_NaN());
  }
};

} // namespace depth_image_proc
#endif
<!-- -->
<!-- Convert the depth image and the rgb image into a xyzrgb Point Cloud -->
<launch>
<node name="update_frame_id" pkg="depth_image_proc" type="update_frame_id.py" output="screen" />
<!-- Nodelet manager for this pipeline -->
<node pkg="nodelet" type="nodelet" args="manager" name="standalone_nodelet" output="screen"/>
<!-- Convert to point cloud -->
<node pkg="nodelet" type="nodelet" name="depth_image_proc" args="load depth_image_proc/point_cloud_xyzrgb standalone_nodelet --no-bond">
<!-- Remap the topics below to match your specific sensors. -->
<!-- Input: Camera calibration and metadata. (sensor_msgs/CameraInfo) -->
<!--remap from="rgb/camera_info" to="/carla/ego_vehicle/rgb_front/camera_info"/-->
<!-- Input: Rectified color image. (sensor_msgs/Image) -->
<!--remap from="rgb/image_rect_color" to="/carla/ego_vehicle/rgb_front/image"/-->
<!-- Input: Rectified depth image, registered to the RGB camera. (sensor_msgs/Image) -->
<remap from="depth_registered/image_rect" to="/carla/ego_vehicle/depth_front/image"/>
<!-- Output: XYZ point cloud. If using PCL, subscribe as PointCloud<PointXYZ>. (sensor_msgs/PointCloud2) -->
<remap from="depth_registered/points" to="/point_cloud/points"/>
</node>
</launch>
<!-- -->
/**
\mainpage
\htmlinclude manifest.html
\b depth_image_proc contains nodelets for processing depth images: converting raw depth to metric depth, generating disparity images, building XYZ/XYZRGB point clouds, and registering a depth image into another camera frame.
<!--
Provide an overview of your package.
-->
\section codeapi Code API
<!--
Provide links to specific auto-generated API documentation within your
package that is of particular interest to a reader. Doxygen will
document pretty much every part of your code, so do your best here to
point the reader to the actual API.
If your codebase is fairly large or has different sets of APIs, you
should use the doxygen 'group' tag to keep these APIs together. For
example, the roscpp documentation has 'libros' group.
-->
*/
<library path="lib/libdepth_image_proc">
<class name="depth_image_proc/convert_metric"
type="depth_image_proc::ConvertMetricNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to convert raw uint16 depth image in mm to float depth image in m.
</description>
</class>
<class name="depth_image_proc/disparity"
type="depth_image_proc::DisparityNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to convert depth image to disparity image.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyz"
type="depth_image_proc::PointCloudXyzNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to convert depth image to XYZ point cloud.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyzrgb"
type="depth_image_proc::PointCloudXyzrgbNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to combine registered depth image and RGB image into XYZRGB point cloud.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyzi"
type="depth_image_proc::PointCloudXyziNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to combine registered depth image and intensity image into XYZI point cloud.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyz_radial"
type="depth_image_proc::PointCloudXyzRadialNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to convert a radial depth map to a point cloud.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyzi_radial"
type="depth_image_proc::PointCloudXyziRadialNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to convert radial depth and intensity maps to a point cloud.
</description>
</class>
<class name="depth_image_proc/point_cloud_xyzrgb_radial"
type="depth_image_proc::PointCloudXyzRgbRadialNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to combine registered Radial depth image and RGB image into XYZRGB point cloud.
</description>
</class>
<class name="depth_image_proc/register"
type="depth_image_proc::RegisterNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to create a depth image registered to another camera frame.
</description>
</class>
<class name="depth_image_proc/crop_foremost"
type="depth_image_proc::CropForemostNodelet"
base_class_type="nodelet::Nodelet">
<description>
Nodelet to crop a depth image from foremost depth to a specific range.
</description>
</class>
</library>
<package>
<name>depth_image_proc</name>
<version>1.16.0</version>
<description>
Contains nodelets for processing depth images such as those
produced by OpenNI camera. Functions include creating disparity
images and point clouds, as well as registering (reprojecting)
a depth image into another camera frame.
</description>
<author>Patrick Mihelich</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<maintainer email="software@autonomoustuff.com">Autonomoustuff team</maintainer>
<license>BSD</license>
<url>http://ros.org/wiki/depth_image_proc</url>
<export>
<nodelet plugin="${prefix}/nodelet_plugins.xml" />
</export>
<buildtool_depend>catkin</buildtool_depend>
<test_depend>rostest</test_depend>
<build_depend>boost</build_depend>
<build_depend>cmake_modules</build_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>eigen_conversions</build_depend>
<build_depend>image_geometry</build_depend>
<build_depend>image_transport</build_depend>
<build_depend>message_filters</build_depend>
<build_depend>nodelet</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>stereo_msgs</build_depend>
<build_depend>tf2</build_depend>
<build_depend>tf2_ros</build_depend>
<run_depend>boost</run_depend>
<run_depend>cv_bridge</run_depend>
<run_depend>eigen_conversions</run_depend>
<run_depend>image_geometry</run_depend>
<run_depend>image_transport</run_depend>
<run_depend>nodelet</run_depend>
<run_depend>tf2</run_depend>
<run_depend>tf2_ros</run_depend>
<run_depend>sensor_msgs</run_depend>
</package>
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
rospy.init_node("update_frame_id")
# Update the frame_id of the RGB topics to the depth camera's frame so the
# pipeline does not reject the pair ("depth_front frame id does not match
# rgb_front frame id").
class update_frame_id:
    """Republish the CARLA RGB image and camera info with the depth camera's
    frame_id, so depth_image_proc accepts the RGB/depth pair (it errors out
    when the two frame_ids differ).
    """

    # Frame id of the registered depth camera; both republished topics use it.
    TARGET_FRAME_ID = "ego_vehicle/depth_front"

    def __init__(self):
        # Inputs: CARLA front RGB camera topics.
        self.sub_raw = rospy.Subscriber("/carla/ego_vehicle/rgb_front/image", Image, self.callback_raw)
        self.sub_info = rospy.Subscriber("/carla/ego_vehicle/rgb_front/camera_info", CameraInfo, self.callback_info)
        # Outputs: topics consumed by the point_cloud_xyzrgb nodelet.
        self.pub_raw = rospy.Publisher("/rgb/image_rect_color", Image, queue_size=1)
        self.pub_info = rospy.Publisher("/rgb/camera_info", CameraInfo, queue_size=1)

    def callback_raw(self, message):
        # Rewrite the frame id in place and forward the image otherwise unchanged.
        message.header.frame_id = self.TARGET_FRAME_ID
        self.pub_raw.publish(message)

    def callback_info(self, message):
        message.header.frame_id = self.TARGET_FRAME_ID
        self.pub_info.publish(message)


# Bind the instance to a distinct name: the original rebound the class name to
# the instance, shadowing the class. (The unused `self.image` attribute and the
# duplicated frame-id literal were also removed/centralized.)
node = update_frame_id()
rospy.spin()
print("\nNode shutdown\n")
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <ros/ros.h>
#include <nodelet/nodelet.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/image_encodings.h>
#include <boost/thread.hpp>
namespace depth_image_proc {
namespace enc = sensor_msgs::image_encodings;
// Nodelet that converts a uint16 depth image (mm) to float (m), or a float
// depth image (m) back to uint16 (mm), republishing the result on "image".
class ConvertMetricNodelet : public nodelet::Nodelet
{
  // Subscriptions
  boost::shared_ptr<image_transport::ImageTransport> it_;
  image_transport::Subscriber sub_raw_;   // input depth image ("image_raw")
  // Publications
  boost::mutex connect_mutex_;            // serializes connectCb() with advertise
  image_transport::Publisher pub_depth_;  // converted depth image ("image")
  virtual void onInit();
  // (Un)subscribes to the input based on whether "image" has subscribers.
  void connectCb();
  // Converts one incoming depth image and publishes the result.
  void depthCb(const sensor_msgs::ImageConstPtr& raw_msg);
};
void ConvertMetricNodelet::onInit()
{
ros::NodeHandle& nh = getNodeHandle();
it_.reset(new image_transport::ImageTransport(nh));
// Monitor whether anyone is subscribed to the output
image_transport::SubscriberStatusCallback connect_cb = boost::bind(&ConvertMetricNodelet::connectCb, this);
// Make sure we don't enter connectCb() between advertising and assigning to pub_depth_
boost::lock_guard<boost::mutex> lock(connect_mutex_);
pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb);
}
// Lazily manages the input subscription: listen to "image_raw" while the
// output has subscribers, drop it when the last one disconnects.
void ConvertMetricNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_depth_.getNumSubscribers() == 0)
  {
    sub_raw_.shutdown();
    return;
  }
  if (!sub_raw_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_raw_ = it_->subscribe("image_raw", 1, &ConvertMetricNodelet::depthCb, this, hints);
  }
}
// Converts one depth image between the uint16 (mm) and float (m) encodings
// and publishes the result. Unsupported encodings are logged and dropped.
void ConvertMetricNodelet::depthCb(const sensor_msgs::ImageConstPtr& raw_msg)
{
  // Allocate the output image; encoding, step and data are filled below
  // according to the input encoding.
  sensor_msgs::ImagePtr depth_msg( new sensor_msgs::Image );
  depth_msg->header = raw_msg->header;
  depth_msg->height = raw_msg->height;
  depth_msg->width  = raw_msg->width;

  if (raw_msg->encoding == enc::TYPE_16UC1)
  {
    // uint16 millimetres -> float metres; 0 marks an invalid sample -> NaN.
    depth_msg->encoding = enc::TYPE_32FC1;
    depth_msg->step = raw_msg->width * (enc::bitDepth(depth_msg->encoding) / 8);
    depth_msg->data.resize(depth_msg->height * depth_msg->step);
    float bad_point = std::numeric_limits<float>::quiet_NaN ();
    const uint16_t* raw_data = reinterpret_cast<const uint16_t*>(&raw_msg->data[0]);
    float* depth_data = reinterpret_cast<float*>(&depth_msg->data[0]);
    for (unsigned index = 0; index < depth_msg->height * depth_msg->width; ++index)
    {
      uint16_t raw = raw_data[index];
      depth_data[index] = (raw == 0) ? bad_point : (float)raw * 0.001f;
    }
  }
  else if (raw_msg->encoding == enc::TYPE_32FC1)
  {
    // float metres -> uint16 millimetres; non-finite samples -> 0 (invalid).
    depth_msg->encoding = enc::TYPE_16UC1;
    depth_msg->step = raw_msg->width * (enc::bitDepth(depth_msg->encoding) / 8);
    depth_msg->data.resize(depth_msg->height * depth_msg->step);
    uint16_t bad_point = 0;
    const float* raw_data = reinterpret_cast<const float*>(&raw_msg->data[0]);
    uint16_t* depth_data = reinterpret_cast<uint16_t*>(&depth_msg->data[0]);
    for (unsigned index = 0; index < depth_msg->height * depth_msg->width; ++index)
    {
      float raw = raw_data[index];
      // Reject NaN AND +/-inf: casting a non-finite float to uint16_t is
      // undefined behavior. This matches DepthTraits<float>::valid(), which
      // also uses isfinite. (Previously only NaN was rejected.)
      depth_data[index] = !std::isfinite(raw) ? bad_point : (uint16_t)(raw * 1000);
    }
  }
  else
  {
    ROS_ERROR("Unsupported image conversion from %s.", raw_msg->encoding.c_str());
    return;
  }

  pub_depth_.publish(depth_msg);
}
} // namespace depth_image_proc
// Register as nodelet
#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(depth_image_proc::ConvertMetricNodelet,nodelet::Nodelet);
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <ros/ros.h>
#include <nodelet/nodelet.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/image_encodings.h>
#include <boost/thread.hpp>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/imgproc/imgproc.hpp>
namespace depth_image_proc {
namespace enc = sensor_msgs::image_encodings;
// Nodelet that keeps only the foremost depth band: pixels farther than
// (nearest valid depth + distance_) are zeroed out.
class CropForemostNodelet : public nodelet::Nodelet
{
  // Subscriptions
  boost::shared_ptr<image_transport::ImageTransport> it_;
  image_transport::Subscriber sub_raw_;   // input depth image ("image_raw")
  // Publications
  boost::mutex connect_mutex_;            // serializes connectCb() with advertise
  image_transport::Publisher pub_depth_;  // cropped depth image ("image")
  virtual void onInit();
  void connectCb();
  void depthCb(const sensor_msgs::ImageConstPtr& raw_msg);
  // Depth range (same units as the image) kept beyond the nearest valid
  // pixel; read from the private "distance" parameter in onInit().
  double distance_;
};
void CropForemostNodelet::onInit()
{
  ros::NodeHandle& nh = getNodeHandle();
  ros::NodeHandle& private_nh = getPrivateNodeHandle();
  // Read the crop range. param() (unlike the previous getParam()) supplies a
  // default, so distance_ is never read uninitialized in depthCb() when the
  // "distance" parameter is absent.
  private_nh.param("distance", distance_, 0.0);
  it_.reset(new image_transport::ImageTransport(nh));

  // Lazily manage the input subscription based on output demand.
  image_transport::SubscriberStatusCallback connect_cb = boost::bind(&CropForemostNodelet::connectCb, this);

  // Make sure we don't enter connectCb() between advertising and assigning to pub_depth_.
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb);
}
// Demand-driven subscription management for the "image_raw" input.
void CropForemostNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  const bool output_wanted = pub_depth_.getNumSubscribers() > 0;
  if (!output_wanted)
  {
    sub_raw_.shutdown();
  }
  else if (!sub_raw_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_raw_ = it_->subscribe("image_raw", 1, &CropForemostNodelet::depthCb, this, hints);
  }
}
// Crops one depth image to its foremost band and republishes it.
void CropForemostNodelet::depthCb(const sensor_msgs::ImageConstPtr& raw_msg)
{
  cv_bridge::CvImagePtr cv_ptr;
  try
  {
    // Deep copy so the image can be thresholded in place.
    cv_ptr = cv_bridge::toCvCopy(raw_msg);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }
  // Only single-channel (depth) images make sense here.
  if(sensor_msgs::image_encodings::numChannels(raw_msg->encoding) != 1){
    NODELET_ERROR_THROTTLE(2, "Only grayscale image is acceptable, got [%s]", raw_msg->encoding.c_str());
    return;
  }
  // Search the minimum value, masking out the invalid value "0".
  double minVal;
  cv::minMaxIdx(cv_ptr->image, &minVal, 0, 0, 0, cv_ptr->image != 0);
  int imtype = cv_bridge::getCvType(raw_msg->encoding);
  switch (imtype){
    case CV_8UC1:
    case CV_8SC1:
    case CV_32F:
      // Zero every pixel deeper than (minVal + distance_), keep the rest.
      // The maxval argument is ignored for THRESH_TOZERO_INV.
      cv::threshold(cv_ptr->image, cv_ptr->image, minVal + distance_, 0, CV_THRESH_TOZERO_INV);
      break;
    case CV_16UC1:
    case CV_16SC1:
    case CV_32SC1:
    case CV_64F:
      // 8 bit or 32 bit floating array is required to use cv::threshold, so
      // round-trip through CV_32F. NOTE(review): this is lossy for CV_32SC1 /
      // CV_64F values beyond float precision — confirm that is acceptable.
      cv_ptr->image.convertTo(cv_ptr->image, CV_32F);
      cv::threshold(cv_ptr->image, cv_ptr->image, minVal + distance_, 1, CV_THRESH_TOZERO_INV);
      cv_ptr->image.convertTo(cv_ptr->image, imtype);
      break;
    // NOTE(review): no default case — any other single-channel type passes
    // through un-cropped.
  }
  pub_depth_.publish(cv_ptr->toImageMsg());
}
} // namespace depth_image_proc
// Register as nodelet
#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(depth_image_proc::CropForemostNodelet,nodelet::Nodelet);
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <boost/version.hpp>
#if ((BOOST_VERSION / 100) % 1000) >= 53
#include <boost/thread/lock_guard.hpp>
#endif
#include <ros/ros.h>
#include <nodelet/nodelet.h>
#include <image_transport/image_transport.h>
#include <image_transport/subscriber_filter.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <sensor_msgs/image_encodings.h>
#include <stereo_msgs/DisparityImage.h>
#include <depth_image_proc/depth_traits.h>
namespace depth_image_proc {
namespace enc = sensor_msgs::image_encodings;
// Nodelet that converts a registered depth image ("left/image_rect") plus the
// right camera's info into a stereo_msgs::DisparityImage ("left/disparity").
class DisparityNodelet : public nodelet::Nodelet
{
  boost::shared_ptr<image_transport::ImageTransport> left_it_;
  ros::NodeHandlePtr right_nh_;
  image_transport::SubscriberFilter sub_depth_image_;             // left/image_rect
  message_filters::Subscriber<sensor_msgs::CameraInfo> sub_info_; // right/camera_info
  typedef message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::CameraInfo> Sync;
  boost::shared_ptr<Sync> sync_;  // pairs depth images with camera infos by stamp
  boost::mutex connect_mutex_;    // serializes connectCb() with advertise
  ros::Publisher pub_disparity_;
  // Parameters (read in onInit): range limits and disparity resolution
  // reported in the output message.
  double min_range_;
  double max_range_;
  double delta_d_;
  virtual void onInit();
  void connectCb();
  void depthCb(const sensor_msgs::ImageConstPtr& depth_msg,
               const sensor_msgs::CameraInfoConstPtr& info_msg);
  // Fills disp_msg->image from depth_msg for T = uint16_t (mm) or float (m).
  template<typename T>
  void convert(const sensor_msgs::ImageConstPtr& depth_msg,
               stereo_msgs::DisparityImagePtr& disp_msg);
};
void DisparityNodelet::onInit()
{
  ros::NodeHandle &nh = getNodeHandle();
  ros::NodeHandle &private_nh = getPrivateNodeHandle();
  ros::NodeHandle left_nh(nh, "left");
  left_it_.reset(new image_transport::ImageTransport(left_nh));
  right_nh_.reset( new ros::NodeHandle(nh, "right") );

  // Parameters: synchronizer queue depth plus the device-dependent range and
  // disparity-resolution values reported in the output message.
  int sync_queue_size;
  private_nh.param("queue_size", sync_queue_size, 5);
  private_nh.param("min_range", min_range_, 0.0);
  private_nh.param("max_range", max_range_, std::numeric_limits<double>::infinity());
  private_nh.param("delta_d", delta_d_, 0.125);

  // Pair depth images with camera infos by timestamp; the actual topic
  // subscriptions are created on demand in connectCb().
  sync_.reset( new Sync(sub_depth_image_, sub_info_, sync_queue_size) );
  sync_->registerCallback(boost::bind(&DisparityNodelet::depthCb, this,
                                      boost::placeholders::_1, boost::placeholders::_2));

  // Advertise under the lock so connectCb() never observes pub_disparity_
  // half-initialized.
  ros::SubscriberStatusCallback connect_cb = boost::bind(&DisparityNodelet::connectCb, this);
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  pub_disparity_ = left_nh.advertise<stereo_msgs::DisparityImage>("disparity", 1, connect_cb, connect_cb);
}
// Subscribe to the depth/info inputs only while "disparity" has listeners.
void DisparityNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_disparity_.getNumSubscribers() == 0)
  {
    sub_depth_image_.unsubscribe();
    sub_info_.unsubscribe();
    return;
  }
  if (!sub_depth_image_.getSubscriber())
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_depth_image_.subscribe(*left_it_, "image_rect", 1, hints);
    sub_info_.subscribe(*right_nh_, "camera_info", 1);
  }
}
// Builds and publishes one DisparityImage from a synchronized depth image /
// camera info pair.
void DisparityNodelet::depthCb(const sensor_msgs::ImageConstPtr& depth_msg,
                               const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  // Allocate new DisparityImage message; the image buffer is zero-filled, so
  // pixels whose depth is invalid keep a disparity of 0.
  stereo_msgs::DisparityImagePtr disp_msg( new stereo_msgs::DisparityImage );
  disp_msg->header = depth_msg->header;
  disp_msg->image.header = disp_msg->header;
  disp_msg->image.encoding = enc::TYPE_32FC1;
  disp_msg->image.height = depth_msg->height;
  disp_msg->image.width = depth_msg->width;
  disp_msg->image.step = disp_msg->image.width * sizeof (float);
  disp_msg->image.data.resize( disp_msg->image.height * disp_msg->image.step, 0.0f );
  // From the projection matrix: P[0] = fx and P[3] = -fx * baseline, so T
  // below is the stereo baseline.
  double fx = info_msg->P[0];
  disp_msg->T = -info_msg->P[3] / fx;
  disp_msg->f = fx;
  // Remaining fields depend on device characteristics, so rely on user input.
  // NOTE(review): with min_range left at its 0.0 default, max_disparity is
  // +inf — confirm downstream consumers tolerate that.
  disp_msg->min_disparity = disp_msg->f * disp_msg->T / max_range_;
  disp_msg->max_disparity = disp_msg->f * disp_msg->T / min_range_;
  disp_msg->delta_d = delta_d_;
  // Dispatch on the two supported depth encodings.
  if (depth_msg->encoding == enc::TYPE_16UC1)
  {
    convert<uint16_t>(depth_msg, disp_msg);
  }
  else if (depth_msg->encoding == enc::TYPE_32FC1)
  {
    convert<float>(depth_msg, disp_msg);
  }
  else
  {
    NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str());
    return;
  }
  pub_disparity_.publish(disp_msg);
}
// Fills the (zero-initialized) disparity image from the depth image using
// d = f * T / Z; invalid depth pixels are left at disparity 0.
template<typename T>
void DisparityNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg,
                               stereo_msgs::DisparityImagePtr& disp_msg)
{
  // Fold the unit conversion (mm->m for uint16) into the f*T numerator.
  const float meters_per_unit = DepthTraits<T>::toMeters( T(1) );
  const float focal_baseline = disp_msg->f * disp_msg->T / meters_per_unit;

  const T* row_ptr = reinterpret_cast<const T*>(&depth_msg->data[0]);
  const int row_stride = depth_msg->step / sizeof(T);
  float* out = reinterpret_cast<float*>(&disp_msg->image.data[0]);
  for (int v = 0; v < (int)depth_msg->height; ++v, row_ptr += row_stride)
  {
    for (int u = 0; u < (int)depth_msg->width; ++u, ++out)
    {
      const T depth = row_ptr[u];
      // Invalid samples skip the write, leaving the zero-filled disparity.
      if (DepthTraits<T>::valid(depth))
        *out = focal_baseline / depth;
    }
  }
}
} // namespace depth_image_proc
// Register as nodelet
#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(depth_image_proc::DisparityNodelet,nodelet::Nodelet);
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <ros/ros.h>
#include <nodelet/nodelet.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/image_encodings.h>
#include <image_geometry/pinhole_camera_model.h>
#include <boost/thread.hpp>
#include <depth_image_proc/depth_conversions.h>
#include <sensor_msgs/point_cloud2_iterator.h>
namespace depth_image_proc {
namespace enc = sensor_msgs::image_encodings;
// Nodelet that back-projects a depth image ("image_rect" + camera info) into
// an organized XYZ point cloud ("points").
class PointCloudXyzNodelet : public nodelet::Nodelet
{
  // Subscriptions
  boost::shared_ptr<image_transport::ImageTransport> it_;
  image_transport::CameraSubscriber sub_depth_;  // depth image + CameraInfo pair
  int queue_size_;  // subscriber queue depth (private param "queue_size", default 5)
  // Publications
  boost::mutex connect_mutex_;  // serializes connectCb() with advertise
  typedef sensor_msgs::PointCloud2 PointCloud;
  ros::Publisher pub_point_cloud_;
  image_geometry::PinholeCameraModel model_;  // rebuilt from each CameraInfo
  virtual void onInit();
  void connectCb();
  void depthCb(const sensor_msgs::ImageConstPtr& depth_msg,
               const sensor_msgs::CameraInfoConstPtr& info_msg);
};
void PointCloudXyzNodelet::onInit()
{
ros::NodeHandle& nh = getNodeHandle();
ros::NodeHandle& private_nh = getPrivateNodeHandle();
it_.reset(new image_transport::ImageTransport(nh));
// Read parameters
private_nh.param("queue_size", queue_size_, 5);
// Monitor whether anyone is subscribed to the output
ros::SubscriberStatusCallback connect_cb = boost::bind(&PointCloudXyzNodelet::connectCb, this);
// Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_
boost::lock_guard<boost::mutex> lock(connect_mutex_);
pub_point_cloud_ = nh.advertise<PointCloud>("points", 1, connect_cb, connect_cb);
}
// Hold the camera subscription only while the point cloud has subscribers.
void PointCloudXyzNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  const bool cloud_wanted = pub_point_cloud_.getNumSubscribers() > 0;
  if (!cloud_wanted)
  {
    sub_depth_.shutdown();
  }
  else if (!sub_depth_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_depth_ = it_->subscribeCamera("image_rect", queue_size_, &PointCloudXyzNodelet::depthCb, this, hints);
  }
}
// Per-frame callback: convert one depth image into an organized XYZ point
// cloud (same width/height as the image) and publish it. Supported depth
// encodings are 16UC1/MONO16 and 32FC1; anything else is rejected with a
// throttled error.
void PointCloudXyzNodelet::depthCb(const sensor_msgs::ImageConstPtr& depth_msg,
const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  // Keep the camera model in sync with the latest intrinsics.
  model_.fromCameraInfo(info_msg);

  // Reject unsupported encodings up front.
  const std::string& encoding = depth_msg->encoding;
  const bool u16_depth = (encoding == enc::TYPE_16UC1) || (encoding == enc::MONO16);
  const bool f32_depth = (encoding == enc::TYPE_32FC1);
  if (!(u16_depth || f32_depth))
  {
    NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str());
    return;
  }

  // Build an organized cloud mirroring the depth image dimensions.
  PointCloud::Ptr cloud(new PointCloud);
  cloud->header = depth_msg->header;
  cloud->width = depth_msg->width;
  cloud->height = depth_msg->height;
  cloud->is_bigendian = false;
  cloud->is_dense = false;  // NOTE(review): presumably invalid depth yields NaN points — confirm in convert<>
  sensor_msgs::PointCloud2Modifier layout(*cloud);
  layout.setPointCloud2FieldsByString(1, "xyz");

  // Dispatch on the pixel type of the depth data.
  if (u16_depth)
    convert<uint16_t>(depth_msg, cloud, model_);
  else
    convert<float>(depth_msg, cloud, model_);

  pub_point_cloud_.publish(cloud);
}
} // namespace depth_image_proc
// Register as nodelet
#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyzNodelet,nodelet::Nodelet);
This diff is collapsed.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package image_pipeline
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.16.0 (2021-11-12)
-------------------
1.15.3 (2020-12-11)
-------------------
* remove email blasts from steve macenski (`#595 <https://github.com/ros-perception/image_pipeline/issues/595>`_)
* Contributors: Steve Macenski
1.15.2 (2020-05-19)
-------------------
1.15.1 (2020-05-18)
-------------------
1.15.0 (2020-05-14)
-------------------
* Python 3 compatibility (`#530 <https://github.com/ros-perception/image_pipeline/issues/530>`_)
* cmake_minimum_required to 3.0.2
* Adapted to OpenCV4
* import setup from setuptools instead of distutils-core
* Contributors: Joshua Whitley
1.14.0 (2020-01-12)
-------------------
1.13.0 (2019-06-12)
-------------------
* Merge pull request `#395 <https://github.com/ros-perception/image_pipeline/issues/395>`_ from ros-perception/steve_maintain
* adding autonomoustuff maintainer
* adding stevemacenski as maintainer to get emails
* Contributors: Joshua Whitley, Yoshito Okada, stevemacenski
1.12.23 (2018-05-10)
--------------------
1.12.22 (2017-12-08)
--------------------
1.12.21 (2017-11-05)
--------------------
1.12.20 (2017-04-30)
--------------------
* Update package.xml (`#263 <https://github.com/ros-perception/image_pipeline/issues/263>`_)
* Contributors: Kei Okada
1.12.19 (2016-07-24)
--------------------
1.12.18 (2016-07-12)
--------------------
1.12.17 (2016-07-11)
--------------------
1.12.16 (2016-03-19)
--------------------
1.12.15 (2016-01-17)
--------------------
1.12.14 (2015-07-22)
--------------------
1.12.13 (2015-04-06)
--------------------
1.12.12 (2014-12-31)
--------------------
1.12.11 (2014-10-26)
--------------------
1.12.10 (2014-09-28)
--------------------
1.12.9 (2014-09-21)
-------------------
1.12.8 (2014-08-19)
-------------------
1.12.6 (2014-07-27)
-------------------
1.12.4 (2014-04-28)
-------------------
1.12.3 (2014-04-12)
-------------------
1.12.2 (2014-04-08)
-------------------
1.11.7 (2014-03-28)
-------------------
# Build file for the image_pipeline catkin metapackage: there are no sources
# to compile; catkin_metapackage() only installs the manifest so the stack's
# member packages can be depended on as a group.
cmake_minimum_required(VERSION 3.0.2)
project(image_pipeline)
find_package(catkin REQUIRED)
catkin_metapackage()
<!-- Catkin metapackage manifest (package format 1: run_depend tags list the
     member packages grouped under the image_pipeline stack). -->
<package>
<name>image_pipeline</name>
<version>1.16.0</version>
<description>image_pipeline fills the gap between getting raw images from a camera driver and higher-level vision processing.</description>
<author>Patrick Mihelich</author>
<author>James Bowman</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<maintainer email="software@autonomoustuff.com">Autonomoustuff team</maintainer>
<license>BSD</license>
<url type="website">http://www.ros.org/wiki/image_pipeline</url>
<url type="bugtracker">https://github.com/ros-perception/image_pipeline/issues</url>
<url type="repository">https://github.com/ros-perception/image_pipeline</url>
<buildtool_depend>catkin</buildtool_depend>
<!-- Old stack contents -->
<run_depend>camera_calibration</run_depend>
<run_depend>depth_image_proc</run_depend>
<run_depend>image_proc</run_depend>
<run_depend>image_publisher</run_depend>
<run_depend>image_rotate</run_depend>
<run_depend>image_view</run_depend>
<run_depend>stereo_image_proc</run_depend>
<export>
<!-- Marks this package as a metapackage for catkin tooling. -->
<metapackage/>
</export>
</package>
This diff is collapsed.
cmake_minimum_required(VERSION 3.0.2)
project(image_proc)

# Find build dependencies. The original file called find_package(catkin) twice
# (once bare, once with components but without the COMPONENTS keyword); the
# redundant call is removed and the component list made explicit.
find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  dynamic_reconfigure
  image_geometry
  image_transport
  nodelet
  nodelet_topic_tools
  roscpp
  sensor_msgs
)
find_package(OpenCV REQUIRED)
find_package(Boost REQUIRED COMPONENTS thread)

# Dynamic reconfigure support (generates <Name>Config headers for the nodelets).
generate_dynamic_reconfigure_options(cfg/CropDecimate.cfg cfg/Debayer.cfg cfg/Rectify.cfg cfg/Resize.cfg)

catkin_package(
  CATKIN_DEPENDS image_geometry roscpp sensor_msgs
  DEPENDS OpenCV
  INCLUDE_DIRS include
  LIBRARIES ${PROJECT_NAME}
)

include_directories(include ${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})

# Temporary fix for DataType deprecation in OpenCV 3.3.1. We continue to use the deprecated form for now because
# the new one is not available in OpenCV 2.4 (on Trusty).
add_definitions(-DOPENCV_TRAITS_ENABLE_DEPRECATED)

# Nodelet library
add_library(${PROJECT_NAME}
  src/libimage_proc/processor.cpp
  src/libimage_proc/advertisement_checker.cpp
  src/nodelets/debayer.cpp
  src/nodelets/rectify.cpp
  src/nodelets/resize.cpp
  src/nodelets/crop_decimate.cpp
  src/nodelets/crop_non_zero.cpp
  src/nodelets/edge_aware.cpp
)
# Make sure the generated dynamic_reconfigure headers exist before compiling.
add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg)
target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${Boost_LIBRARIES} ${OpenCV_LIBRARIES})

install(TARGETS ${PROJECT_NAME}
  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
)
install(DIRECTORY include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
)
install(FILES nodelet_plugins.xml
  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
)

# Standalone node. The target is named image_proc_exe to avoid clashing with
# the library target; the installed binary is still named "image_proc".
add_executable(image_proc_exe src/nodes/image_proc.cpp)
target_link_libraries(image_proc_exe ${PROJECT_NAME} ${Boost_LIBRARIES} ${OpenCV_LIBRARIES})
set_target_properties(image_proc_exe PROPERTIES OUTPUT_NAME image_proc)
install(TARGETS image_proc_exe
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

# install the launch file
install(DIRECTORY launch
  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/
)

if(CATKIN_ENABLE_TESTING)
  add_subdirectory(test)
endif()
This diff is collapsed.
This diff is collapsed.
#! /usr/bin/env python
# dynamic_reconfigure definition for image_proc's Rectify nodelet: a single
# enum parameter selecting the pixel interpolation algorithm.
PACKAGE='image_proc'

from dynamic_reconfigure.parameter_generator_catkin import *

gen = ParameterGenerator()

# Enum choices for the interpolation method (value 3 is intentionally absent).
interp_choices = [
    gen.const("NN",       int_t, 0, "Nearest neighbor"),
    gen.const("Linear",   int_t, 1, "Linear"),
    gen.const("Cubic",    int_t, 2, "Cubic"),
    gen.const("Lanczos4", int_t, 4, "Lanczos4"),
]
interpolate_enum = gen.enum(interp_choices, "interpolation type")

gen.add("interpolation", int_t, 0,
        "Interpolation algorithm between source image pixels",
        1, 0, 4, edit_method=interpolate_enum)

# First string value is node name, used only for generating documentation.
# Second string value ("Rectify") is the name of the generated class and
# .h file, with "Config" appended: RectifyConfig.
exit(gen.generate(PACKAGE, "image_proc", "Rectify"))
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment