Larkin Heintzman / ROS-Kubernetes / Commits

Commit 0fe0fa2e
Authored Aug 30, 2022 by Larkin Heintzman
Commit message: general updates
Parent: aa82576b
Changes: 6
Showing 6 changed files with 832 additions and 10 deletions (+832, -10).
Changed files:
- Makefile (.../yolo/yolov4-for-darknet_ros/darknet_ros/darknet/Makefile): +1, -1
- yolo-visdrone.yaml (...net_ros/darknet_ros/darknet_ros/config/yolo-visdrone.yaml): +20, -0
- yolo_visdrone.launch (...t_ros/darknet_ros/darknet_ros/launch/yolo_visdrone.launch): +18, -0
- darknetImageProc.py (...t_ros/darknet_ros/darknet_ros/scripts/darknetImageProc.py): +9, -9
- yolov3-visdrone.cfg (...s/darknet_ros/yolo_network_config/cfg/yolov3-visdrone.cfg): +784, -0
- yolov3-visdrone_firstPass.weights (..._network_config/weights/yolov3-visdrone_firstPass.weights): +0, -0 (binary)
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet/Makefile

@@ -14,7 +14,7 @@ ZED_CAMERA_v2_8=0
 # set ZED_CAMERA=1 to enable ZED SDK 3.0 and above
 # set ZED_CAMERA_v2_8=1 to enable ZED SDK 2.X
-USE_CPP=1
+USE_CPP=0
 DEBUG=0
 ARCH= -gencode arch=compute_30,code=sm_30 \
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet_ros/config/yolo-visdrone.yaml (new file, mode 100755)

yolo_model:
  config_file:
    name: yolov3-visdrone.cfg
  weight_file:
    name: yolov3-visdrone_firstPass.weights
  threshold:
    value: 0.3
  detection_classes:
    names:
      - pedestrian
      - people
      - bicycle
      - car
      - van
      - truck
      - tricycle
      - awning-tricycle
      - bus
      - motor
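For context (not part of this commit): once darknet_ros runs with this parameter file, detections carrying these class names are published as bounding boxes, and a downstream node can filter them much as darknetImageProc.py does. A minimal sketch, assuming the upstream darknet_ros defaults (darknet_ros_msgs/BoundingBoxes on /darknet_ros/bounding_boxes — both names are assumptions taken from the upstream package, not something this commit defines):

    #!/usr/bin/env python
    # Sketch only: subscribe to darknet_ros detections produced with
    # yolo-visdrone.yaml and keep high-confidence classes of interest.
    # Topic and message names are assumed upstream darknet_ros defaults.
    import rospy
    from darknet_ros_msgs.msg import BoundingBoxes

    CLASSES_OF_INTEREST = {"pedestrian", "people", "car", "truck", "bicycle"}
    MIN_CONFIDENCE = 0.5  # mirrors reportConfidence in darknetImageProc.py

    def boxes_callback(msg):
        # Each BoundingBox carries a class label, a probability, and pixel extents.
        for box in msg.bounding_boxes:
            if box.Class in CLASSES_OF_INTEREST and box.probability >= MIN_CONFIDENCE:
                rospy.loginfo("%s (%.2f) at [%d, %d, %d, %d]",
                              box.Class, box.probability,
                              box.xmin, box.ymin, box.xmax, box.ymax)

    if __name__ == "__main__":
        rospy.init_node("visdrone_box_filter")
        rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, boxes_callback)
        rospy.spin()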
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet_ros/launch/yolo_visdrone.launch (new file, mode 100755)

<?xml version="1.0" encoding="utf-8"?>
<launch>
  <!-- Use YOLOv4-tiny -->
  <arg name="network_param_file" default="$(find darknet_ros)/config/yolo-visdrone.yaml"/>
  <arg name="image" default="webcam/image_raw"/>

  <!-- Start image processing node -->
  <node name="darknetImageProcessor" type="darknetImageProc.py" pkg="darknet_ros"/>

  <!-- Include main launch file -->
  <include file="$(find darknet_ros)/launch/darknet_ros.launch">
    <arg name="network_param_file" value="$(arg network_param_file)"/>
    <arg name="image" value="$(arg image)"/>
  </include>
</launch>
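Assuming the darknet_ros package is built and on the ROS package path, this launch file would typically be started with roslaunch, overriding the image topic as needed (the topic shown is just the file's own default):

    roslaunch darknet_ros yolo_visdrone.launch image:=webcam/image_raw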
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet_ros/scripts/darknetImageProc.py

@@ -18,18 +18,16 @@ class ImageProcessor():
         self.image = None
         self.seq = None
         self.newImageFlag = False
         self.firstCallback = True
-        self.frameTimer = rospy.Time.now()
-        self.totalTime = 0.0
         # stuff that needs to be parameters
-        self.objectsOfInterest = ["person", "backpack", "car"] # things we report
-        self.reportConfidence = 0.85 # confidence required for report
+        self.objectsOfInterest = ["people", "pedestrian", "car", "truck", "bicycle"] # things we report
+        self.reportConfidence = 0.5 # confidence required for report
         self.imageSaveDir = "/tmp/darknetMemories/"
         # place to put statistics
         self.statsSavePath = "/tmp/darknetMemories/stats.json"
-        self.statsDict = {"savedImages" : 0, "totalImages" : 0, "averageFramerate" : 0.0, "minimumConfidence" : self.reportConfidence}
+        self.statsDict = {"savedImages" : 0, "totalImages" : 0, "averageFramerate" : 0.0, "minimumConfidence" : self.reportConfidence, "totalTime" : 0.0}
         rospy.on_shutdown(self.onShutdown)

     def onShutdown(self):

@@ -61,9 +59,11 @@ class ImageProcessor():
     def imageCallback(self, frame):
         self.newImageFlag = True
         self.statsDict["totalImages"] = self.statsDict["totalImages"] + 1
-        self.frameTimer = rospy.Time.now() - self.frameTimer
-        self.totalTime += self.frameTimer.to_sec()
-        self.statsDict["averageFramerate"] = self.statsDict["totalImages"] / self.totalTime
+        if not self.firstCallback:
+            self.statsDict["totalTime"] += rospy.get_time() - self.callbackTime
+            self.statsDict["averageFramerate"] = self.statsDict["totalImages"] / self.statsDict["totalTime"]
+        self.firstCallback = False
+        self.callbackTime = rospy.get_time()
         # load image from topic for later use
         self.image = self.bridge.imgmsg_to_cv2(frame, "bgr8")
         self.seq = frame.header.seq
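In short, the script now tracks timing through the stats dictionary instead of the separate frameTimer/totalTime attributes: every callback after the first adds the wall-clock gap since the previous callback to statsDict["totalTime"] and recomputes averageFramerate as totalImages / totalTime. A self-contained sketch of that bookkeeping pattern, with time.monotonic() standing in for rospy.get_time() (an assumption so the example runs outside ROS):

    import time

    # Sketch of the bookkeeping pattern used in imageCallback(): accumulate
    # wall-clock time between frames and derive an average framerate.
    stats = {"totalImages": 0, "totalTime": 0.0, "averageFramerate": 0.0}
    first_callback = True
    last_callback_time = 0.0

    def on_frame():
        global first_callback, last_callback_time
        stats["totalImages"] += 1
        now = time.monotonic()
        if not first_callback:
            # add the gap since the previous frame, then refresh the average
            stats["totalTime"] += now - last_callback_time
            stats["averageFramerate"] = stats["totalImages"] / stats["totalTime"]
        first_callback = False
        last_callback_time = now

    for _ in range(5):
        on_frame()
        time.sleep(0.05)
    print(stats)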
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet_ros/yolo_network_config/cfg/yolov3-visdrone.cfg (new file, mode 100644)
[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=100
max_batches = 501000
policy=steps
steps=40000,45000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=45
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1
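One consistency check worth noting: each of the three [yolo] heads declares classes=10 (the ten VisDrone classes listed in yolo-visdrone.yaml), and the convolutional layer directly above each head uses filters=45, which matches the usual darknet sizing rule filters = (classes + 5) * masks_per_head = (10 + 5) * 3. A one-line check of that arithmetic:

    # darknet sizing rule: the conv layer feeding a [yolo] head needs
    # (classes + 5) * masks_per_head filters (4 box coords + 1 objectness + class scores)
    classes, masks_per_head = 10, 3
    assert (classes + 5) * masks_per_head == 45  # matches filters=45 before each [yolo] section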
docker/yolo/yolov4-for-darknet_ros/darknet_ros/darknet_ros/yolo_network_config/weights/yolov3-visdrone_firstPass.weights (new file, mode 100644)

Binary file added.