Commit bdfe40fb authored by Larkin Heintzman

working image build

parent bd3e6dd8
@@ -82,3 +82,12 @@ kubectl exec <pod_name> -it -- bash
kubectl get service -> good for getting port numbers
kubectl delete -f deployment.yaml -> removes (eventually) the pods created by that deployment from the cluster
kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName --all-namespaces
label nodes to force a pod to run on a specific node:
kubectl label nodes <node_name> <selector_key>=<selector_value>
then in the pod spec, add (note: key=value on the label command, but key: value in the yaml)
nodeSelector:
  <selector_key>: <selector_value>
check which pods are running on which nodes:
kubectl get pods --all-namespaces -o wide
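for example, to pin the drone pod to the node plath using the labels this commit's deployments expect (node names plath/neruda/master are assumptions about this particular cluster):
kubectl label nodes plath name=plath
and in the pod spec:
nodeSelector:
  name: plath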
@@ -4,31 +4,56 @@ FROM ${from}
# RUN apt-get update && apt-get install -y \
# python-catkin-tools \
# && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y python3-pip git
RUN apt-get update && apt-get install -y python3-pip git ca-certificates
RUN apt-get update && apt-get install -y libsdl2-dev libusb-1.0-0-dev build-essential cmake gcc ffmpeg python3-opencv libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavresample-dev libavutil-dev libpostproc-dev libswresample-dev libswscale-dev
RUN apt-get update && apt-get install -y ros-noetic-tf
RUN pip3 install git+https://github.com/catkin/catkin_tools.git
# set up udev and usb stuff
WORKDIR /opt
RUN mkdir -p /etc/udev/rules.d && touch /etc/udev/rules.d/DJIDevice.rules \
&& echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="2ca3", MODE="0666"' >> /etc/udev/rules.d/DJIDevice.rules \
&& echo 'SUBSYSTEM=="tty", ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", SYMLINK+="dji_usb"' >> /etc/udev/rules.d/DJIDevice.rules
# && usermod -a -G dialout $USER
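# note: 2ca3 is DJI's USB vendor id; 10c4:ea60 is the CP210x USB-UART bridge, symlinked to /dev/dji_usb for the OSDK serial link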
# clone and install the onboard sdk first
RUN git clone http://git.caslab.ece.vt.edu/hlarkin3/onboard-sdk-arm.git
RUN mkdir onboard-sdk-arm/build
WORKDIR /opt/onboard-sdk-arm/build
RUN cmake .. \
&& make -j4\
&& make install\
&& cp ../sample/platform/linux/common/UserConfig.txt bin/ \
&& echo "app_id : 1069806" > bin/UserConfig.txt \
&& echo "app_key : e180b993ca8365653437fbafe7211ba040386d77c3c87627882857a11bd8efbd" >> bin/UserConfig.txt \
&& echo "device : /dev/dji_usb" >> bin/UserConfig.txt \
&& echo "baudrate : 921600" >> bin/UserConfig.txt \
&& echo "acm_port : /dev/ttyACM0" >> bin/UserConfig.txt \
&& cp bin/UserConfig.txt /root/.ros/UserConfig.txt
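# (the OSDK samples appear to read UserConfig.txt from the working directory, which is /root/.ros under roslaunch, hence the extra copy)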
# clone eigen into correct location and rename
WORKDIR /usr/local/include/
RUN git clone https://gitlab.com/libeigen/eigen.git && mv eigen/ Eigen3/
# clone ros package repo
ENV ROS_WS /opt/ros_ws
RUN mkdir -p $ROS_WS/src
WORKDIR $ROS_WS
RUN git -C src clone \
-b $ROS_DISTRO-devel \
https://github.com/ros/ros_tutorials.git
RUN git -C src clone https://git.caslab.ece.vt.edu/hlarkin3/drone-ros-packages/ -b replicants
# install ros package dependencies
RUN apt-get update && \
rosdep update && \
rosdep install -y \
--from-paths \
src/ros_tutorials/roscpp_tutorials \
--ignore-src && \
rosdep install -y --from-paths src/* --ignore-src && \
rm -rf /var/lib/apt/lists/*
# build ros package source
RUN catkin config \
--extend /opt/ros/$ROS_DISTRO && \
catkin build \
roscpp_tutorials
catkin build
# \
# roscpp_tutorials
COPY bashCheckRoscore.sh /usr/local/bin/bashCheckRoscore.sh
COPY ros_entrypoint.sh /usr/local/bin/ros_entrypoint.sh
......
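bashCheckRoscore.sh is copied into the image above and called by the drone deployment below, but its contents are not part of this diff; a minimal sketch of such a wait-for-roscore gate (an assumption, not the actual script) could be:
#!/bin/bash
# assumes the ROS environment is already sourced (the deployment sources devel/setup.bash before calling this)
# block until the ROS master at ROS_MASTER_URI answers, then exit 0 so the caller can continue
until rostopic list > /dev/null 2>&1; do
    echo "waiting for roscore at ${ROS_MASTER_URI} ..."
    sleep 2
done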
@@ -45,6 +45,8 @@ spec:
          value: http://service-master:11311
        - name: ROS_HOSTNAME
          value: service-listener
      nodeSelector:
        name: plath
      # When the roscore container stops or fails, all the node
      # containers need to be restarted because the ros network
      # configuration is lost.
......
@@ -27,6 +27,8 @@ spec:
        ports:
        - containerPort: 11311
          name: roscoreport
      nodeSelector:
        name: master
---
......
apiVersion: apps/v1
kind: Deployment
metadata:
  name: drone-deployment
  labels:
    app: llhcluster
    node: drone
spec:
  replicas: 1
  # The deployment handles all matching templated pods
  selector:
    matchLabels:
      node: drone
  # Template for a replica.
  # The deployment makes sure that a POD containing the containers
  # defined below is always running.
  template:
    metadata:
      labels:
        node: drone
    spec:
      containers:
      # The real node container
      - name: drone-container
        image: llh/drone:v0
        command: ["/bin/bash"]
        args: ["-c", "source /opt/ros_ws/devel/setup.bash && /usr/local/bin/bashCheckRoscore.sh && rostopic pub -r 1 plathReady std_msgs/String 'ready'"]
        env:
        - name: ROS_MASTER_URI
          value: http://service-master:11311
        - name: ROS_HOSTNAME
          value: service-drone
      nodeSelector:
        name: plath
---
apiVersion: v1
kind: Service
metadata:
  name: service-drone
  labels:
    app: llhcluster
    node: drone
spec:
  # Start a headless service
  # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
  clusterIP: None
  ports:
  # Dummy port
  - port: 11311
  selector:
    node: drone
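once this file is applied, a quick sanity check (pod name below is a placeholder) is:
kubectl get pods -o wide | grep drone -> NODE column should show plath
kubectl exec -it <drone_pod_name> -- bash -c "source /opt/ros_ws/devel/setup.bash && rostopic echo -n 1 /plathReady"
the headless service only exists so that service-drone resolves to the pod's IP when the node advertises itself with ROS_HOSTNAME=service-drone; no cluster IP or load balancing is involved.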
@@ -45,6 +45,8 @@ spec:
          value: http://service-master:11311
        - name: ROS_HOSTNAME
          value: service-talker
      nodeSelector:
        name: neruda
      # When the roscore container stops or fails, all the node
      # containers need to be restarted because the ros network
      # configuration is lost.
......