Commit 9bb107af authored by Your Name

ha cluster update

parent 845edc97
host private ip: whatever ifconfig returns
master_0_ip: 192.168.1.230 # popIP
master_1_ip: 192.168.1.24 # plathIP
master_2_ip: 192.168.1.191 # nerudaIP
# a few files will likely need changing when switching between modem operation and an ethernet connection
(old) The files that need changing are located in /etc/kubernetes/pki/etcd (the directory holding the cfssl certs and such) and in /etc/systemd/system/etcd3.service (the file that contains the service definition)
Sometimes the etcd cluster gets out of sync (in terms of creation state and data files) and the data directory needs to be wiped on all nodes while etcd3.service is NOT running: run sudo rm -r /var/lib/etcd/* on each node, then restart with the daemon-reload procedure below on all nodes in quick succession.
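A minimal per-node reset sequence (a sketch; assumes sudo access and that only the contents of /var/lib/etcd should be wiped):
sudo systemctl stop etcd3.service
sudo rm -r /var/lib/etcd/*
sudo systemctl daemon-reload
sudo systemctl start etcd3.service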
----------------------------
Control plane config notes:
Copy kube-vip.yaml into /etc/kubernetes/manifests/kube-vip.yaml to set up a virtual IP shared by the multiple control planes. Note the VIP value as well; once the first control plane is up it can be checked via kubectl cluster-info. The tweaked control-plane join command gets printed when running ./clusterSetup (note the corresponding changes in the clusterConfig.yaml file as well).
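Rough sketch of that flow on the first control plane (VIP and port come from the kube-vip and kubeadm configs in this repo; the token, CA hash, and certificate key are placeholders printed by kubeadm, not real values):
sudo cp kube-vip.yaml /etc/kubernetes/manifests/kube-vip.yaml
kubectl cluster-info   # should show the control plane at https://192.168.1.240:6443
# the join command printed for additional control planes is roughly of the form:
# sudo kubeadm join 192.168.1.240:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <key>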
----------------------------
Commands to restart etcd service on all master nodes:
sudo systemctl daemon-reload
sudo systemctl enable etcd3.service
sudo systemctl start etcd3.service
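Once the service is up on all three masters, cluster health can be checked with etcdctl (a sketch; assumes the v3 API and the plain-HTTP client URLs configured below):
ETCDCTL_API=3 etcdctl --endpoints=http://192.168.1.230:2379,http://192.168.1.24:2379,http://192.168.1.191:2379 member list
ETCDCTL_API=3 etcdctl --endpoints=http://192.168.1.230:2379,http://192.168.1.24:2379,http://192.168.1.191:2379 endpoint health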
----------------------------
etcd.env file located at /etc/etcd.env on each node (the THIS_* variables need to be updated per node)
# grab new token tbd
TOKEN="token-01"
CLUSTER_STATE=new
NAME_1=pop-os
NAME_2=rossetti
NAME_3=neruda
HOST_1=192.168.1.230
HOST_2=192.168.1.24
HOST_3=192.168.1.191
CLUSTER=pop-os=http://192.168.1.230:2380,rossetti=http://192.168.1.24:2380,neruda=http://192.168.1.191:2380
THIS_NAME=pop-os
THIS_IP=192.168.1.230
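For example, on the other two nodes only the THIS_* lines change (values taken from the host list above):
# rossetti
THIS_NAME=rossetti
THIS_IP=192.168.1.24
# neruda
THIS_NAME=neruda
THIS_IP=192.168.1.191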
----------------------------
etcd3.service file located in /etc/systemd/system/etcd3.service on each node
[Unit]
Description=etcd
Documentation=http://github.com/coreos/etcd
Conflicts=etcd.service
Conflicts=etcd2.service
[Service]
EnvironmentFile=/etc/etcd.env
Type=notify
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/bin/etcd \
--name ${THIS_NAME} \
--data-dir /var/lib/etcd \
--initial-advertise-peer-urls http://${THIS_IP}:2380 \
--listen-peer-urls http://${THIS_IP}:2380 \
--advertise-client-urls http://${THIS_IP}:2379 \
--listen-client-urls http://${THIS_IP}:2379 \
--initial-cluster '${CLUSTER}' \
--initial-cluster-token ${TOKEN} \
--initial-cluster-state ${CLUSTER_STATE}
[Install]
WantedBy=multi-user.target
#!/bin/sh
# API server health check script used by keepalived (mounted into the keepalived image at /etc/keepalived/check_apiserver.sh)
errorExit() {
echo "*** $*" 1>&2
exit 1
}
curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
if ip addr | grep -q ${APISERVER_VIP}; then
curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi
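The script expects APISERVER_VIP and APISERVER_DEST_PORT in its environment; with the kube-vip/keepalived settings in this repo those would presumably be (an assumption based on the manifests below):
export APISERVER_VIP=192.168.1.240
export APISERVER_DEST_PORT=6443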
apiVersion: v1
kind: Pod
metadata:
name: etcd-server
spec:
hostNetwork: true
containers:
- image: gcr.io/google_containers/etcd:2.0.9
name: etcd-container
command:
- /usr/local/bin/etcd
- --name
- ${NODE_NAME}
- --initial-advertise-peer-urls
- http://${NODE_IP}:2380
- --listen-peer-urls
- http://${NODE_IP}:2380
- --advertise-client-urls
- http://${NODE_IP}:4001
- --listen-client-urls
- http://127.0.0.1:4001
- --data-dir
- /var/etcd/data
- --discovery
- ${DISCOVERY_TOKEN}
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
volumes:
- hostPath:
path: /var/etcd/data
name: varetcd
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
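This template leaves ${NODE_NAME}, ${NODE_IP}, and ${DISCOVERY_TOKEN} unset; one way (not a repo-provided step) to render a per-node copy is envsubst, e.g. for pop-os, with the discovery URL left as a placeholder:
export NODE_NAME=pop-os NODE_IP=192.168.1.230 DISCOVERY_TOKEN="<discovery-url>"
envsubst < etcd.yaml | sudo tee /etc/kubernetes/manifests/etcd.yaml > /dev/null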
[Unit]
Description=etcd
Documentation=http://github.com/coreos/etcd
Conflicts=etcd.service
Conflicts=etcd2.service
[Service]
EnvironmentFile=/etc/etcd.env
Type=notify
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/bin/etcd \
--name ${THIS_NAME} \
--data-dir /var/lib/etcd \
--initial-advertise-peer-urls http://${THIS_IP}:2380 \
--listen-peer-urls http://${THIS_IP}:2380 \
--advertise-client-urls http://${THIS_IP}:2379 \
--listen-client-urls http://${THIS_IP}:2379 \
--initial-cluster '${CLUSTER}' \
--initial-cluster-token ${TOKEN} \
--initial-cluster-state ${CLUSTER_STATE}
[Install]
WantedBy=multi-user.target
apiVersion: v1
kind: Pod
# etcd yaml file designed to be put into each master node's /etc/kubernetes/manifests/etcd.yaml file, with its own IP and name and a discovery token that matches across all nodes
metadata:
name: etcd-server
spec:
hostNetwork: true
containers:
- image: pachyderm/etcd:v3.5.2
name: etcd-container
command:
- /usr/local/bin/etcd
- --name
- neruda
- --initial-advertise-peer-urls
- http://192.168.1.191:2380
- --listen-peer-urls
- http://192.168.1.191:2380
- --advertise-client-urls
- http://192.168.1.191:4001
- --listen-client-urls
- http://127.0.0.1:4001
- --data-dir
- /var/etcd/data
- --discovery
- https://discovery.etcd.io/f2118acbf70e71971f6869f6b6bcb8cc
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
volumes:
- hostPath:
path: /var/etcd/data
name: varetcd
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
apiVersion: v1
kind: Pod
# etcd yaml file designed to be put into each master node's /etc/kubernetes/manifests/etcd.yaml file, with its own IP and name and a discovery token that matches across all nodes
metadata:
name: etcd-server
spec:
hostNetwork: true
containers:
- image: pachyderm/etcd:v3.5.2
name: etcd-container
command:
- /usr/local/bin/etcd
- --name
- pop-os
- --initial-advertise-peer-urls
- http://192.168.1.230:2380
- --listen-peer-urls
- http://192.168.1.230:2380
- --advertise-client-urls
- http://192.168.1.230:4001
- --listen-client-urls
- http://127.0.0.1:4001
- --data-dir
- /var/etcd/data
- --discovery
- https://discovery.etcd.io/f2118acbf70e71971f6869f6b6bcb8cc
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
volumes:
- hostPath:
path: /var/etcd/data
name: varetcd
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
apiVersion: v1
kind: Pod
# etcd yaml file designed to be put into each master node's /etc/kubernetes/manifests/etcd.yaml file, with its own IP and name and a discovery token that matches across all nodes
metadata:
name: etcd-server
spec:
hostNetwork: true
containers:
- image: pachyderm/etcd:v3.5.2
name: etcd-container
command:
- /usr/local/bin/etcd
- --name
- rossetti
- --initial-advertise-peer-urls
- http://192.168.1.24:2380
- --listen-peer-urls
- http://192.168.1.24:2380
- --advertise-client-urls
- http://192.168.1.24:4001
- --listen-client-urls
- http://127.0.0.1:4001
- --data-dir
- /var/etcd/data
- --discovery
- https://discovery.etcd.io/f2118acbf70e71971f6869f6b6bcb8cc
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
volumes:
- hostPath:
path: /var/etcd/data
name: varetcd
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
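All three per-node manifests must share the same discovery URL; a fresh one for a three-node cluster can be requested from the public etcd discovery service (a sketch, not a step scripted in this repo):
curl 'https://discovery.etcd.io/new?size=3'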
! /etc/keepalived/keepalived.conf
! Configuration File for keepalived (the docker image has its own config file)
global_defs {
router_id LVS_DEVEL
}
vrrp_script check_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 3
weight -2
fall 10
rise 2
}
vrrp_instance VI_1 {
state ${STATE}
interface ${INTERFACE}
virtual_router_id ${ROUTER_ID}
priority ${PRIORITY}
authentication {
auth_type PASS
auth_pass ${AUTH_PASS}
}
virtual_ipaddress {
${APISERVER_VIP}
}
track_script {
check_apiserver
}
}
apiVersion: v1
kind: Pod
# put this in /etc/kubernetes/manifests (it gets deleted on cluster reset)
metadata:
creationTimestamp: null
name: keepalived
namespace: kube-system
spec:
containers:
- image: osixia/keepalived:2.0.17
name: keepalived
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
volumeMounts:
- mountPath: /usr/local/etc/keepalived/keepalived.conf
name: config
- mountPath: /etc/keepalived/check_apiserver.sh
name: check
env:
- name: KEEPALIVED_INTERFACE
value: wlp0s20f3
- name: KEEPALIVED_PASSWORD
value: password
- name: KEEPALIVED_PRIORITY
value: "100"
- name: KEEPALIVED_ROUTER_ID
value: "51"
- name: KEEPALIVED_UNICAST_PEERS
value: "#PYTHON2BASH:['192.168.1.24', '192.168.1.191', '192.168.1.230']"
# the actual load balancer v-ip
- name: KEEPALIVED_VIRTUAL_IPS
value: "#PYTHON2BASH:['192.168.1.240']"
- name: KEEPALIVED_STATE
value: MASTER
hostNetwork: true
volumes:
- hostPath:
path: /etc/keepalived/keepalived.conf
name: config
- hostPath:
path: /etc/keepalived/check_apiserver.sh
name: check
status: {}
apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver
spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02
command:
- /bin/sh
- -c
- /usr/local/bin/kube-apiserver --address=127.0.0.1 --etcd-servers=http://127.0.0.1:4001
--cloud-provider=gce --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
--service-cluster-ip-range=10.0.0.0/16 --client-ca-file=/srv/kubernetes/ca.crt
--basic-auth-file=/srv/kubernetes/basic_auth.csv --cluster-name=e2e-test-bburns
--tls-cert-file=/srv/kubernetes/server.cert --tls-private-key-file=/srv/kubernetes/server.key
--secure-port=443 --token-auth-file=/srv/kubernetes/known_tokens.csv --v=2
--allow-privileged=False 1>>/var/log/kube-apiserver.log 2>&1
ports:
- containerPort: 443
hostPort: 443
name: https
- containerPort: 7080
hostPort: 7080
name: http
- containerPort: 8080
hostPort: 8080
name: local
volumeMounts:
- mountPath: /srv/kubernetes
name: srvkube
readOnly: true
- mountPath: /var/log/kube-apiserver.log
name: logfile
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ssl
name: usrsharessl
readOnly: true
- mountPath: /var/ssl
name: varssl
readOnly: true
- mountPath: /usr/ssl
name: usrssl
readOnly: true
- mountPath: /usr/lib/ssl
name: usrlibssl
readOnly: true
- mountPath: /usr/local/openssl
name: usrlocalopenssl
readOnly: true
- mountPath: /etc/openssl
name: etcopenssl
readOnly: true
- mountPath: /etc/pki/tls
name: etcpkitls
readOnly: true
volumes:
- hostPath:
path: /srv/kubernetes
name: srvkube
- hostPath:
path: /var/log/kube-apiserver.log
name: logfile
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ssl
name: usrsharessl
- hostPath:
path: /var/ssl
name: varssl
- hostPath:
path: /usr/ssl
name: usrssl
- hostPath:
path: /usr/lib/ssl
name: usrlibssl
- hostPath:
path: /usr/local/openssl
name: usrlocalopenssl
- hostPath:
path: /etc/openssl
name: etcopenssl
- hostPath:
path: /etc/pki/tls
name: etcpkitls
apiVersion: v1
# virtual IP load balancer pod, goes in /etc/kubernetes/manifests
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_interface
value: wlp0s20f3
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: vip_address
value: 192.168.1.240
image: ghcr.io/kube-vip/kube-vip:v0.4.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_TIME
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig
status: {}
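After this pod comes up on the first control plane, a quick sanity check (assuming the wlp0s20f3 interface used above) is that the VIP is actually bound:
ip addr show wlp0s20f3 | grep 192.168.1.240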
# ROS-Kubernetes
Contains scripts and files to create a fielded ROS Kubernetes cluster.
[[_TOC_]]
## Image building
@@ -107,6 +108,25 @@ potentially followed by [clean-up][cleanupCluster]
[cleanupCluster]: [https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/]
## Highly Available Cluster Setup
The cluster can also be brought up using the highly-available paradigm (more details and steps [here][HAtutorial] and in the official Kubernetes docs [here][HAtutorialK8]), which allows multiple control-plane nodes in the cluster. To achieve this setup we create an external etcd cluster spanning all the nodes to be used in the K8s cluster, configure `kubeadm` to use that external etcd option, create a load balancer with kube-vip pods so there is a single point of contact, and finally add each node to the K8s cluster as a control plane.
[HAtutorial]: [https://medium.com/velotio-perspectives/demystifying-high-availability-in-kubernetes-using-kubeadm-3d83ed8c458b]
[HAtutorialK8]: [https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#stacked-control-plane-and-etcd-nodes]
### ETCD Cluster Setup
We can create an external etcd cluster using files from this repo with the following steps:
- Make sure etcd is installed on the machine (steps [here][etcdInstall]), though we will be using a different `etcd.conf` file, so stop after step 1. Confirm the installation with `etcd --version` (expect a warning about an unsupported ARM architecture on a Jetson; `export ETCD_UNSUPPORTED_ARCH=arm64` fixes it). This also installs the `etcdctl` tool, which we'll need later.
- Create the file `/etc/systemd/system/etcd3.service` as a copy of the `etcd3.service` template in the [HA cluster][ha_folder] folder, then remove any other `etcdx.service` files to reduce confusion. Do this on every node to be added to the cluster. No changes need to be made to the service file itself, since an environment file specifies the per-node addresses (see the command sketch after this list).

[etcdInstall]: [https://docs.portworx.com/reference/knowledge-base/etcd-quick-setup/]
-
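A minimal sketch of placing and starting the copied unit on each node (assuming the template sits at `HAcluster/etcd3.service` and `/etc/etcd.env` is already in place; these mirror the restart commands in the HA notes):

```sh
sudo cp HAcluster/etcd3.service /etc/systemd/system/etcd3.service
sudo systemctl daemon-reload
sudo systemctl enable etcd3.service
sudo systemctl start etcd3.service
```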
## ROS Commands
Once the cluster is up, either with the cluster setup bash script or otherwise, there are several helpful built-in testing commands to take advantage of.
@@ -153,6 +173,7 @@ The pods are already configured to have access to each other via services as well
If the pipeline is working, the NFS directory shared from the control plane should be filling up with images. Not every image received is saved, so there may be a delay of a few seconds between images at the moment (a kubernetes-based processing pipeline is TBD).
## Handy Troubleshooting Commands
There are a number of non-intuitive kubernetes commands that are super helpful for specific tasks:
@@ -202,3 +223,4 @@ Debugging clusters can be somewhat difficult as everything is stuffed into pods,
[docker-buildx-platform]: https://github.com/docker/buildx/issues/464
[kubernetes_folder]: kubernetes/
[docker_folder]: docker/
[ha_folder]: HAcluster/
@@ -4,7 +4,7 @@
# script that starts up the kubernetes cluster and does some basic quality-of-life setup
sudo swapoff -a # turn off swap memory
sudo kubeadm init --config kubeadm-config.yaml --upload-certs
if [[ $? -ne 0 ]] ; then
# some error
...
# kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.23.4
controlPlaneEndpoint: "192.168.1.240:6443"
networking:
podSubnet: "10.244.0.0/16"
etcd:
# can be external or local
external:
endpoints:
- http://192.168.1.230:2379
- http://192.168.1.24:2379
- http://192.168.1.191:2379
# certs?!
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
...
apiVersion: v1
kind: Service
metadata:
labels:
app: opendatacam
name: opendatacam-mongo
spec:
ports:
# was 27017
- name: "27017"
port: 27017
targetPort: 27017
clusterIP: None
selector:
app: opendatacam
tier: mongo
@@ -22,7 +24,7 @@ metadata:
spec:
storageClassName: nfs
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
@@ -37,10 +39,11 @@ spec:
volumeMode: Filesystem
storageClassName: nfs
accessModes:
- ReadWriteMany
capacity:
storage: 10Gi
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
mountOptions:
- hard
- nfsvers=3
@@ -48,6 +51,9 @@ spec:
path: /home/llh/opendatacamNFS
server: 192.168.1.230
readOnly: false
# path: /home/llh/tempMongoDir
# server: 192.168.1.230
# readOnly: false
---
apiVersion: apps/v1
kind: Deployment
@@ -69,13 +75,16 @@ spec:
app: opendatacam
tier: mongo
spec:
# hostNetwork: true
containers:
- name: mongo
image: llh/mongo:v0
imagePullPolicy: ""
command: ["/bin/bash"]
args: ["-c", "mongod --bind_ip_all"]
# command: ["/bin/bash"]
# args: ["-c", "chmod u=rwx,g=rwx,o=rwx /data/db && mongod --dbpath '/data/db'"]
# current problem: we get a no-permission error when chowning the /data/db/ directory... and now on the read-write permissions, god bless
ports:
- containerPort: 27017
resources: {}
@@ -92,9 +101,8 @@ spec:
key: password
volumeMounts:
- name: mongodb-persistent-storage
mountPath: /data/db
# mountPath: /data/db/
restartPolicy: Always
volumes:
- name: mongodb-persistent-storage
...
@@ -15,6 +15,7 @@ spec:
- name: "8090"
port: 8090
targetPort: 8090
# clusterIP: None # trying to use a headless service to make it available
selector:
app: opendatacam
tier: frontend
@@ -40,40 +41,37 @@ spec:
app: opendatacam
tier: frontend
spec:
# hostNetwork: true
containers:
- image: opendatacam/opendatacam:v3.0.2-beta.1-xavier
# - image: llh/opendatacam:v0
command: ["/bin/bash"]
args: ["-c", "/var/local/opendatacam/launch.sh"]
name: opendatacam
ports:
- containerPort: 8080
hostPort: 8080
- containerPort: 8070
- containerPort: 8090
resources: {}
securityContext:
privileged: true
volumeMounts:
- name: opendatacam-config
mountPath: /var/local/opendatacam/config.json
subPath: "config.json"
- name: video-source
mountPath: /dev/video0
restartPolicy: Always
volumes:
- name: video-source
hostPath:
path: /dev/video0
- name: opendatacam-config
configMap:
name: opendatacam
items:
- key: config.json
path: config.json
nodeSelector:
kubernetes.io/hostname: neruda
@@ -5,7 +5,7 @@
"NEURAL_NETWORK": "yolov4",
"VIDEO_INPUTS_PARAMS": {
"file": "opendatacam_videos/demo.mp4",
"usbcam": "v4l2src device=/dev/video0 brightness=1000 saturation=100 ! video/x-raw, framerate=30/1, width=640, height=480 ! videoconvert ! appsink",
"raspberrycam": "nvarguscamerasrc ! video/x-raw(memory:NVMM),width=1280, height=720, framerate=30/1, format=NV12 ! nvvidconv ! video/x-raw, format=BGRx, width=480, height=272 ! videoconvert ! video/x-raw, format=BGR ! appsink",
"remote_cam": "YOUR IP CAM STREAM (can be .m3u8, MJPEG ...), anything supported by opencv",
"remote_hls_gstreamer": "souphttpsrc location=http://YOUR_HLSSTREAM_URL_HERE.m3u8 ! hlsdemux ! decodebin ! videoconvert ! videoscale ! appsink"
...
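To sanity-check the updated usbcam capture settings outside of opendatacam, a rough gst-launch-1.0 equivalent (an assumption, with appsink swapped for a local video sink and the brightness/saturation properties omitted) is:
gst-launch-1.0 v4l2src device=/dev/video0 ! 'video/x-raw,framerate=30/1,width=640,height=480' ! videoconvert ! autovideosink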
ARG from=mongo:latest
FROM ${from}
RUN apt-get update && apt-get install -y net-tools sudo nano
# claim ownership of /data/db as my own
# UID? 1000
RUN mkdir -p /data/db
RUN chown -R mongodb /data/db
USER root
CMD ["/bin/bash", "-c", "mongod --dbpath /data/db"]
# # put rsa key in image
# RUN mkdir -p /home/ssher/.ssh
# RUN chown -R ssher:ssher /home/ssher/.ssh
# RUN service ssh start
# EXPOSE 22
#
# ENTRYPOINT ["/usr/local/bin/ros_entrypoint.sh"]
# CMD ["/usr/sbin/sshd","-D"]
# CMD ["bash"]
# run ros package launch file
# CMD ["roslaunch", "roscpp_tutorials", "talker_listener.launch"]