Commit 882f6769 authored by Julien Leduc

Integrating cta-orchestration in CTA repository

parent 921a9300
Showing changed files with 687 additions and 18 deletions
#!/bin/bash
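# Creates a namespaced CTA test instance on kubernetes: pushes the objectstore,
# database and library configmaps, claims an unused MHVTL library, then starts
# the init pod followed by the ctacli, tpsrv, ctaeos, ctafrontend and kdc pods.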
#instance=$1
# defaults objectstore to file
config_objectstore="./objectstore-file.yaml"
# defaults DB to sqlite
config_database="./database-sqlite.yaml"
# By default keep Database and keep Objectstore
# the default should not make users lose data if they forget an option
keepDatabase=1
keepObjectstore=1
usage() { cat <<EOF 1>&2
Usage: $0 -n <namespace> [-o <objectstore_configmap>] [-d <database_configmap>] [-D] [-O]
Options:
  -D    wipe database content during initialization phase (database content is kept by default)
  -O    wipe objectstore content during initialization phase (objectstore content is kept by default)
EOF
exit 1
}
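# Example invocation (script and namespace names are illustrative):
#   ./create_instance.sh -n ctatest -O -D   # wipe both objectstore and DB content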
while getopts "n:o:d:t:DO" o; do
  case "${o}" in
    o)
      config_objectstore=${OPTARG}
      test -f ${config_objectstore} || error="${error}Objectstore configmap file ${config_objectstore} does not exist\n"
      ;;
    d)
      config_database=${OPTARG}
      test -f ${config_database} || error="${error}Database configmap file ${config_database} does not exist\n"
      ;;
    n)
      instance=${OPTARG}
      ;;
    O)
      keepObjectstore=0
      ;;
    D)
      keepDatabase=0
      ;;
    *)
      usage
      ;;
  esac
done
shift $((OPTIND-1))
if [ -z "${instance}" ]; then
  usage
fi
# short (8-character) commit hash of HEAD
COMMITID=$(git log -n1 | grep ^commit | cut -d' ' -f2 | sed -e 's/\(........\).*/\1/')
echo "Creating instance for latest image built for ${COMMITID} (highest PIPELINEID)"
imagetag=$(../ci_helpers/list_images.sh 2>/dev/null | grep ${COMMITID} | sort -n | tail -n1)
if [ "${imagetag}" == "" ]; then
  echo "commit:${COMMITID} has no docker image available in the gitlab registry, please check the pipeline status and the available registry images."
  exit 1
fi
echo "Creating instance using docker image with tag: ${imagetag}"
if [ ! -z "${error}" ]; then
  echo -e "ERROR:\n${error}"
  exit 1
fi
if [ $keepDatabase == 1 ] ; then
  echo "DB content will be kept"
else
  echo "DB content will be wiped"
fi
if [ $keepObjectstore == 1 ] ; then
  echo "objectstore content will be kept"
else
  echo "objectstore content will be wiped"
fi
# exit 0   # note: leaving this early exit active would prevent the instance below from being created
echo "Creating ${instance} instance"
kubectl create namespace ${instance} || exit 1
kubectl --namespace ${instance} create configmap init --from-literal=keepDatabase=${keepDatabase} --from-literal=keepObjectstore=${keepObjectstore}
echo "creating configmaps in instance"
kubectl create -f ${config_objectstore} --namespace=${instance}
kubectl create -f ${config_database} --namespace=${instance}
echo -n "Requesting an unused MHVTL library"
kubectl create -f /opt/kubernetes/CTA/library/library_claim.yaml --namespace=${instance}
for ((i=0; i<120; i++)); do
  echo -n "."
  kubectl get persistentvolumeclaim claimlibrary --namespace=${instance} | grep -q Bound && break
  sleep 1
done
kubectl get persistentvolumeclaim claimlibrary --namespace=${instance} | grep -q Bound || exit 1
LIBRARY_DEVICE=$(kubectl get persistentvolumeclaim claimlibrary --namespace=${instance} -o yaml | grep -i volumeName | sed -e 's%.*sg%sg%')
kubectl --namespace=${instance} create -f /opt/kubernetes/CTA/library/config/library-config-${LIBRARY_DEVICE}.yaml
echo "got library: ${LIBRARY_DEVICE}"
echo "creating services in instance"
for service_file in *svc\.yaml; do
kubectl create -f ${service_file} --namespace=${instance}
done
echo "creating pods in instance"
kubectl create -f pod-init.yaml --namespace=${instance}
echo -n "Waiting for init"
for ((i=0; i<400; i++)); do
echo -n "."
kubectl get pod init -a --namespace=${instance} | grep -q Completed && break
sleep 1
done
# initialization went wrong => exit now with error
kubectl get pod init -a --namespace=${instance} | grep -q Completed || exit 1
echo OK
echo "Launching pods"
for podname in ctacli tpsrv ctaeos ctafrontend kdc; do
kubectl create -f pod-${podname}.yaml --namespace=${instance}
done
echo -n "Waiting for other pods"
for ((i=0; i<240; i++)); do
echo -n "."
# exit loop when all pods are in Running state
kubectl get pods -a --namespace=${instance} | grep -v init | tail -n+2 | grep -q -v Running || break
sleep 1
done
if [[ $(kubectl get pods -a --namespace=${instance} | grep -v init | tail -n+2 | grep -q -v Running) ]]; then
echo "Some pods have not been initialized properly:"
kubectl get pods -a --namespace=${instance}
exit 1
fi
echo OK
echo -n "Waiting for KDC to be configured"
# Kdc logs sometimes get truncated. We rely on a different mechanism to detect completion
for ((i=0; i<300; i++)); do
echo -n "."
[ "`kubectl --namespace=${instance} exec kdc -- bash -c "[ -f /root/kdcReady ] && echo -n Ready || echo -n Not ready"`" = "Ready" ] && break
sleep 1
done
[ "`kubectl --namespace=${instance} exec kdc -- bash -c "[ -f /root/kdcReady ] && echo -n Ready || echo -n Not ready"`" = "Ready" ] || (echo "Failed to configure KDC."; exit 1)
echo OK
echo -n "Configuring KDC clients (frontend, cli)... "
kubectl --namespace=${instance} exec kdc cat /etc/krb5.conf | kubectl --namespace=${instance} exec -i ctacli -- bash -c "cat > /etc/krb5.conf"
kubectl --namespace=${instance} exec kdc cat /etc/krb5.conf | kubectl --namespace=${instance} exec -i ctafrontend -- bash -c "cat > /etc/krb5.conf"
kubectl --namespace=${instance} exec kdc cat /root/admin1.keytab | kubectl --namespace=${instance} exec -i ctacli -- bash -c "cat > /root/admin1.keytab"
kubectl --namespace=${instance} exec kdc cat /root/cta-frontend.keytab | kubectl --namespace=${instance} exec -i ctafrontend -- bash -c "cat > /etc/cta-frontend.keytab"
kubectl --namespace=${instance} exec ctacli -- kinit -kt /root/admin1.keytab admin1@TEST.CTA
echo Done
echo "klist for ctacli:"
kubectl --namespace=${instance} exec ctacli klist
echo -n "Configuring cta SSS for ctafrontend access from ctaeos"
for ((i=0; i<300; i++)); do
echo -n "."
[ "`kubectl --namespace=${instance} exec ctafrontend -- bash -c "[ -f /etc/ctafrontend_SSS_c.keytab ] && echo -n Ready || echo -n Not ready"`" = "Ready" ] && break
sleep 1
done
[ "`kubectl --namespace=${instance} exec ctafrontend -- bash -c "[ -f /etc/ctafrontend_SSS_c.keytab ] && echo -n Ready || echo -n Not ready"`" = "Ready" ] || (echo "Failed to retrieve cta frontend SSS key"; exit 1)
kubectl --namespace=${instance} exec ctafrontend -- cat /etc/ctafrontend_SSS_c.keytab | kubectl --namespace=${instance} exec -i ctaeos -- bash -c "cat > /etc/ctafrontend_SSS_c.keytab; chmod 600 /etc/ctafrontend_SSS_c.keytab; chown daemon /etc/ctafrontend_SSS_c.keytab"
echo Done
echo -n "Waiting for EOS to be configured"
for ((i=0; i<300; i++)); do
echo -n "."
kubectl --namespace=${instance} logs ctaeos | grep -q "### ctaeos mgm ready ###" && break
sleep 1
done
kubectl --namespace=${instance} logs ctaeos | grep -q "### ctaeos mgm ready ###" || exit 1
echo OK
echo "Instance ${instance} successfully created:"
kubectl get pods -a --namespace=${instance}
exit 0
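# Headless Service (clusterIP: None) giving the ctacli pod a stable DNS name;
# port 1 is presumably just a placeholder since ctacli does not serve any traffic.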
apiVersion: v1
kind: Service
metadata:
  name: ctacli
  labels:
    k8s-app: ctacli
spec:
  selector:
    k8s-app: ctacli
  clusterIP: None
  ports:
  - name: ctacli
    port: 1
    protocol: TCP
apiVersion: v1
kind: Service
metadata:
  name: ctaeos
  labels:
    k8s-app: ctaeos-mgm
spec:
  selector:
    k8s-app: ctaeos-mgm
  clusterIP: None
  ports:
  - name: ctaeos-mgm
    port: 1094
    protocol: TCP
apiVersion: v1
kind: Service
metadata:
  name: ctafrontend
  labels:
    k8s-app: ctafrontend
spec:
  selector:
    k8s-app: ctafrontend
  clusterIP: None
  ports:
  - name: ctafrontend
    port: 10955
    protocol: TCP
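# Two alternative database ConfigMaps follow (oracle and sqlite); the instance
# creation script installs one of them via its -d option, sqlite being the default.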
apiVersion: v1
kind: ConfigMap
metadata:
  name: database-config
  labels:
    config: database
    type: oracle
data:
  database.type: oracle
  database.oracle.username: user1
  database.oracle.password: mypasswd
  database.oracle.database: dbname
apiVersion: v1
kind: ConfigMap
metadata:
  name: database-config
  labels:
    config: database
    type: sqlite
data:
  database.type: sqlite
  database.file.path: /shared/%NAMESPACE/catdb/catdb
#!/bin/bash
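# Deletes a CTA test instance: removes its kubernetes namespace, waits for the
# deletion to complete, then recycles the MHVTL library device PVs.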
usage() { cat <<EOF 1>&2
Usage: $0 -n <namespace>
EOF
exit 1
}
while getopts "n:" o; do
  case "${o}" in
    n)
      instance=${OPTARG}
      ;;
    *)
      usage
      ;;
  esac
done
shift $((OPTIND-1))
if [ -z "${instance}" ]; then
  usage
fi
if [ ! -z "${error}" ]; then
  echo -e "ERROR:\n${error}"
  exit 1
fi
echo "Deleting ${instance} instance"
kubectl delete namespace ${instance}
for ((i=0; i<120; i++)); do
  echo -n "."
  kubectl get namespace | grep -q "^${instance} " || break
  sleep 1
done
echo OK
./recycle_librarydevice_PV.sh
echo "Status of library pool after test:"
kubectl get pv
apiVersion: v1
kind: Service
metadata:
  name: kdc
  labels:
    k8s-app: kdc
spec:
  selector:
    k8s-app: kdc
  clusterIP: None
  ports:
  - name: kdc-tcp
    port: 88
    protocol: TCP
  - name: kdc-udp
    port: 88
    protocol: UDP
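# MHVTL library ConfigMap: describes the virtual library device, its drives and
# the tapes available to the instance.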
apiVersion: v1
kind: ConfigMap
metadata:
  name: library-config
  labels:
    config: library
    type: mhvtl
data:
  library.type: mhvtl
  library.name: VLSTK10
  library.device: sg30
  library.drivenames: (VDSTK11 VDSTK12 VDSTK13)
  library.drivedevices: (nst0 nst1 nst2)
  library.tapes: (V01001 V01002 V01003 V01004 V01005 V01006 V01007)
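# Two alternative objectstore ConfigMaps follow (ceph and file); the instance
# creation script installs one of them via its -o option, file being the default.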
apiVersion: v1
kind: ConfigMap
metadata:
  name: objectstore-config
  labels:
    config: objectstore
    type: ceph
data:
  objectstore.type: ceph
  objectstore.ceph.mon: cephmond.cern.ch
  objectstore.ceph.monport: "6790"
  objectstore.ceph.pool: tapetest
  objectstore.ceph.namespace: cta-julien
  objectstore.ceph.id: cta-julien
  objectstore.ceph.key: KEY
apiVersion: v1
kind: ConfigMap
metadata:
  name: objectstore-config
  labels:
    config: objectstore
    type: file
data:
  objectstore.type: file
  objectstore.file.path: /shared/%NAMESPACE/objectstore
 apiVersion: v1
 kind: Pod
 metadata:
-  name: init
+  name: ctacli
   labels:
-    k8s-app: cta-init
+    k8s-app: ctacli
 spec:
   restartPolicy: Never
   containers:
-  - name: ctainit
-    image: gitlab-registry.cern.ch/cta/ctageneric:%CTA_BUILD_ID
+  - name: ctacli
+    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
+    stdin: true
     env:
     - name: MY_NAME
@@ -21,21 +21,13 @@ spec:
         fieldPath: metadata.namespace
     - name: INSTANCE_NAME
       value: "$(MY_NAMESPACE)"
-    - name: objectstore
-      value: "/shared/$(INSTANCE_NAME)/objectstore"
-    - name: catdbdir
-      value: "/shared/$(INSTANCE_NAME)/catdb"
-    - name: catdbfile
-      value: "catdb"
-    - name: LIBRARY_CONFIG
-      value: "/shared/$(INSTANCE_NAME)/libraryconfig"
-    command: ['/opt/ci/init/init.sh']
+    - name: TERM
+      value: "xterm"
+    command: ['/opt/run/bin/ctacli.sh']
     args: ["none"]
     volumeMounts:
     - mountPath: /shared
       name: shared
-    - mountPath: /library
-      name: mylibrary
     securityContext:
       privileged: true
@@ -43,6 +35,4 @@ spec:
   - name: shared
     hostPath:
       path: /opt/cta
-  - name: mylibrary
-    persistentVolumeClaim:
-      claimName: claimlibrary
apiVersion: v1
kind: Pod
metadata:
  name: ctaeos
  labels:
    k8s-app: ctaeos-mgm
spec:
  restartPolicy: Never
  containers:
  - name: mgm
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: frontend
      value: 'cta-frontend:10955'
    - name: TERM
      value: "xterm"
    command: ['/opt/run/bin/ctaeos-mgm.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /shared
      name: shared
    securityContext:
      privileged: true
  volumes:
  - name: shared
    hostPath:
      path: /opt/cta
apiVersion: v1
kind: Pod
metadata:
  name: ctafrontend
  labels:
    k8s-app: ctafrontend
spec:
  restartPolicy: Never
  containers:
  - name: ctafrontend
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: TERM
      value: "xterm"
    command: ['/opt/run/bin/ctafrontend.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /dev/log
      name: dev-log
    - mountPath: /shared
      name: shared
    - mountPath: /etc/config/objectstore
      name: myobjectstore
    - mountPath: /etc/config/database
      name: mydatabase
    - mountPath: /etc/config/library
      name: mylibrary
    securityContext:
      # RW access needed to volumes
      privileged: true
    ports:
    - containerPort: 10955
      name: ctafrontend
      protocol: TCP
  volumes:
  - name: dev-log
    hostPath:
      path: /dev/log
  - name: shared
    hostPath:
      path: /opt/cta
  - name: myobjectstore
    configMap:
      name: objectstore-config
  - name: mydatabase
    configMap:
      name: database-config
  - name: mylibrary
    configMap:
      name: library-config
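# One-shot initialization pod: KEEP_DATABASE and KEEP_OBJECTSTORE are taken from
# the "init" ConfigMap created by the instance creation script (-D/-O options).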
apiVersion: v1
kind: Pod
metadata:
  name: init
  labels:
    k8s-app: init
spec:
  restartPolicy: Never
  containers:
  - name: ctainit
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: KEEP_DATABASE
      valueFrom:
        configMapKeyRef:
          name: init
          key: keepDatabase
    - name: KEEP_OBJECTSTORE
      valueFrom:
        configMapKeyRef:
          name: init
          key: keepObjectstore
    command: ['/opt/run/bin/init.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /shared
      name: shared
    - mountPath: /etc/config/objectstore
      name: myobjectstore
    - mountPath: /etc/config/database
      name: mydatabase
    - mountPath: /etc/config/library
      name: mylibrary
    securityContext:
      privileged: true
  volumes:
  - name: shared
    hostPath:
      path: /opt/cta
  - name: myobjectstore
    configMap:
      name: objectstore-config
  - name: mydatabase
    configMap:
      name: database-config
  - name: mylibrary
    configMap:
      name: library-config
apiVersion: v1
kind: Pod
metadata:
  name: kdc
  labels:
    k8s-app: kdc
spec:
  restartPolicy: Never
  containers:
  - name: kdc
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: TERM
      value: "xterm"
    command: ['/opt/run/bin/kdc.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /shared
      name: shared
    securityContext:
      privileged: true
  volumes:
  - name: shared
    hostPath:
      path: /opt/cta
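# Tape server pod: runs the rmcd and taped containers side by side, sharing the
# /shared hostPath volume and the library configuration.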
apiVersion: v1
kind: Pod
metadata:
  name: tpsrv
  labels:
    k8s-app: ctataped
spec:
  restartPolicy: Never
  containers:
  - name: rmcd
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: TERM
      value: "xterm"
    command: ['/opt/run/bin/rmcd.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /shared
      name: shared
    - mountPath: /etc/config/library
      name: mylibrary
    securityContext:
      privileged: true
  - name: taped
    image: gitlab-registry.cern.ch/cta/ctageneric:78673git921a9300
    stdin: true
    env:
    - name: MY_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_NAME
      value: "$(MY_NAMESPACE)"
    - name: eoshost
      value: "mgm"
    - name: TERM
      value: "xterm"
    command: ['/opt/run/bin/taped.sh']
    args: ["none"]
    volumeMounts:
    - mountPath: /shared
      name: shared
    - mountPath: /etc/config/objectstore
      name: myobjectstore
    - mountPath: /etc/config/database
      name: mydatabase
    - mountPath: /etc/config/library
      name: mylibrary
    securityContext:
      privileged: true
  volumes:
  - name: shared
    hostPath:
      path: /opt/cta
  - name: myobjectstore
    configMap:
      name: objectstore-config
  - name: mydatabase
    configMap:
      name: database-config
  - name: mylibrary
    configMap:
      name: library-config
#!/bin/bash
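# Recycles MHVTL library device PersistentVolumes: every /claimlibrary PV that is
# no longer Bound is deleted and recreated from its resource definition so that it
# returns to the pool of available libraries.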
LIBRARY_DIR="/opt/librarydevice"
kubectl get pv | grep '/claimlibrary ' | grep -v Bound | while read line; do
  library_device=$(echo $line | awk '{print $1}')
  pv_status=$(echo $line | awk '{print $4}')
  echo "Deleting PV ${library_device} with status ${pv_status}"
  kubectl delete pv ${library_device} && \
    kubectl create -f /opt/kubernetes/CTA/library/resource/${library_device}_librarydevice_resource.yaml && \
    echo OK
done
#!/bin/bash
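# Prepares the CTA catalogue for system tests: bootstraps an admin host/user, then
# adds the logical library, tape pool, tape, storage class, archive route, mount
# policy and requester mount rule, and finally brings drive VDSTK1 up.
# VID is expected to be set in the environment before the "cta tape add" line.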
cta bs -u root --hostname $(hostname -i) -m "docker cli"
cta logicallibrary add --name VLSTK --comment "ctasystest"
cta tapepool add --name ctasystest --partialtapesnumber 5 --encrypted false --comment "ctasystest"
cta tape add --logicallibrary VLSTK --tapepool ctasystest --capacity 1000000000 --comment "ctasystest" --vid ${VID} --disabled false --full false --encryptionkey test
cta storageclass add --instance root --name ctaStorageClass --copynb 1 --comment "ctasystest"
cta archiveroute add --instance root --storageclass ctaStorageClass --copynb 1 --tapepool ctasystest --comment "ctasystest"
cta mountpolicy add --name ctasystest --archivepriority 1 --minarchiverequestage 1 --retrievepriority 1 --minretrieverequestage 1 --maxdrivesallowed 1 --comment "ctasystest"
cta requestermountrule add --instance root --name root --mountpolicy ctasystest --comment "ctasystest"
cta drive up VDSTK1
# example test: copy a file into EOS and trigger the "closew" and "prepare" workflow events on it
kubectl --namespace ${NAMESPACE} exec mgm xrdcp /etc/group root://localhost//eos/mgm/cta/toto
kubectl --namespace ${NAMESPACE} exec mgm eos file workflow /eos/mgm/cta/toto default closew
kubectl --namespace ${NAMESPACE} exec mgm eos file workflow /eos/mgm/cta/toto default prepare