Build secrets and SSH forwarding in Docker 18.09
backup files from a docker volume into backup.tar under the host directory /media/sf_Volumes (mounted as /backup inside the container)
# Backup the full contents of a docker volume ($1) into backup.tar under a
# host directory ($2, default /media/sf_Volumes), which is mounted as /backup
# inside a throwaway busybox container.
# usage: docker-volume-backup <volume-name-or-path> [backup-dir]
function docker-volume-backup() {
    if [ -z "$1" ]; then
        echo "usage: docker-volume-backup <volume> [backup-dir]" >&2
        return 1
    fi
    local backup_dir="${2:-/media/sf_Volumes}"
    docker run --rm -v "${backup_dir}":/backup -v "$1":/tmp/data busybox tar -cvf /backup/backup.tar -C /tmp/data .
}
restore files from backup.tar in the host directory /home/bouquet/Volumes (mounted as /backup inside the container) into a docker volume
# Restore backup.tar from a host directory ($2, default /home/bouquet/Volumes),
# mounted as /backup, into a docker volume ($1), then list the restored files
# as a sanity check.
# usage: docker-volume-restore <volume-name-or-path> [backup-dir]
function docker-volume-restore() {
    if [ -z "$1" ]; then
        echo "usage: docker-volume-restore <volume> [backup-dir]" >&2
        return 1
    fi
    local backup_dir="${2:-/home/bouquet/Volumes}"
    docker run --rm -v "${backup_dir}":/backup -v "$1":/tmp/data busybox tar -xvf /backup/backup.tar -C /tmp/data
    echo "Double checking files..."
    docker run --rm -v "${backup_dir}":/backup -v "$1":/tmp/data busybox ls -lh /tmp/data
}
Access Private Repositories from Your Dockerfile Without Leaving Behind Your SSH Keys · vsupalov.com
put temporary ssh key in docker to build
# this is our first build stage, it will not persist in the final image
FROM ubuntu:18.04 AS intermediate

# install git + ssh client in one layer (update and install combined so the
# package index is never stale); --no-install-recommends keeps the stage small,
# so openssh-client must be listed explicitly for `git clone` over ssh
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        openssh-client \
    && rm -rf /var/lib/apt/lists/*

# add credentials on build
# WARNING: the key still ends up in a layer of this intermediate image, which
# remains in the local build cache even though it is absent from the final
# image; prefer BuildKit's `RUN --mount=type=ssh` (Docker >= 18.09) when
# available, so the key never touches any layer.
ARG SSH_PRIVATE_KEY
RUN mkdir -p /root/.ssh/ \
    && echo "${SSH_PRIVATE_KEY}" > /root/.ssh/id_rsa \
    && chmod 600 /root/.ssh/id_rsa

# make sure your domain is accepted (ssh-keyscan creates known_hosts as needed)
RUN ssh-keyscan bitbucket.org >> /root/.ssh/known_hosts

RUN git clone git@bitbucket.org:your-user/your-repo.git

FROM ubuntu:18.04
# copy the repository from the previous image
COPY --from=intermediate /your-repo /srv/your-repo
# ... actually use the repo :)
The SSH_PRIVATE_KEY is passed when issuing the build command with --build-arg or in the build block of your docker-compose.yml file. As it is not used in the final image, the value will not be available using the history command on that final image — but be aware that it still sits in a layer of the cached intermediate image on the build host, so treat the key as exposed there (BuildKit's SSH forwarding avoids this entirely). For a better overview of using variables when handling your Docker workflows, read this in-depth guide.
volumes:
- "/etc/timezone:/etc/timezone:ro"
- "/etc/localtime:/etc/localtime:ro"
# docker-compose for a sharded MongoDB cluster: 3 shard-server replicas
# (mongors1n1..3), 3 config servers (mongocfg1..3), and 2 mongos routers.
# Reformatted: compose requires nested indentation, and block sequences
# (`- item`) must start on their own line, not after the key.
version: '3'
services:
  mongors1n1:
    container_name: mongors1n1
    image: mongo
    command: mongod --shardsvr --replSet mongors1 --dbpath /data/db --port 27017
    ports:
      - "27017:27017"
    expose:
      - "27017"
    environment:
      TERM: xterm
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/data1:/data/db
  mongors1n2:
    container_name: mongors1n2
    image: mongo
    command: mongod --shardsvr --replSet mongors1 --dbpath /data/db --port 27017
    ports:
      - "27027:27017"
    expose:
      - "27017"
    environment:
      TERM: xterm
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/data2:/data/db
  mongors1n3:
    container_name: mongors1n3
    image: mongo
    command: mongod --shardsvr --replSet mongors1 --dbpath /data/db --port 27017
    ports:
      - "27037:27017"
    expose:
      - "27017"
    environment:
      TERM: xterm
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/data3:/data/db
  mongocfg1:
    container_name: mongocfg1
    image: mongo
    command: mongod --configsvr --replSet mongors1conf --dbpath /data/db --port 27017
    environment:
      TERM: xterm
    expose:
      - "27017"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/config1:/data/db
  mongocfg2:
    container_name: mongocfg2
    image: mongo
    command: mongod --configsvr --replSet mongors1conf --dbpath /data/db --port 27017
    environment:
      TERM: xterm
    expose:
      - "27017"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/config2:/data/db
  mongocfg3:
    container_name: mongocfg3
    image: mongo
    command: mongod --configsvr --replSet mongors1conf --dbpath /data/db --port 27017
    environment:
      TERM: xterm
    expose:
      - "27017"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /mongo_cluster/config3:/data/db
  mongos1:
    container_name: mongos1
    image: mongo
    depends_on:
      - mongocfg1
      - mongocfg2
    command: mongos --configdb mongors1conf/mongocfg1:27017,mongocfg2:27017,mongocfg3:27017 --port 27017
    ports:
      - "27019:27017"
    expose:
      - "27017"
    volumes:
      - /etc/localtime:/etc/localtime:ro
  mongos2:
    container_name: mongos2
    image: mongo
    depends_on:
      - mongocfg1
      - mongocfg2
    command: mongos --configdb mongors1conf/mongocfg1:27017,mongocfg2:27017,mongocfg3:27017 --port 27017
    ports:
      - "27020:27017"
    expose:
      - "27017"
    volumes:
      - /etc/localtime:/etc/localtime:ro
un bac à sable docker valable 4h, jusqu'à 5 nodes swarm
# Minimal docker-compose skeleton showing the top-level keys and how service
# options map to `docker run` flags (reindented: services must be nested under
# `services:`, and the top-level `volumes:`/`networks:` sit at column 0).
version: '3.1'  # if no version is specified then v1 is assumed. Recommend v2 minimum

services:  # containers. same as docker run
  servicename:  # a friendly name. this is also DNS name inside network
    image:  # Optional if you use build:
    command:  # Optional, replace the default CMD specified by the image
    environment:  # Optional, same as -e in docker run
    volumes:  # Optional, same as -v in docker run
  servicename2:

volumes:  # Optional, same as docker volume create

networks:  # Optional, same as docker network create
# Build behind an HTTP proxy.
# NOTE(review): ENV persists into the final image, so the proxy address leaks
# at runtime — prefer `docker build --build-arg http_proxy=...` (or ARG) for
# build-only values.
FROM ubuntu:13.10
# key=value form; the legacy space-separated ENV syntax is deprecated
ENV http_proxy=<HTTP_PROXY> \
    https_proxy=<HTTPS_PROXY>
# -y is required: a non-interactive build would otherwise hang on the prompt
RUN apt-get update && apt-get upgrade -y
docker dnsdock
tuto portainer docker
tout dockeriser
ressources et OS docker
Utiliser des containers Docker à la volée est relativement simple, avec un docker run on lance simplement notre application, mais parfois, les managers tels quels peut…
docker pull image
docker images : liste les images
docker build -t <REPOSITORY:TAG> .
=> docker prendra le dockerfile du répertoire pour builder
FROM
MAINTAINER
RUN useradd -ms /bin/bash toto
USER toto
=> exec -it etc etc => connexion en toto
=> avec -u 0 connexion en root
docker create --name kanboard -v /home/user/kanboard:/var/www/kanboard/data -p 81:80 -t <REPOSITORY Image>
=> génère un conteneur nommé kanboard à partir de l'image <REPOSITORY Image>
=> bind le port 81 du serveur sur le port 80 du conteneur
=> monte le répertoire /home/user/kanboard du serveur dans /var/www/kanboard/data du conteneur
-P : docker réalise un mappage aléatoire du ou des ports utilisé(s) dans le conteneur
=> docker ps montrera le mappage
docker port <nom du conteneur> $CONTAINERPORT
=> montre les mappages
-p <port local>:<port conteneur>
-v chemin_local:chemin_conteneur
avec -v, le répertoire partagé est accessible dans /var/lib/docker/<id du conteneur>
docker start kanboard
docker stop kanboard
restart
pour se connecter au conteneur :
docker exec -ti kanboard bash   # (ou sh) — exec exige la commande à lancer
docker attach CONTAINER : se connecte au process lancé par le conteneur : Ctrl-c le tuera => fin du conteneur
supprimer tous les conteneurs ne tournant pas :
docker rm $(docker ps -a -q) => -q ne retourne que l'ID des conteneurs
pour prendre en compte les modifications faites dans un conteneur et en faire une nouvelle image :
docker ps -a ou -l
récupérer le CONTAINER_ID
docker commit -m "Commit message" -a "nom du mainteneur" CONTAINER_ID nom_de_la/nouvelle_image:TAG
ENV VARIABLE VALEUR
ENV VARIABLE2="VALEUR"
=> si export après USER, ENV est quand même system wide ?
ENTRYPOINT echo "display on every container" <= commande par défaut du conteneur, sera lancé par les conteneurs issus de cette image (à voir)
docker network ls
docker network inspect <name>
bonne présentation de docker avec les base de manipulation / exécution des images & containers