I’ll be going through this tutorial.

Note: This is still a work in progress, as I haven’t been able to successfully bring up Kubernetes on DigitalOcean.

Generate certificates

Traffic between nodes will be encrypted, so you will need to generate TLS certificates. I followed this guide, which leaves you with the following files (a rough openssl sketch follows the list):

  • ca.pem
  • ca-key.pem
  • apiserver.pem
  • apiserver-key.pem
  • worker-1.example.com-worker.pem
  • worker-1.example.com-worker-key.pem
  • worker-2.example.com-worker.pem
  • worker-2.example.com-worker-key.pem
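
As a memory aid, here is a sketch of how the CA and the API server pair can be generated with openssl. The SAN entries are assumptions (10.3.0.1 is the first service IP, 1.2.3.4 stands in for the master's public IP); substitute your own values and repeat the last three commands with a per-worker config for each worker cert:

# Hedged sketch: create a CA, then sign an API server keypair with it.
openssl genrsa -out ca-key.pem 2048
openssl req -x509 -new -nodes -key ca-key.pem -days 3650 -out ca.pem -subj "/CN=kube-ca"

# Minimal openssl config with the subjectAltName entries the apiserver needs.
cat > openssl.cnf <<'EOF'
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
IP.1 = 10.3.0.1
IP.2 = 1.2.3.4
EOF

openssl genrsa -out apiserver-key.pem 2048
openssl req -new -key apiserver-key.pem -out apiserver.csr \
  -subj "/CN=kube-apiserver" -config openssl.cnf
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
  -out apiserver.pem -days 365 -extensions v3_req -extfile openssl.cnf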

Deploy master node

Create ssl directory:

sudo mkdir -p /etc/kubernetes/ssl

Copy these files to the ssl directory (an example scp follows the list):

  • apiserver.pem
  • apiserver-key.pem
  • ca.pem
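
One way to get them there, assuming the certs are in your current directory locally and the droplet is reachable as master.example.com (both assumptions):

# Copy the certs up, then move them into place with sudo.
scp apiserver.pem apiserver-key.pem ca.pem core@master.example.com:/home/core/
ssh core@master.example.com 'sudo mv apiserver.pem apiserver-key.pem ca.pem /etc/kubernetes/ssl/'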

Set permissions:

sudo chmod 600 /etc/kubernetes/ssl/*-key.pem
sudo chown root:root /etc/kubernetes/ssl/*-key.pem

Create flannel directory:

sudo mkdir -p /etc/flannel

Create /etc/flannel/options.env:

FLANNELD_IFACE=$ADVERTISE_IP
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS

  • ADVERTISE_IP (node’s public IP address)
  • ETCD_ENDPOINTS (use fleetctl list-machines)
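
A filled-in example, using the same placeholder address as the rest of these notes (substitute your droplet's real values):

FLANNELD_IFACE=1.2.3.4
FLANNELD_ETCD_ENDPOINTS=http://1.2.3.4:2379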

Create systemd config:

sudo mkdir -p /etc/systemd/system/flanneld.service.d

Create /etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf:

[Service]
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env

Create Docker configuration:

sudo mkdir -p /etc/systemd/system/docker.service.d

Create /etc/systemd/system/docker.service.d/40-flannel.conf:

[Unit]
Requires=flanneld.service
After=flanneld.service

Create /etc/systemd/system/kubelet.service:

[Service]
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
Environment=KUBELET_VERSION=v1.4.6_coreos.0
Environment="RKT_OPTS=--volume var-log,kind=host,source=/var/log \
  --mount volume=var-log,target=/var/log \
  --volume dns,kind=host,source=/etc/resolv.conf \
  --mount volume=dns,target=/etc/resolv.conf"
ExecStart=/usr/lib/coreos/kubelet-wrapper \
  --api-servers=http://127.0.0.1:8080 \
  --register-schedulable=false \
  --allow-privileged=true \
  --config=/etc/kubernetes/manifests \
  --hostname-override=1.2.3.4 \
  --cluster-dns=10.3.0.10 \
  --cluster-domain=cluster.local
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
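
kubelet-wrapper is a script shipped with CoreOS that fetches the hyperkube image matching KUBELET_VERSION and runs the kubelet under rkt with the mounts from RKT_OPTS. A quick sanity check before starting anything (paths assume stock CoreOS):

# Confirm the wrapper script exists and the unit file parses cleanly.
ls -l /usr/lib/coreos/kubelet-wrapper
sudo systemd-analyze verify /etc/systemd/system/kubelet.service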

Create manifests directory:

sudo mkdir -p /etc/kubernetes/manifests

Create /etc/kubernetes/manifests/kube-apiserver.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
    command:
    - /hyperkube
    - apiserver
    - --bind-address=0.0.0.0
    - --etcd-servers=${ETCD_ENDPOINTS}
    - --allow-privileged=true
    - --service-cluster-ip-range=10.3.0.0/24
    - --secure-port=443
    - --advertise-address=${ADVERTISE_IP}
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
    - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --client-ca-file=/etc/kubernetes/ssl/ca.pem
    - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
    - containerPort: 8080
      hostPort: 8080
      name: local
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

Admission control plugins (the --admission-control list above) intercept requests to the API server after authentication and authorization and can mutate or reject them; for example, ResourceQuota enforces per-namespace quotas and ServiceAccount assigns a default service account to pods that don’t specify one.

Create /etc/kubernetes/manifests/kube-proxy.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
    command:
    - /hyperkube
    - proxy
    - --master=http://127.0.0.1:8080
    - --proxy-mode=iptables
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
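
In iptables mode, kube-proxy programs NAT rules for each Service instead of proxying traffic in userspace. Once the kubelet (started further down) has brought the pod up, those rules should be visible:

# kube-proxy creates KUBE-* chains when running in iptables mode.
sudo iptables-save | grep -c 'KUBE-'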

Create /etc/kubernetes/manifests/kube-controller-manager.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
    - --root-ca-file=/etc/kubernetes/ssl/ca.pem
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 1
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host

Create /etc/kubernetes/manifests/kube-scheduler.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    - --leader-elect=true
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 1
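
The liveness probes above use port 10252 for the controller manager and 10251 for the scheduler; once the kubelet has started both pods you can hit the same endpoints yourself:

# Each should return "ok" once the static pods are running.
curl -s http://127.0.0.1:10252/healthz   # kube-controller-manager
curl -s http://127.0.0.1:10251/healthz   # kube-scheduler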

Reload units:

sudo systemctl daemon-reload

Set up the flannel network in etcd, substituting:

  • $POD_NETWORK (10.2.0.0/16)
  • $ETCD_SERVER (http://1.2.3.4:2379)

curl -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ETCD_SERVER/v2/keys/coreos.com/network/config"
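
To double-check that etcd stored the config, read the key back (same $ETCD_SERVER placeholder):

curl -s "$ETCD_SERVER/v2/keys/coreos.com/network/config"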

Start flannel:

sudo systemctl start flanneld
sudo systemctl enable flanneld

Start kubelet:

sudo systemctl start kubelet
sudo systemctl enable kubelet

Check kubelet’s status:

systemctl status kubelet.service

(screenshot: kubelet status on the master)

Check pods’ status:

curl -s localhost:10255/pods | jq -r '.items[].metadata.name'

You should see entries for:

  • kube-apiserver
  • kube-controller-manager
  • kube-proxy
  • kube-scheduler
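
The API server should also be answering on the insecure local port used by the manifests above:

curl -s http://127.0.0.1:8080/version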

Deploy worker node

Create ssl directory:

sudo mkdir -p /etc/kubernetes/ssl

Copy these files to ssl directory:

  • worker-1.example.com-worker.pem
  • worker-1.example.com-worker-key.pem
  • ca.pem

Set permissions:

sudo chmod 600 /etc/kubernetes/ssl/*-key.pem
sudo chown root:root /etc/kubernetes/ssl/*-key.pem

Create flannel directory:

sudo mkdir -p /etc/flannel

Create /etc/flannel/options.env:

FLANNELD_IFACE=$ADVERTISE_IP
FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS

  • ADVERTISE_IP (node’s public IP address)
  • ETCD_ENDPOINTS (use fleetctl list-machines)

Create systemd config:

sudo mkdir -p /etc/systemd/system/flanneld.service.d

Create /etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf:

[Service]
ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env

Create Docker configuration:

sudo mkdir -p /etc/systemd/system/docker.service.d

Create /etc/systemd/system/docker.service.d/40-flannel.conf:

[Unit]
Requires=flanneld.service
After=flanneld.service

Create /etc/systemd/system/kubelet.service:

[Service]
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/mkdir -p /var/log/containers
Environment=KUBELET_VERSION=v1.4.6_coreos.0
Environment="RKT_OPTS=--volume var-log,kind=host,source=/var/log \
  --mount volume=var-log,target=/var/log \
  --volume dns,kind=host,source=/etc/resolv.conf \
  --mount volume=dns,target=/etc/resolv.conf"
ExecStart=/usr/lib/coreos/kubelet-wrapper \
  --api-servers=https://${MASTER_HOST} \
  --register-node=true \
  --allow-privileged=true \
  --config=/etc/kubernetes/manifests \
  --hostname-override=1.2.3.4 \
  --cluster-dns=10.3.0.10 \
  --cluster-domain=cluster.local \
  --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
  --tls-cert-file=/etc/kubernetes/ssl/worker-1.example.com-worker.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/worker-1.example.com-worker-key.pem
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
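
The unit above (and the kube-proxy manifest below) reference /etc/kubernetes/worker-kubeconfig.yaml, which tells the kubelet and proxy how to authenticate against the master. The guide I’m following defines it roughly like this; a sketch using the cert paths from this worker:

# Referenced as --kubeconfig above; cert paths match the files copied earlier.
sudo tee /etc/kubernetes/worker-kubeconfig.yaml >/dev/null <<'EOF'
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/worker-1.example.com-worker.pem
    client-key: /etc/kubernetes/ssl/worker-1.example.com-worker-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context
EOF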

Create manifests directory:

sudo mkdir -p /etc/kubernetes/manifests

Create /etc/kubernetes/manifests/kube-proxy.yaml:

apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
    command:
    - /hyperkube
    - proxy
    - --master=https://${MASTER_HOST}
    - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
    - --proxy-mode=iptables
    securityContext:
      privileged: true
    volumeMounts:
      - mountPath: /etc/ssl/certs
        name: "ssl-certs"
      - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
        name: "kubeconfig"
        readOnly: true
      - mountPath: /etc/kubernetes/ssl
        name: "etc-kube-ssl"
        readOnly: true
  volumes:
    - name: "ssl-certs"
      hostPath:
        path: "/usr/share/ca-certificates"
    - name: "kubeconfig"
      hostPath:
        path: "/etc/kubernetes/worker-kubeconfig.yaml"
    - name: "etc-kube-ssl"
      hostPath:
        path: "/etc/kubernetes/ssl"

Check kubelet’s status:

systemctl status kubelet.service

(screenshot: kubelet status on the worker)

Check pods’ status:

curl -s localhost:10255/pods | jq -r '.items[].metadata.name'
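
Only kube-proxy runs from a manifest on the worker, so that should be the only entry here. To confirm the worker actually registered with the master, list the nodes from the master over its insecure local port (jq as above):

curl -s http://127.0.0.1:8080/api/v1/nodes | jq -r '.items[].metadata.name'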