diff --git a/README.md b/README.md
index fccda34..45aedb4 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,7 @@
 [![Build Status](https://travis-ci.org/containerum/kdc-docs.svg?branch=master)](https://travis-ci.org/containerum/kdc-docs)
+
 [Stable documentation](https://docs.kdc.containerum.com)
 [Staging documentation](http://dev.docs.kdc.containerum.com)
diff --git a/content/files/calico.yaml b/content/files/calico/calico.yaml
similarity index 100%
rename from content/files/calico.yaml
rename to content/files/calico/calico.yaml
diff --git a/content/files/calico/calicoctl.yaml b/content/files/calico/calicoctl.yaml
new file mode 100644
index 0000000..61696d5
--- /dev/null
+++ b/content/files/calico/calicoctl.yaml
@@ -0,0 +1,49 @@
+# Calico Version v3.2.3
+# https://docs.projectcalico.org/v3.2/releases#v3.2.3
+# This manifest includes the following component versions:
+#   calico/ctl:v3.2.3
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: calicoctl
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: calicoctl
+    image: quay.io/calico/ctl:v3.2.3
+    command: ["/bin/sh", "-c", "while true; do sleep 3600; done"]
+    env:
+    - name: ETCD_ENDPOINTS
+      valueFrom:
+        configMapKeyRef:
+          name: calico-config
+          key: etcd_endpoints
+    # The following env vars are needed when you're using TLS-enabled etcd.
+    # Location of the CA certificate for etcd.
+    - name: ETCD_CA_CERT_FILE
+      valueFrom:
+        configMapKeyRef:
+          name: calico-config
+          key: etcd_ca
+    # Location of the client key for etcd.
+    - name: ETCD_KEY_FILE
+      valueFrom:
+        configMapKeyRef:
+          name: calico-config
+          key: etcd_key
+    # Location of the client certificate for etcd.
+    - name: ETCD_CERT_FILE
+      valueFrom:
+        configMapKeyRef:
+          name: calico-config
+          key: etcd_cert
+    volumeMounts:
+    - mountPath: /calico-secrets
+      name: etcd-certs
+  volumes:
+  # The following volume is needed when you're using TLS-enabled etcd.
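+  # Note: the calico-etcd-secrets Secret is defined in calico.yaml; its
+  # etcd-ca, etcd-cert and etcd-key fields must be filled with the
+  # base64-encoded certificates (see the Calico installation page) before
+  # this pod can mount them.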
+  - name: etcd-certs
+    secret:
+      secretName: calico-etcd-secrets
diff --git a/content/files/calico/rbac.yaml b/content/files/calico/rbac.yaml
new file mode 100644
index 0000000..7074d1d
--- /dev/null
+++ b/content/files/calico/rbac.yaml
@@ -0,0 +1,71 @@
+# Calico Version v3.2.3
+# https://docs.projectcalico.org/v3.2/releases#v3.2.3
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - nodes
+      - serviceaccounts
+    verbs:
+      - watch
+      - list
+  - apiGroups:
+    - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-node
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+    verbs:
+      - get
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-node
+subjects:
+- kind: ServiceAccount
+  name: calico-node
+  namespace: kube-system
diff --git a/content/files/coredns.yaml b/content/files/coredns.yaml
new file mode 100644
index 0000000..86dcbec
--- /dev/null
+++ b/content/files/coredns.yaml
@@ -0,0 +1,176 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns
+  namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    k8s-app: kube-dns
+  name: coredns
+  namespace: kube-system
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 2
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+    spec:
+      containers:
+      - args:
+        - -conf
+        - /etc/coredns/Corefile
+        image: docker.io/containerum/coredns:1.1.3
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 5
+        name: coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        resources:
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /etc/coredns
+          name: config-volume
+          readOnly: true
+      dnsPolicy: Default
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: coredns
+      serviceAccountName: coredns
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+      volumes:
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: Corefile
+            path: Corefile
+          name: coredns
+        name: config-volume
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
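+  # (the Corefile below wires up the standard CoreDNS plugin chain: errors,
+  # health, kubernetes for cluster.local service discovery, prometheus
+  # metrics on :9153, proxying to /etc/resolv.conf, and a 30-second cache)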
+  namespace: kube-system
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+          pods insecure
+          upstream
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        proxy . /etc/resolv.conf
+        cache 30
+        reload
+    }
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9153"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: KubeDNS
+  name: kube-dns
+  namespace: kube-system
+spec:
+  clusterIP: 10.96.0.10
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+    targetPort: 53
+  selector:
+    k8s-app: kube-dns
+  sessionAffinity: None
+  type: ClusterIP
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:coredns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:coredns
+subjects:
+- kind: ServiceAccount
+  name: coredns
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:coredns
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - services
+  - pods
+  - namespaces
+  verbs:
+  - list
+  - watch
diff --git a/content/installation/packages/1intro.md b/content/installation/packages/1intro.md
index 43c13f6..72e7bb1 100644
--- a/content/installation/packages/1intro.md
+++ b/content/installation/packages/1intro.md
@@ -27,7 +27,7 @@ Before you start bootstrapping a cluster with Kubernetes Distribution by Contain
 - `EXTERNAL_IP` is an IP address of an instance in external network
 - `INTERNAL_IP` is an IP address of instance in internal network
-- `MASTER_NODES_IP` is a sequence of all IP addresses of master nodes. In the case of only one node it is equal to the master node's `EXTERNAL_IP` value
+
 - `ETCD_NODE_IP` is an IP address of the etcd node. In case of multiple etcd nodes they can be declared as `ETCD_NODE_1_IP`, `ETCD_NODE_2_IP`, etc.
 - `POD_CIDR` is the range of IP addresses for pods
@@ -40,9 +40,9 @@ Before you start bootstrapping a cluster with Kubernetes Distribution by Contain
 ## Network information
 It is necessary to ensure that all cluster hosts can communicate by hostname. It will be sufficient to add the following entries to /etc/hosts on each node:
-192.168.0.4 master
-192.168.0.5 node-1
-192.168.0.6 node-2
+172.16.0.4 master
+172.16.0.5 node-1
+172.16.0.6 node-2
 Set a separate hostname for each node. For the node with the master role and name set:
 ```bash
@@ -58,8 +58,8 @@ Configure the network interfaces for public and private networks:
 BOOTPROTO=none
 DEFROUTE=yes
 DEVICE=eth0
-GATEWAY=192.168.0.1
-IPADDR=192.168.0.2
+GATEWAY=172.16.0.1
+IPADDR=172.16.0.2
 NETMASK=255.255.255.0
 ONBOOT=yes
 TYPE=Ethernet
@@ -78,30 +78,57 @@ TYPE=Ethernet
 USERCTL=no
 ```
+## Turn off SELinux
+
+Disabling SELinux is required so that containers can access the host filesystem, which pod networks, among other things, rely on. You have to do this until SELinux support is improved in the kubelet.
+
+Run these commands to disable SELinux immediately and keep it disabled after a reboot:
+
+```
+setenforce 0
+sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
+```
+
+## Configure nf call
+
+Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly because iptables was bypassed. You should ensure that net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config.
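+
+> These settings require the br_netfilter kernel module. If the sysctl keys are missing, load the module first; a minimal sketch, assuming a stock CentOS 7 host:
+
+```
+modprobe br_netfilter
+echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
+```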
+
+Run these commands to enable it:
+
+```
+{{< highlight bash >}}
+cat <<EOF > /etc/sysctl.d/k8s.conf
+net.bridge.bridge-nf-call-ip6tables = 1
+net.bridge.bridge-nf-call-iptables = 1
+EOF
+sysctl --system
+{{< / highlight >}}
+```
+
 ## Containerum RPM repository
 ### Repository definition
-Add Containerum repository to yum. Put this in /etc/yum.repos.d/exonlab.repo:
+Add the Containerum repository to yum. To add it, run this command:
 ```
+{{< highlight bash >}}
+cat <<EOF > /etc/yum.repos.d/exon.repo
 [exonlab-kubernetes1.11-testing]
 name=Exon lab kubernetes repo for CentOS
 baseurl=http://repo.containerum.io/centos/7/kubernetes-1_11-pkg/x86_64/
 skip_if_unavailable=False
 gpgcheck=1
 repo_gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ExonLab
+gpgkey=https://repo.containerum.io/RPM-GPG-KEY-ExonLab
 enabled=1
 enabled_metadata=1
-
+EOF
+{{< / highlight >}}
 ```
-### GPG package signing key
-
+And update the repositories:
 ```
-curl -O http://repo.containerum.io/RPM-GPG-KEY-ExonLab
-sudo mv RPM-GPG-KEY-ExonLab /etc/pki/rpm-gpg/
-sudo chown root:root /etc/pki/rpm-gpg/RPM-GPG-KEY-ExonLab
+yum update -y
 ```
 Key fingerprint: `2ED4 CBD2 309F 2C75 1642 CA7B 4E39 9E04 3CDA 4338`
diff --git a/content/installation/packages/2certificates.md b/content/installation/packages/2certificates.md
index 4a91294..546a40e 100644
--- a/content/installation/packages/2certificates.md
+++ b/content/installation/packages/2certificates.md
@@ -26,15 +26,18 @@ Download and build the script that helps generate and maintain certificate infra
 ```bash
 {{< highlight bash >}}
-git clone https://github.com/containerum/kube-cert-generator.git
-cd kube-cert-generator
-go build cmd/kube-cert-generator/*.go
+mkdir -p cert
+cd cert
+curl -OL https://github.com/containerum/kube-cert-generator/releases/download/v1.0.4/kube-cert-generator_linux_amd64_v1.0.4.tar.gz
+tar xvf kube-cert-generator_linux_amd64_*.tar.gz
 mv ca generator
+chmod +x generator
+rm -rfv kube-cert-generator_linux_amd64_*.tar.gz
 {{< / highlight >}}
 ```
-Config file:
+Config file `config.toml`:
 ```
 overwrite_files = false # If "true" overwrite existing files.
@@ -52,24 +55,25 @@
 street_address = []
 postal_code = []
 [master_node] # certificate for kubernetes control plane
-alias = "master" # HZ
-addresses = ["10.96.0.1", "192.0.2.1", "192.168.0.1", "192.168.0.2", "192.168.0.3"] # SAN for apiserver. Must contain all apiserver private and public addresses (or public load balancer addr.) and cluster ip (10.96.0.1 here).
+alias = "master"
+addresses = ["10.96.0.1", "192.0.2.1", "172.16.0.1", "172.16.0.2", "172.16.0.3"] # SAN for apiserver. Must contain all apiserver private addresses, the public address (or public load balancer addr.) and the cluster IP (10.96.0.1 here).
+
 [[worker_node]] # certificates for worker node
 alias = "node-01" # must be same as hostname of node.
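+# (Add one [[worker_node]] block per worker node; the generator emits a
+# separate client certificate per block, named after its alias.)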
-addresses = ["node-01", "192.168.0.11"] # internal ip addr and hostname of node +addresses = ["node-01", "172.16.0.11"] # internal ip addr and hostname of node [[worker_node]] alias = "node-02" -addresses = ["node-02", "192.168.0.12"] +addresses = ["node-02", "172.16.0.12"] [[etcd_node]] # certificates for etcd alias = "etcd1" # filename of etcd cert -addresses = ["ectd1", "192.168.1.5"] # SAN for etcd +addresses = ["ectd1", "172.16.1.5"] # SAN for etcd [[etcd_node]] alias = "etcd2" -addresses = ["ectd2", "192.168.1.6"] +addresses = ["ectd2", "172.16.1.6"] [[extra_cert]] # you can generate some custom cert name = "custom_cert" @@ -86,7 +90,7 @@ key_size = 2048 [extra_cert.host] # SANs for custom cert alias = "etcd2" - addresses = ["custom.example.com", "127.0.0.1", "192.168.0.111"] + addresses = ["custom.example.com", "127.0.0.1", "172.16.0.111"] [ca] # certificate authority configuration root_dir = "cert" diff --git a/content/installation/packages/3kubernetes-configuration-files.md b/content/installation/packages/3kubernetes-configuration-files.md index 4781ce9..4ee289c 100644 --- a/content/installation/packages/3kubernetes-configuration-files.md +++ b/content/installation/packages/3kubernetes-configuration-files.md @@ -197,6 +197,13 @@ kubectl config use-context default --kubeconfig=admin.kubeconfig ## Distribute configuration files +Distribute certificates across the nodes: +```bash +for instance in node-01 node-02 node-03; do + scp ca.crt ${instance}.crt ${instance}.key ${instance}:~/ +done +``` + Copy the appropriate kubeconfig files for `kubelet` and `kube-proxy` to each worker node: ```bash diff --git a/content/installation/packages/4etcd.md b/content/installation/packages/4etcd.md index db89814..8dcaa94 100644 --- a/content/installation/packages/4etcd.md +++ b/content/installation/packages/4etcd.md @@ -32,6 +32,8 @@ To install etcd from the official repo run: sudo yum install etcd +mkdir -p /etc/ssl/etcd/ + {{< / highlight >}} ``` @@ -41,6 +43,7 @@ Run: ```bash {{< highlight bash >}} + sudo mkdir /etc/ssl/etcd/ sudo cp ca.crt etcd.crt etcd.key /etc/ssl/etcd/ sudo chown etcd:etcd /etc/ssl/etcd/*.key /etc/ssl/etcd/*.crt diff --git a/content/installation/packages/5bootstrap-controllers.md b/content/installation/packages/5bootstrap-controllers.md index e52237e..4689f86 100644 --- a/content/installation/packages/5bootstrap-controllers.md +++ b/content/installation/packages/5bootstrap-controllers.md @@ -68,7 +68,7 @@ The node internal IP address will be used to manifest the API server as a cluste ``` ADVERTISE_ADDRESS=192.0.2.1 BIND_ADDRESS=0.0.0.0 -ETCD_SERVERS=ttps://${ETCD_NODE_1_IP}:2379,https://${ETCD_NODE_2_IP}:2379,https://${ETCD_NODE_3_IP}:2379 +ETCD_SERVERS=https://${ETCD_NODE_1_IP}:2379,https://${ETCD_NODE_2_IP}:2379,https://${ETCD_NODE_3_IP}:2379 ``` ### Configure Kubernetes Controller Manager @@ -107,8 +107,8 @@ Run: ```bash {{< highlight bash >}} -sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler -sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler +sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubernetes.target +sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler kubernetes.target {{< / highlight >}} ``` @@ -210,7 +210,7 @@ EOF Make an HTTP request to print the Kubernetes version: ```bash -curl --cacert ca.crt https://${KUBERNETES_PUBLIC_IP}:6443/version +curl -k --cacert ca.crt https://${KUBERNETES_PUBLIC_IP}:6443/version ``` Output: diff --git 
a/content/installation/packages/7bootstrap-workers.md b/content/installation/packages/7bootstrap-workers.md
index 878e74e..ecc623f 100644
--- a/content/installation/packages/7bootstrap-workers.md
+++ b/content/installation/packages/7bootstrap-workers.md
@@ -18,7 +18,7 @@ draft: false
 This section covers how to launch three worker nodes and install the following components: [runc](https://github.com/opencontainers/runc), [container networking plugins](https://github.com/containernetworking/cni), [containerd](https://github.com/containerd/containerd), [kubelet](https://kubernetes.io/docs/admin/kubelet), [kube-proxy](https://kubernetes.io/docs/concepts/cluster-administration/proxies).
-> **Don't forget to run all commands on all worker nodes.**
+> **Don't forget to run these commands on all worker nodes.**
 ## Provision a worker node
@@ -30,9 +30,11 @@ Install the OS dependencies:
 ```bash
 {{< highlight bash >}}
 sudo yum update
 sudo yum -y install socat conntrack ipset
+swapoff -a
+
 {{< / highlight >}}
 ```
-
+> **Don't forget to also disable swap permanently by removing or commenting out the swap entry in /etc/fstab.**
 > `socat` enables support for `kubectl port-forward` command.
 ### Download and install the components binaries
@@ -45,21 +47,6 @@
 ```bash
 {{< highlight bash >}}
 sudo yum install kubernetes-node-meta
 {{< / highlight >}}
 ```
-Create installation directories:
-
-```bash
-{{< highlight bash >}}
-
-sudo mkdir -p \
-  /var/lib/kubelet \
-  /var/lib/kube-proxy \
-  /var/lib/kubernetes \
-  /var/run/kubernetes \
-  /etc/kubernetes/pki
-
-{{< / highlight >}}
-```
-
 ### Install docker
 Kubernetes supports various container runtimes. By default this installation uses docker. To install and configure alternative runtimes consult the [plugins](/plugins) section.
 ```bash
 {{< highlight bash >}}
-yum install docker
-sed -i 's/native.cgroupdriver=systemd/native.cgroupdriver=cgroupfs/' /usr/lib/systemd/system/docker.service
-systemctl daemon-reload
+yum install docker -y
 systemctl enable docker && systemctl start docker
 {{< / highlight >}}
 ```
@@ -93,7 +78,7 @@ sudo cp $HOSTNAME.kubeconfig /etc/kubernetes/kubelet.kubeconfig
 ```bash
 {{< highlight bash >}}
-sudo mv kube-proxy.kubeconfig /etc/kubernetes
+sudo cp kube-proxy.kubeconfig /etc/kubernetes
 {{< / highlight >}}
 ```
@@ -104,8 +89,8 @@
 {{< highlight bash >}}
 sudo systemctl daemon-reload
-sudo systemctl enable kubernetes.target
-sudo systemctl start kubernetes.target
+sudo systemctl enable kubernetes.target kubelet kube-proxy
+sudo systemctl start kubernetes.target kubelet kube-proxy
 {{< / highlight >}}
 ```
@@ -130,4 +115,4 @@
 node-03   Ready     20s   v1.10.2
 
 Done!
 
-Now you can proceed to [configuring Flannel](/installation/packages/8flannel).
+Now you can proceed to [setting up Calico](/installation/packages/8calico).
diff --git a/content/installation/packages/8calico.md b/content/installation/packages/8calico.md
new file mode 100644
index 0000000..bceeb45
--- /dev/null
+++ b/content/installation/packages/8calico.md
@@ -0,0 +1,160 @@
+---
+title: Calico Installation
+linktitle: Install Calico
+description: Installing Calico - an overlay network for the cluster.
+
+categories: []
+keywords: []
+
+menu:
+  docs:
+    parent: "packages"
+    weight: 9
+
+draft: false
+---
+
+# Install Calico
+
+Calico is an overlay network for containers. Download the Calico networking manifest:
+
+```bash
+curl -O https://raw.githubusercontent.com/containerum/kdc-docs/master/content/files/calico/calico.yaml
+```
+
+Now it's time to configure Calico!
+To run Calico, it needs to be connected to an etcd database. We will use the same etcd cluster as Kubernetes does.
+
+First, set the etcd endpoints in `calico.yaml` (a single etcd server also works in this config):
+```
+etcd_endpoints: "https://${ETCD_NODE_1_IP}:2379,https://${ETCD_NODE_2_IP}:2379,https://${ETCD_NODE_3_IP}:2379"
+```
+Next, uncomment the certificate paths in the `calico.yaml` ConfigMap:
+```
+etcd_ca: "/calico-secrets/etcd-ca"
+etcd_cert: "/calico-secrets/etcd-cert"
+etcd_key: "/calico-secrets/etcd-key"
+```
+To allow Calico to connect to the Kubernetes etcd, you need to add the certificates to the Secrets section of the manifest.
+
+The certificates have to be base64-encoded with newlines removed; you can use this command for the CA, key, and cert:
+```bash
+cat ca.crt | base64 -w0
+```
+Paste the output into these fields:
+```
+etcd-key: ""
+etcd-cert: ""
+etcd-ca: ""
+```
+If you do not want to use pod-cidr=192.168.0.0/16, update the "CALICO_IPV4POOL_CIDR" value in the manifest. Use the same CIDR as in the other config files. When everything is filled in, apply the manifest with `kubectl apply -f calico.yaml`.
+
+> **It is also recommended to define the default interface for Calico traffic: just add IP_AUTODETECTION_METHOD to the env variables in `calico.yaml`.**
+
+# Enable BGP in Calico
+
+If everything above went right, we can proceed with the BGP configuration in Calico.
+
+First of all, we need the calicoctl utility. To get it, deploy a pod that runs calicoctl commands:
+```bash
+curl -O https://raw.githubusercontent.com/containerum/kdc-docs/master/content/files/calico/calicoctl.yaml
+kubectl apply -f calicoctl.yaml
+```
+
+> Run commands with this syntax:
+```bash
+kubectl exec -ti -n kube-system calicoctl -- /calicoctl get profiles -o wide
+```
+
+Create the default BGP configuration:
+```bash
+cat << EOF | kubectl exec -i -n kube-system calicoctl -- /calicoctl create -f -
+apiVersion: projectcalico.org/v3
+kind: BGPConfiguration
+metadata:
+  name: default
+spec:
+  logSeverityScreen: Info
+  nodeToNodeMeshEnabled: true
+  asNumber: 63400
+EOF
+```
+
+To access pods from the master you have to install [bird](https://bird.network.cz/) on the master node:
+```bash
+yum install -y bird
+```
+Bird config sample `/etc/bird.conf`:
+```ini
+log syslog { trace, info, remote, warning, error, auth, fatal, bug };
+log stderr all;
+
+router id 172.16.0.1; # master internal ip
+
+# push all incoming routes to kernel routing table
+protocol kernel {
+    persist; # save routes on bird shutdown
+    scan time 2;
+    export all; # export all incoming routes to kernel
+    graceful restart;
+}
+
+# scan interfaces
+protocol device {
+    debug { states };
+    scan time 2;
+}
+
+protocol direct {
+    debug { states };
+    interface "ens160"; # master internal interface;
+                        # should be the same as configured
+                        # for Calico communication
+}
+
+# apply incoming routes to pod subnet
+filter main_filter {
+    if net ~ 192.168.0.0/16 then accept; # use your POD CIDR
+    else reject;
+}
+
+# BGP rule template
+template bgp bgp_template {
+    debug { states };
+    description "Connection to BGP peer";
+    local as 63400; # same as Calico host AS
+    multihop; # allow connection to neighbor through router
+    gateway recursive; # allow routes through router
+    import filter main_filter; # apply filter
+    next hop self; # advertise our ip as next hop
+    source address 172.16.0.1; # master internal ip
+    add paths on; # allow multiple routes to same subnet
+    graceful restart;
+}
+
+# list of BGP peers (kubernetes nodes)
+protocol bgp node-01 from bgp_template {
+    neighbor 172.16.0.11 as 63400;
+}
+
+protocol bgp node-02 from bgp_template {
+    neighbor 172.16.0.12 as 63400;
+}
+```
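+
+After writing the config, enable and start bird so the routes are actually exchanged. A minimal sketch, assuming the package installs a systemd unit named `bird`:
+
+```bash
+systemctl enable bird
+systemctl start bird
+```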
+
+You have to add the master node's IP to the BGP peers in Calico:
+```bash
+cat << EOF | kubectl exec -i -n kube-system calicoctl -- /calicoctl create -f -
+apiVersion: projectcalico.org/v3
+kind: BGPPeer
+metadata:
+  name: bgppeer-m1
+spec:
+  peerIP: 172.16.0.1
+  asNumber: 63400
+EOF
+```
+
+Done!
+
+Now you can proceed to [configuring DNS](/installation/packages/9dns).
diff --git a/content/installation/packages/8flannel.md b/content/installation/packages/8flannel.md
deleted file mode 100644
index bff5aee..0000000
--- a/content/installation/packages/8flannel.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Flannel Installation
-linktitle: Install Flannel
-description: Installing Flannel - an overlay network for the cluster.
-
-categories: []
-keywords: []
-
-menu:
-  docs:
-    parent: "packages"
-    weight: 9
-
-draft: false
----
-
-# Install Flannel
-
-Flannel is an etcd-backed overlay network for containers. Download the Flannel networking manifest:
-
-```bash
-curl -OL https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-```
-
-If you do not want to use pod-cird=10.244.0.0/16 then change it in kube-flannel.
-Apply the manifest with:
-
-```
-kubectl apply -f kube-flannel.yml
-```
-
-Done!
-
-Now you can proceed to [configuring DNS](/installation/packages/9dns).
diff --git a/content/installation/packages/9dns.md b/content/installation/packages/9dns.md
index 0b560f2..4da2021 100644
--- a/content/installation/packages/9dns.md
+++ b/content/installation/packages/9dns.md
@@ -1,5 +1,5 @@
 ---
-title: Kubernetes DNS Cluster Add-on
+title: Kubernetes CoreDNS Cluster Add-on
 linktitle: DNS Cluster
 description: Launching service discovery to applications running inside the Kubernetes cluster.
@@ -15,26 +15,26 @@ draft: false
 ---
 # Launch DNS Cluster Add-on
-Configure DNS Cluster to enable service discovery to applications running inside the Kubernetes cluster.
+Configure the CoreDNS cluster add-on to enable service discovery for applications running in the Kubernetes cluster.
 ## Deploy the kube-dns cluster add-on
-Launch `kube-dns`:
+Launch `coredns`:
 ```bash
-kubectl create -f https://raw.githubusercontent.com/containerum/cdk-docs/master/content/files/kube-dns.yaml
+kubectl create -f https://raw.githubusercontent.com/containerum/cdk-docs/master/content/files/coredns.yaml
 ```
 Output:
 ```
-service "kube-dns" created
-serviceaccount "kube-dns" created
-configmap "kube-dns" created
-deployment.extensions "kube-dns" created
+service "coredns" created
+serviceaccount "coredns" created
+configmap "coredns" created
+deployment.apps "coredns" created
 ```
-List the pods of the `kube-dns` deployment:
+List the pods of the `coredns` deployment:
 ```bash
 kubectl get pods -l k8s-app=kube-dns -n kube-system
@@ -43,7 +43,7 @@
 Output:
 ```
 NAME                        READY     STATUS    RESTARTS   AGE
-kube-dns-3097350089-gq015   3/3       Running   0          20s
+coredns-c68859c76-5pw2z     1/1       Running   0          20s
 ```
 ## Verification
diff --git a/content/installation/packages/_index.md b/content/installation/packages/_index.md
index d833c01..dbe5dbb 100644
--- a/content/installation/packages/_index.md
+++ b/content/installation/packages/_index.md
@@ -41,8 +41,8 @@ Kubectl is a CLI tool for Kubernetes.
 - Bootstrap workers
 Launching worker nodes. You can launch as many workers as you need.
-- Install flannel
-Flannel is a virtual network that attaches IP addresses to containers.
+- Install Calico
+Calico is a virtual network that attaches IP addresses to containers.
 - Configure DNS add-on
 [DNS add-on](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) is a DNS-based service discovery for applications running in the Kubernetes cluster.