Summary: kubernetes, ingress, bare-metal, kubectl, nodeport, metallb

ingress-nginx

About | Deployment

Install

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
kubectl get nodes -o wide
NAME             STATUS   ROLES    AGE   VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
k8s-dev-master   Ready    master   97m   v1.16.3   10.0.100.134   <none>        Ubuntu 18.04.3 LTS   4.15.0-72-generic   docker://18.9.7
k8s-dev-node1    Ready    <none>   95m   v1.16.3   10.0.100.135   <none>        Ubuntu 18.04.3 LTS   4.15.0-72-generic   docker://18.9.7
k8s-dev-node2    Ready    <none>   95m   v1.16.3   10.0.100.136   <none>        Ubuntu 18.04.3 LTS   4.15.0-72-generic   docker://18.9.7

NodePort

Run Ingress NodePort Service

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml
kubectl -n ingress-nginx get svc
# ingress-nginx   NodePort   10.233.0.11   <none>        80:32469/TCP,443:30963/TCP   13m
# !!! 32469 - host external port !!!

Run Application

# kubia-rc.yaml
# ReplicationController that keeps 3 replicas of the kubia pod running.
# NOTE(review): ReplicationController is the legacy controller; Deployments
# are the recommended replacement — kept as-is to match the tutorial.
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia
spec:
  replicas: 3  # desired pod count
  selector:
    app: kubia  # must match the pod template labels below
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
        - name: kubia
          image: zoidenberg/kubia
          ports:
            - containerPort: 8080  # port the app listens on inside the pod
# kubia-svc.yaml
# ClusterIP Service (default type) exposing the kubia pods inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  ports:
    - port: 80  # port the Service listens on
      targetPort: 8080  # containerPort of the kubia pods
  selector:
    app: kubia  # routes traffic to pods labeled app=kubia
kubectl create -f kubia-rc.yaml
kubectl get pods

kubectl create -f kubia-svc.yaml
kubectl get svc

Run Ingress

# kubia-ingress.yaml
# Ingress routing the /foo path (any host) to the kubia Service on port 80.
# NOTE(review): networking.k8s.io/v1beta1 matches the v1.16 cluster shown
# above; clusters on 1.19+ require networking.k8s.io/v1 with the
# service/pathType backend format — confirm against your cluster version.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: kubia
spec:
  rules:
    - http:
        paths:
          - path: /foo
            backend:
              serviceName: kubia  # the Service defined in kubia-svc.yaml
              servicePort: 80
kubectl create -f kubia-ingress.yaml
kubectl get ing kubia
# NAME    HOSTS   ADDRESS       PORTS   AGE
# kubia   *       10.233.0.11   80      12m

Test

curl 10.0.100.134:32469/foo
# You've hit kubia-jjv5q
curl 10.0.100.134:32469/foo
# You've hit kubia-b9r5g
curl 10.0.100.134:32469/foo
# You've hit kubia-g4tw8

LoadBalancer With MetalLB

https://metallb.universe.tf

MetalLB - это механизм, который позволяет создать LoadBalancer при запуске кластера k8s на голом железе.

Настройка HA-кластера Kubernetes на «голом железе» с GlusterFS & MetalLB. Часть 2/3

Install MetalLB

# --- устанавливаем metallb
#     @see https://metallb.universe.tf/installation
kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.3/manifests/metallb.yaml

# --- проверяем что все модули запущены
kubectl get pod --namespace=metallb-system
# NAME                          READY   STATUS    RESTARTS   AGE
# controller-65895b47d4-tdfqd   1/1     Running   2          5h4m
# speaker-9w6th                 1/1     Running   2          7h14m
# speaker-hdrsd                 1/1     Running   2          7h14m
# speaker-n9z95                 1/1     Running   2          7h14m

Config MetalLB

Создаем конфигурацию MetalLB.

!!! ВНИМАНИЕ !!! Диапазон адресов в поле addresses НЕ ДОЛЖЕН ПЕРЕСЕКАТЬСЯ с адресами узлов кластера. Они должны быть определены из свободных адресов (спросите у админов).

# metallb-config.yaml
# MetalLB layer2 address pool. EXTERNAL-IPs of LoadBalancer Services are
# allocated from this range, so it must be on the same network as the
# cluster nodes (10.0.100.x here) while NOT overlapping the node IPs
# (10.0.100.134-136). Pick free addresses agreed with your network admins.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system  # namespace created by the MetalLB manifest
  name: config  # MetalLB reads its configuration from this ConfigMap name
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2  # L2 (ARP/NDP) mode — no BGP router required
      addresses:
      - 10.0.100.180-10.0.100.190
kubectl create -f metallb-config.yaml
kubectl -n metallb-system describe configmaps config

Run Ingress-Nginx Load Balancer

# service-load-balancer.yaml
# LoadBalancer Service in front of the ingress-nginx controller; MetalLB
# assigns it an EXTERNAL-IP from the configured address pool.
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: LoadBalancer  # on bare metal this type is fulfilled by MetalLB
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
  # selects the ingress-nginx controller pods by their standard labels
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
kubectl create -f service-load-balancer.yaml
kubectl -n ingress-nginx get svc
# NAME            TYPE           CLUSTER-IP      EXTERNAL-IP    PORT(S)                      AGE
# ingress-nginx   LoadBalancer   10.233.32.188   10.0.100.181   80:32002/TCP,443:31997/TCP   21m

EXTERNAL-IP 10.0.100.181 is an address allocated by MetalLB from its configured address pool (not one of the node IPs).

Run Application

# kubia-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    app: kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
        - name: kubia
          image: zoidenberg/kubia
          ports:
            - containerPort: 8080
# kubia-svc.yaml
# ClusterIP Service (default type) exposing the kubia pods inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  ports:
    - port: 80  # port the Service listens on
      targetPort: 8080  # containerPort of the kubia pods
  selector:
    app: kubia  # routes traffic to pods labeled app=kubia
kubectl create -f kubia-rc.yaml
kubectl get pods
# NAME          READY   STATUS    RESTARTS   AGE
# kubia-jsxd4   1/1     Running   0          25s
# kubia-scrgn   1/1     Running   0          25s
# kubia-wlxxv   1/1     Running   0          25s

kubectl create -f kubia-svc.yaml
kubectl get svc
# NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
# ...
# kubia        ClusterIP   10.233.61.55   <none>        80/TCP    15m

Run Ingress

# kubia-ingress.yaml
# Ingress routing the /foo path (any host) to the kubia Service on port 80.
# NOTE(review): networking.k8s.io/v1beta1 matches the v1.16 cluster shown
# above; clusters on 1.19+ require networking.k8s.io/v1 with the
# service/pathType backend format — confirm against your cluster version.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: kubia
spec:
  rules:
    - http:
        paths:
          - path: /foo
            backend:
              serviceName: kubia  # the Service defined in kubia-svc.yaml
              servicePort: 80
kubectl create -f kubia-ingress.yaml
kubectl get ing kubia
# NAME    HOSTS   ADDRESS        PORTS   AGE
# kubia   *       10.0.100.181   80      21m

Test

curl 10.0.100.181/foo
# You've hit kubia-r9ts9
curl 10.0.100.181/foo
# You've hit kubia-7pfmd
curl 10.0.100.181/foo
# You've hit kubia-r9ts9
curl 10.0.100.181/foo
# You've hit kubia-ks8k7

Written with StackEdit.