chemtech
@chemtech
Linux user, DevOps

How do I get kubernetes-sigs/kind working with an ingress and some service on a developer machine?

https://github.com/kubernetes-sigs/kind - version 0.4.0
Creating a Kubernetes cluster with kubernetes-sigs/kind

Create the file kind-example-config.yaml:
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
nodes:
- role: control-plane
- role: worker
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    listenAddress: "192.168.0.9" # the address of the host's network interface


kind create cluster --config kind-example-config.yaml
Creating cluster "kind" ...
     ✓ Ensuring node image (kindest/node:v1.15.0)
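
With extraPortMappings, kind publishes port 80 of the worker node's container on 192.168.0.9:80 of the host. A quick sanity check that the mapping took effect (assuming the default cluster name "kind", for which kind v0.4.0 names the worker container kind-worker):

docker port kind-worker
# should show 80/tcp -> 192.168.0.9:80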


kubectl create serviceaccount --namespace kube-system tiller

serviceaccount/tiller created

kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller

clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created


helm init --service-account tiller
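
Before installing charts it is worth making sure Tiller actually came up; tiller-deploy is the deployment name helm init creates:

kubectl rollout status deployment/tiller-deploy --namespace kube-system
helm version  # prints both Client and Server versions once Tiller responds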

I installed MetalLB, but in the end it doesn't work.
kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml
namespace/metallb-system created
serviceaccount/controller created
serviceaccount/speaker created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
role.rbac.authorization.k8s.io/config-watcher created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/config-watcher created
daemonset.apps/speaker created
deployment.apps/controller created


Checked the pod status:

kubectl get pod --namespace=metallb-system
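
Instead of polling, the wait can be scripted; a minimal sketch (kubectl wait is available on Kubernetes 1.11+):

kubectl wait --namespace metallb-system --for=condition=ready pod --all --timeout=90s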


vi metallb-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.0.9-192.168.0.9


kubectl apply -f metallb-config.yaml
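
To verify that MetalLB accepted the pool, the controller logs can be inspected; the deployment name controller comes from the apply output above:

kubectl logs --namespace metallb-system deployment/controller
# look for the config being loaded and, later, the IP assignment for the service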

helm install stable/nginx-ingress

kubectl get svc
NAME                                              TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
cranky-sabertooth-nginx-ingress-controller        LoadBalancer   10.106.200.229   192.168.0.240   80:31729/TCP,443:30973/TCP   11s
cranky-sabertooth-nginx-ingress-default-backend   ClusterIP      10.96.224.51     <none>          80/TCP                       11s
kubernetes                                        ClusterIP      10.96.0.1        <none>          443/TCP                      159m


helm install --name grafana stable/grafana --set=ingress.enabled=True,ingress.hosts={grafana.domain.com}
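
To check that the chart actually created the Ingress object and which host it routes (the resource name grafana here is an assumption based on the release name):

kubectl get ingress
kubectl describe ingress grafana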

curl 127.0.0.1:80 -H "HOST: grafana.domain.com"
curl: (7) Failed to connect to 127.0.0.1 port 80: Connection refused


curl 192.168.0.240:80 -H "HOST: grafana.domain.com"
curl: (7) Failed to connect to 192.168.0.240 port 80: No route to host
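
"No route to host" for a layer2 MetalLB address usually means nothing on the LAN answers ARP for it. A quick check from the host (arping is from the iputils package; wlp3s0 is the LAN interface from the ip a output below):

ip neigh | grep 192.168.0.240   # is there an ARP entry at all?
arping -I wlp3s0 192.168.0.240  # does anything on the segment reply?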


ss -tnlp | grep 80
LISTEN   0         128                 0.0.0.0:8084             0.0.0.0:*


ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: enp2s0f0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state DOWN group default qlen 1000
    link/ether b8:70:f4:90:bb:b0 brd ff:ff:ff:ff:ff:ff
3: wlp3s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 78:e4:00:ee:cd:b4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.9/24 brd 192.168.0.255 scope global dynamic noprefixroute wlp3s0
       valid_lft 12514sec preferred_lft 12514sec
    inet6 fe80::88cf:369f:53e3:b3e1/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
4: br-f4684c062c65: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:5b:72:65:61 brd ff:ff:ff:ff:ff:ff
    inet 172.20.0.1/16 brd 172.20.255.255 scope global br-f4684c062c65
       valid_lft forever preferred_lft forever
5: br-0afdc979a147: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:8d:d7:9a:e0 brd ff:ff:ff:ff:ff:ff
    inet 172.19.0.1/16 brd 172.19.255.255 scope global br-0afdc979a147
       valid_lft forever preferred_lft forever
6: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:f7:b4:57:22 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:f7ff:feb4:5722/64 scope link 
       valid_lft forever preferred_lft forever
7: ppp0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1400 qdisc fq_codel state UNKNOWN group default qlen 3
    link/ppp 
    inet 10.235.34.71 peer 10.6.78.67/32 scope global ppp0
       valid_lft forever preferred_lft forever
15: veth662cbf0@if14: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
    link/ether 96:cf:fd:ba:47:b2 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::94cf:fdff:feba:47b2/64 scope link 
       valid_lft forever preferred_lft forever


docker network ls
NETWORK ID          NAME                   DRIVER              SCOPE
9a02018d91e9        bridge                 bridge              local
54dc14a81a01        host                   host                local
504f4158bc3b        none                   null                local
0afdc979a147        stack-docker_default   bridge              local
f4684c062c65        stack-docker_stack     bridge              local
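
One thing worth checking here: a layer2 MetalLB pool only works if the speaker pods share an L2 segment with the clients, and kind v0.4.0 attaches its node containers to Docker's default bridge network rather than to the 192.168.0.0/24 LAN. To see which subnet the nodes actually live on:

docker network inspect bridge --format '{{range .IPAM.Config}}{{.Subnet}} {{end}}'
# given the docker0 address above, this prints 172.17.0.0/16, not 192.168.0.0/24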


I also need to test ingress, among other things.
Please don't suggest NodePort, Minikube, or port-forward.