Задать вопрос
  • Почему не проходят HTTPS запросы из Docker контейнера?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Valentin Barbolin,
    # Generated by iptables-save v1.8.10 (nf_tables) on Thu Feb 13 19:20:05 2025
    *filter
    :INPUT DROP [16988:778409]
    :FORWARD DROP [0:0]
    :OUTPUT ACCEPT [2:80]
    :DOCKER - [0:0]
    :DOCKER-ISOLATION-STAGE-1 - [0:0]
    :DOCKER-ISOLATION-STAGE-2 - [0:0]
    :DOCKER-USER - [0:0]
    :ufw-after-forward - [0:0]
    :ufw-after-input - [0:0]
    :ufw-after-logging-forward - [0:0]
    :ufw-after-logging-input - [0:0]
    :ufw-after-logging-output - [0:0]
    :ufw-after-output - [0:0]
    :ufw-before-forward - [0:0]
    :ufw-before-input - [0:0]
    :ufw-before-logging-forward - [0:0]
    :ufw-before-logging-input - [0:0]
    :ufw-before-logging-output - [0:0]
    :ufw-before-output - [0:0]
    :ufw-docker-logging-deny - [0:0]
    :ufw-logging-allow - [0:0]
    :ufw-logging-deny - [0:0]
    :ufw-not-local - [0:0]
    :ufw-reject-forward - [0:0]
    :ufw-reject-input - [0:0]
    :ufw-reject-output - [0:0]
    :ufw-skip-to-policy-forward - [0:0]
    :ufw-skip-to-policy-input - [0:0]
    :ufw-skip-to-policy-output - [0:0]
    :ufw-track-forward - [0:0]
    :ufw-track-input - [0:0]
    :ufw-track-output - [0:0]
    :ufw-user-forward - [0:0]
    :ufw-user-input - [0:0]
    :ufw-user-limit - [0:0]
    :ufw-user-limit-accept - [0:0]
    :ufw-user-logging-forward - [0:0]
    :ufw-user-logging-input - [0:0]
    :ufw-user-logging-output - [0:0]
    :ufw-user-output - [0:0]
    -A INPUT -j ufw-before-logging-input
    -A INPUT -j ufw-before-input
    -A INPUT -j ufw-after-input
    -A INPUT -j ufw-after-logging-input
    -A INPUT -j ufw-reject-input
    -A INPUT -j ufw-track-input
    -A FORWARD -j DOCKER-USER
    -A FORWARD -j DOCKER-ISOLATION-STAGE-1
    -A FORWARD -o br-8f63e1fbea4e -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A FORWARD -o br-8f63e1fbea4e -j DOCKER
    -A FORWARD -i br-8f63e1fbea4e ! -o br-8f63e1fbea4e -j ACCEPT
    -A FORWARD -i br-8f63e1fbea4e -o br-8f63e1fbea4e -j ACCEPT
    -A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A FORWARD -o docker0 -j DOCKER
    -A FORWARD -i docker0 ! -o docker0 -j ACCEPT
    -A FORWARD -i docker0 -o docker0 -j ACCEPT
    -A FORWARD -o br-db9c155d696d -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A FORWARD -o br-db9c155d696d -j DOCKER
    -A FORWARD -i br-db9c155d696d ! -o br-db9c155d696d -j ACCEPT
    -A FORWARD -i br-db9c155d696d -o br-db9c155d696d -j ACCEPT
    -A FORWARD -j ufw-before-logging-forward
    -A FORWARD -j ufw-before-forward
    -A FORWARD -j ufw-after-forward
    -A FORWARD -j ufw-after-logging-forward
    -A FORWARD -j ufw-reject-forward
    -A FORWARD -j ufw-track-forward
    -A OUTPUT -j ufw-before-logging-output
    -A OUTPUT -j ufw-before-output
    -A OUTPUT -j ufw-after-output
    -A OUTPUT -j ufw-after-logging-output
    -A OUTPUT -j ufw-reject-output
    -A OUTPUT -j ufw-track-output
    -A DOCKER -d 172.18.0.3/32 ! -i br-8f63e1fbea4e -o br-8f63e1fbea4e -p tcp -m tcp --dport 6379 -j ACCEPT
    -A DOCKER -d 172.18.0.4/32 ! -i br-8f63e1fbea4e -o br-8f63e1fbea4e -p tcp -m tcp --dport 5432 -j ACCEPT
    -A DOCKER -d 172.18.0.5/32 ! -i br-8f63e1fbea4e -o br-8f63e1fbea4e -p tcp -m tcp --dport 80 -j ACCEPT
    -A DOCKER -d 172.18.0.5/32 ! -i br-8f63e1fbea4e -o br-8f63e1fbea4e -p tcp -m tcp --dport 443 -j ACCEPT
    -A DOCKER-ISOLATION-STAGE-1 -i br-8f63e1fbea4e ! -o br-8f63e1fbea4e -j DOCKER-ISOLATION-STAGE-2
    -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
    -A DOCKER-ISOLATION-STAGE-1 -i br-db9c155d696d ! -o br-db9c155d696d -j DOCKER-ISOLATION-STAGE-2
    -A DOCKER-ISOLATION-STAGE-1 -j RETURN
    -A DOCKER-ISOLATION-STAGE-2 -o br-8f63e1fbea4e -j DROP
    -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
    -A DOCKER-ISOLATION-STAGE-2 -o br-db9c155d696d -j DROP
    -A DOCKER-ISOLATION-STAGE-2 -j RETURN
    -A DOCKER-USER -j ufw-user-forward
    -A DOCKER-USER -s 10.0.0.0/8 -j RETURN
    -A DOCKER-USER -s 172.16.0.0/12 -j RETURN
    -A DOCKER-USER -s 192.168.0.0/16 -j RETURN
    -A DOCKER-USER -p udp -m udp --sport 53 --dport 1024:65535 -j RETURN
    -A DOCKER-USER -d 192.168.0.0/16 -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j ufw-docker-logging-deny
    -A DOCKER-USER -d 10.0.0.0/8 -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j ufw-docker-logging-deny
    -A DOCKER-USER -d 172.16.0.0/12 -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j ufw-docker-logging-deny
    -A DOCKER-USER -d 192.168.0.0/16 -p udp -m udp --dport 0:32767 -j ufw-docker-logging-deny
    -A DOCKER-USER -d 10.0.0.0/8 -p udp -m udp --dport 0:32767 -j ufw-docker-logging-deny
    -A DOCKER-USER -d 172.16.0.0/12 -p udp -m udp --dport 0:32767 -j ufw-docker-logging-deny
    -A DOCKER-USER -j RETURN
    -A ufw-after-input -p udp -m udp --dport 137 -j ufw-skip-to-policy-input
    -A ufw-after-input -p udp -m udp --dport 138 -j ufw-skip-to-policy-input
    -A ufw-after-input -p tcp -m tcp --dport 139 -j ufw-skip-to-policy-input
    -A ufw-after-input -p tcp -m tcp --dport 445 -j ufw-skip-to-policy-input
    -A ufw-after-input -p udp -m udp --dport 67 -j ufw-skip-to-policy-input
    -A ufw-after-input -p udp -m udp --dport 68 -j ufw-skip-to-policy-input
    -A ufw-after-input -m addrtype --dst-type BROADCAST -j ufw-skip-to-policy-input
    -A ufw-after-logging-forward -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW BLOCK] "
    -A ufw-after-logging-input -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW BLOCK] "
    -A ufw-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A ufw-before-forward -p icmp -m icmp --icmp-type 3 -j ACCEPT
    -A ufw-before-forward -p icmp -m icmp --icmp-type 11 -j ACCEPT
    -A ufw-before-forward -p icmp -m icmp --icmp-type 12 -j ACCEPT
    -A ufw-before-forward -p icmp -m icmp --icmp-type 8 -j ACCEPT
    -A ufw-before-forward -j ufw-user-forward
    -A ufw-before-input -i lo -j ACCEPT
    -A ufw-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A ufw-before-input -m conntrack --ctstate INVALID -j ufw-logging-deny
    -A ufw-before-input -m conntrack --ctstate INVALID -j DROP
    -A ufw-before-input -p icmp -m icmp --icmp-type 3 -j ACCEPT
    -A ufw-before-input -p icmp -m icmp --icmp-type 11 -j ACCEPT
    -A ufw-before-input -p icmp -m icmp --icmp-type 12 -j ACCEPT
    -A ufw-before-input -p icmp -m icmp --icmp-type 8 -j ACCEPT
    -A ufw-before-input -p udp -m udp --sport 67 --dport 68 -j ACCEPT
    -A ufw-before-input -j ufw-not-local
    -A ufw-before-input -d 224.0.0.251/32 -p udp -m udp --dport 5353 -j ACCEPT
    -A ufw-before-input -d 239.255.255.250/32 -p udp -m udp --dport 1900 -j ACCEPT
    -A ufw-before-input -j ufw-user-input
    -A ufw-before-output -o lo -j ACCEPT
    -A ufw-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A ufw-before-output -j ufw-user-output
    -A ufw-docker-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW DOCKER BLOCK] "
    -A ufw-docker-logging-deny -j DROP
    -A ufw-logging-allow -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW ALLOW] "
    -A ufw-logging-deny -m conntrack --ctstate INVALID -m limit --limit 3/min --limit-burst 10 -j RETURN
    -A ufw-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW BLOCK] "
    -A ufw-not-local -m addrtype --dst-type LOCAL -j RETURN
    -A ufw-not-local -m addrtype --dst-type MULTICAST -j RETURN
    -A ufw-not-local -m addrtype --dst-type BROADCAST -j RETURN
    -A ufw-not-local -m limit --limit 3/min --limit-burst 10 -j ufw-logging-deny
    -A ufw-not-local -j DROP
    -A ufw-skip-to-policy-forward -j DROP
    -A ufw-skip-to-policy-input -j DROP
    -A ufw-skip-to-policy-output -j ACCEPT
    -A ufw-track-output -p tcp -m conntrack --ctstate NEW -j ACCEPT
    -A ufw-track-output -p udp -m conntrack --ctstate NEW -j ACCEPT
    -A ufw-user-forward -p tcp -m tcp --dport 80 -j ACCEPT
    -A ufw-user-forward -p tcp -m tcp --dport 443 -j ACCEPT
    -A ufw-user-input -p tcp -m tcp --dport 80 -j ACCEPT
    -A ufw-user-input -p udp -m udp --dport 80 -j ACCEPT
    -A ufw-user-input -p tcp -m tcp --dport 443 -j ACCEPT
    -A ufw-user-input -p udp -m udp --dport 443 -j ACCEPT
    -A ufw-user-input -p tcp -m tcp --dport 13167 -j ACCEPT
    -A ufw-user-input -p udp -m udp --dport 13167 -j ACCEPT
    -A ufw-user-limit -m limit --limit 3/min -j LOG --log-prefix "[UFW LIMIT BLOCK] "
    -A ufw-user-limit -j REJECT --reject-with icmp-port-unreachable
    -A ufw-user-limit-accept -j ACCEPT
    COMMIT
    # Completed on Thu Feb 13 19:20:05 2025
    # Generated by iptables-save v1.8.10 (nf_tables) on Thu Feb 13 19:20:05 2025
    *nat
    :PREROUTING ACCEPT [67409:3180715]
    :INPUT ACCEPT [0:0]
    :OUTPUT ACCEPT [17785:1628832]
    :POSTROUTING ACCEPT [164562:9693130]
    :DOCKER - [0:0]
    -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
    -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
    -A POSTROUTING -s 172.18.0.0/16 ! -o br-8f63e1fbea4e -j MASQUERADE
    -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
    -A POSTROUTING -s 172.19.0.0/16 ! -o br-db9c155d696d -j MASQUERADE
    -A POSTROUTING -s 172.18.0.3/32 -d 172.18.0.3/32 -p tcp -m tcp --dport 6379 -j MASQUERADE
    -A POSTROUTING -s 172.18.0.4/32 -d 172.18.0.4/32 -p tcp -m tcp --dport 5432 -j MASQUERADE
    -A POSTROUTING -s 172.18.0.5/32 -d 172.18.0.5/32 -p tcp -m tcp --dport 80 -j MASQUERADE
    -A POSTROUTING -s 172.18.0.5/32 -d 172.18.0.5/32 -p tcp -m tcp --dport 443 -j MASQUERADE
    -A DOCKER -i br-8f63e1fbea4e -j RETURN
    -A DOCKER -i docker0 -j RETURN
    -A DOCKER -i br-db9c155d696d -j RETURN
    -A DOCKER -d 127.0.0.1/32 ! -i br-8f63e1fbea4e -p tcp -m tcp --dport 6379 -j DNAT --to-destination 172.18.0.3:6379
    -A DOCKER -d 127.0.0.1/32 ! -i br-8f63e1fbea4e -p tcp -m tcp --dport 5432 -j DNAT --to-destination 172.18.0.4:5432
    -A DOCKER ! -i br-8f63e1fbea4e -p tcp -m tcp --dport 80 -j DNAT --to-destination 172.18.0.5:80
    -A DOCKER ! -i br-8f63e1fbea4e -p tcp -m tcp --dport 443 -j DNAT --to-destination 172.18.0.5:443
    COMMIT
    # Completed on Thu Feb 13 19:20:05 2025
    Написано
  • Как убрать отступы в диаграмме?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Только вместо того, чтобы указывать barThickness для каждого набора данных, я указал его в options. Значение применяется сразу для всех наборов.
    options: {
        ...,
        barThickness: 150,
      },
    Написано
  • Как убрать отступы в диаграмме?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Супер, то что нужно! Еще на одном форуме посоветовали сделать так:
    options: {
        ...,
        barPercentage: 1,
        categoryPercentage: 1,
      },

    Но ваше решение мне больше подходит.
    Написано
  • Почему Microk8s ingress не редиректит к поду?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Дмитрий Шицков,
    Что если прокинуть порт сервиса и открыть в браузере, работает так же как и при обращении на Pod?

    Да, так же
    и с соседней консоли

    Да

    Вообще не понимаю в чем может быть проблема :(
  • Почему Microk8s ingress не редиректит к поду?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Дмитрий Шицков, Это текущий конфиг, целиком. Сейчас всё развернул и запускаю через хельм, поэтому конфиги не совпадают с изначальными.
    Открыть конфиг

    ---
    # Source: yii/templates/serviceaccount.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: myapp-yii
      labels:
        helm.sh/chart: yii-0.1.0
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
        app.kubernetes.io/version: "1.4.0"
        app.kubernetes.io/managed-by: Helm
    ---
    # Source: yii/templates/host-config.yaml
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: backend-host-config
    data:
      default.conf: |
        server {
          client_max_body_size 10m;
    
          root /var/www/web;
    
          location /health {
            return 200;
          }
    
          location / {
              # try to serve file directly, fallback to app.php
              try_files $uri /index.php$is_args$args;
          }
    
          location ~* ^.+\\.(css|js)$ {
              access_log off;
              log_not_found off;
              expires max;
              add_header Access-Control-Allow-Origin \"*\";
          }
    
          location ~* ^.+\\.(svg|svgz|eot|otf|woff|woff2|ttf|ttc|rss|atom|jpg|jpeg|gif|png|ico|zip|tgz|gz|rar|bz2|doc|xls|exe|ppt|tar|wav|bmp|rtf)$ {
              access_log off;
              expires max;
              add_header Access-Control-Allow-Origin \"*\";
          }
    
          location ~ ^/index\\.php(/|$) {
              fastcgi_pass localhost:9000;
              fastcgi_split_path_info ^(.+\\.php)(/.*)$;
              include fastcgi_params;
              fastcgi_send_timeout 300s;
              fastcgi_read_timeout 60s;
              fastcgi_param HTTPS on;
              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
              fastcgi_param DOCUMENT_ROOT $realpath_root;
              http2_push_preload on;
              proxy_buffer_size          2048k;
              proxy_buffers              4 2048k;
              proxy_busy_buffers_size    4096k;
              fastcgi_buffers            16 256k;
              fastcgi_buffer_size        512k;
              internal;
          }
    
          location ~ \\.php$ {
              return 404;
          }
        }
    ---
    # Source: yii/templates/service.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: myapp-yii
      labels:
        helm.sh/chart: yii-0.1.0
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
        app.kubernetes.io/version: "1.4.0"
        app.kubernetes.io/managed-by: Helm
    spec:
      type: ClusterIP
      ports:
        - port: 80
          targetPort: http
          protocol: TCP
          name: http
        - port: 9000
          name: fastcgi
      selector:
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
    ---
    # Source: yii/templates/deployment.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: myapp-yii
      labels:
        helm.sh/chart: yii-0.1.0
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
        app.kubernetes.io/version: "1.4.0"
        app.kubernetes.io/managed-by: Helm
    spec:
      revisionHistoryLimit: 3
      replicas: 1
      selector:
        matchLabels:
          app.kubernetes.io/name: yii
          app.kubernetes.io/instance: myapp
      template:
        metadata:
          labels:
            app.kubernetes.io/name: yii
            app.kubernetes.io/instance: myapp
        spec:
          imagePullSecrets:
            - name: docker-token
          serviceAccountName: myapp-yii
          securityContext:
            {}
          volumes:
            - name: vendor-dir
              emptyDir: { }
            - name: upload-dir
              emptyDir: { }
            - name: runtime-dir
              emptyDir: { }
            - name: assets-dir
              emptyDir: { }
            - name: host-config
              configMap:
                name: backend-host-config
          initContainers:
            - name: app-dependencies
              image: "[app-image]"
              imagePullPolicy: IfNotPresent
              command: ['sh', '-c', 'composer install --no-interaction --classmap-authoritative']
              envFrom: &envFrom
                - secretRef:
                    name: backend-env
              volumeMounts:
                - mountPath: /var/www/vendor
                  name: vendor-dir
                - mountPath: /var/www/web/upload
                  name: upload-dir
                - mountPath: /var/www/runtime
                  name: runtime-dir
            - name: app-migrations
              image: "[app-image]"
              imagePullPolicy: IfNotPresent
              envFrom: *envFrom
              volumeMounts:
                - mountPath: /var/www/vendor
                  name: vendor-dir
                - mountPath: /var/www/web/upload
                  name: upload-dir
                - mountPath: /var/www/runtime
                  name: runtime-dir
              command: [ 'sh', '-c', './yii migrate --interactive=0' ]
            - name: app-cache-clear
              image: "[app-image]"
              imagePullPolicy: IfNotPresent
              envFrom: *envFrom
              volumeMounts:
                - mountPath: /var/www/vendor
                  name: vendor-dir
                - mountPath: /var/www/web/upload
                  name: upload-dir
                - mountPath: /var/www/runtime
                  name: runtime-dir
              command: [ 'sh', '-c', './yii cache/flush-all --interactive=0' ]
          containers:
            - name: app
              securityContext:
                {}
              image: "[app-image]"
              envFrom: *envFrom
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /var/www/vendor
                  name: vendor-dir
                - mountPath: /var/www/web/upload
                  name: upload-dir
                - mountPath: /var/www/runtime
                  name: runtime-dir
              ports:
                - name: http
                  containerPort: 80
                  protocol: TCP
              livenessProbe:
                httpGet:
                  path: /health
                  port: http
              resources:
                {}
            - name: webapp
              image: "[webapp-image]"
              volumeMounts:
                - mountPath: /etc/nginx/conf.d
                  name: host-config
    ---
    # Source: yii/templates/ingress.yaml
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: myapp-yii
      labels:
        helm.sh/chart: yii-0.1.0
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
        app.kubernetes.io/version: "1.4.0"
        app.kubernetes.io/managed-by: Helm
      annotations:
        cert-manager.io/issuer: cert-issuer-staging
        certmanager.k8s.io/issuer: cert-issuer-staging
        ingress.kubernetes.io/ssl-redirect: "true"
        kubernetes.io/ingress.class: nginx
        kubernetes.io/tls-acme: "true"
        nginx.ingress.kubernetes.io/backend-protocol: http
        nginx.ingress.kubernetes.io/client-body-buffer-size: 1M
        nginx.ingress.kubernetes.io/fastcgi-index: index.php
        nginx.ingress.kubernetes.io/fastcgi-params-configmap: default/fastcgi-config
        nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
        nginx.ingress.kubernetes.io/http2-push-preload: "true"
    spec:
      ingressClassName: nginx
      tls:
        - hosts:
            - "myapp.site"
          secretName: myapp-tls
      rules:
        - host: "myapp.site"
          http:
            paths:
              - path: /
                pathType: ImplementationSpecific
                backend:
                  service:
                    name: myapp-yii
                    port:
                      name: http
    ---
    # Source: yii/templates/issuer.yaml
    apiVersion: cert-manager.io/v1
    kind: Issuer
    metadata:
      name: cert-issuer-staging
    spec:
      acme:
        server: https://acme-staging-v02.api.letsencrypt.org/directory
        email: "[email]"
        privateKeySecretRef:
          name: myapp-tls
        solvers:
          - http01:
              ingress:
                class: nginx
    ---
    # Source: yii/templates/tests/test-connection.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: "myapp-yii-test-connection"
      labels:
        helm.sh/chart: yii-0.1.0
        app.kubernetes.io/name: yii
        app.kubernetes.io/instance: myapp
        app.kubernetes.io/version: "1.4.0"
        app.kubernetes.io/managed-by: Helm
      annotations:
        "helm.sh/hook": test
    spec:
      containers:
        - name: wget
          image: busybox
          command: ['wget']
          args: ['myapp-yii:80/health']
      restartPolicy: Never

  • Почему Microk8s ingress не редиректит к поду?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Если в сервисе прописать
    spec:
      externalIPs:
        - [server-ip]

    то доступ с браузера напрямую в под проходит
  • Почему Microk8s ingress не редиректит к поду?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    1) Да
    2)
    kubectl get deploy --all-namespaces
    NAMESPACE      NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
    kube-system    coredns                     1/1     1            1           7d18h
    kube-system    kubernetes-dashboard        1/1     1            1           7d18h
    kube-system    dashboard-metrics-scraper   1/1     1            1           7d18h
    kube-system    metrics-server              1/1     1            1           7d18h
    kube-system    calico-kube-controllers     1/1     1            1           7d18h
    cert-manager   cert-manager-cainjector     1/1     1            1           3h22m
    cert-manager   cert-manager                1/1     1            1           3h22m
    cert-manager   cert-manager-webhook        1/1     1            1           3h22m
    myapp          myapp-yii                   1/1     1            1           8h
    
    kubectl get pod --all-namespaces
    NAMESPACE      NAME                                         READY   STATUS    RESTARTS   AGE
    kube-system    kubernetes-dashboard-fc86bcc89-6lg2j         1/1     Running   0          7d18h
    kube-system    dashboard-metrics-scraper-5cb4f4bb9c-w4ddd   1/1     Running   0          7d18h
    kube-system    coredns-7745f9f87f-hjv54                     1/1     Running   0          7d18h
    kube-system    metrics-server-7747f8d66b-xhk7v              1/1     Running   0          7d18h
    kube-system    calico-node-xlmmq                            1/1     Running   0          6d
    kube-system    calico-kube-controllers-59bd664565-x9ph6     1/1     Running   0          6d
    cert-manager   cert-manager-cainjector-69d6f4d488-rz75n     1/1     Running   0          3h24m
    cert-manager   cert-manager-75d57c8d4b-nh849                1/1     Running   0          3h24m
    cert-manager   cert-manager-webhook-869b6c65c4-jqr4q        1/1     Running   0          76m
    myapp          myapp-yii-775597f9d8-2q286                   2/2     Running   0          38m
    ingress        nginx-ingress-microk8s-controller-ns42j      1/1     Running   0          6m35s
    
    kubectl logs myapp-yii-775597f9d8-2q286 -n myapp
    app container:
    [27-Oct-2023 22:49:39] NOTICE: [pool www] 'user' directive is ignored when FPM is not running as root
    [27-Oct-2023 22:49:39] NOTICE: [pool www] 'user' directive is ignored when FPM is not running as root
    [27-Oct-2023 22:49:39] NOTICE: [pool www] 'group' directive is ignored when FPM is not running as root
    [27-Oct-2023 22:49:39] NOTICE: [pool www] 'group' directive is ignored when FPM is not running as root
    [27-Oct-2023 22:49:39] NOTICE: fpm is running, pid 1
    [27-Oct-2023 22:49:39] NOTICE: ready to handle connections


    webapp container:
    /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
    /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
    10-listen-on-ipv6-by-default.sh: info: can not modify /etc/nginx/conf.d/default.conf (read-only file system?)
    /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
    /docker-entrypoint.sh: Configuration complete; ready for start up
    2023/10/27 22:49:39 [warn] 1#1: the "http2_push_preload" directive is obsolete, ignored in /etc/nginx/conf.d/default.conf:37
    nginx: [warn] the "http2_push_preload" directive is obsolete, ignored in /etc/nginx/conf.d/default.conf:37
    2023/10/27 22:49:39 [notice] 1#1: using the "epoll" event method
    2023/10/27 22:49:39 [notice] 1#1: nginx/1.25.3
    2023/10/27 22:49:39 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14) 
    2023/10/27 22:49:39 [notice] 1#1: OS: Linux 5.15.0-87-generic
    2023/10/27 22:49:39 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 65536:65536
    2023/10/27 22:49:39 [notice] 1#1: start worker processes
    2023/10/27 22:49:39 [notice] 1#1: start worker process 21
    2023/10/27 22:49:39 [notice] 1#1: start worker process 22
    2023/10/27 22:49:39 [notice] 1#1: start worker process 23
    2023/10/27 22:49:39 [notice] 1#1: start worker process 24
    2023/10/27 22:49:39 [notice] 1#1: start worker process 25
    2023/10/27 22:49:39 [notice] 1#1: start worker process 26
    2023/10/27 22:49:39 [notice] 1#1: start worker process 27
    2023/10/27 22:49:39 [notice] 1#1: start worker process 28
    2023/10/27 22:49:39 [notice] 1#1: start worker process 29
    2023/10/27 22:49:39 [notice] 1#1: start worker process 30
    2023/10/27 22:49:39 [notice] 1#1: start worker process 31
    2023/10/27 22:49:39 [notice] 1#1: start worker process 32
    10.0.1.1 - - [27/Oct/2023:22:49:47 +0000] "GET /health HTTP/1.1" 200 0 "-" "kube-probe/1.27" "-"
    10.0.1.1 - - [27/Oct/2023:22:49:57 +0000] "GET /health HTTP/1.1" 200 0 "-" "kube-probe/1.27" "-"
    10.0.1.1 - - [27/Oct/2023:22:50:07 +0000] "GET /health HTTP/1.1" 200 0 "-" "kube-probe/1.27" "-"
    10.0.1.1 - - [27/Oct/2023:22:50:17 +0000] "GET /health HTTP/1.1" 200 0 "-" "kube-probe/1.27" "-"
    10.0.1.1 - - [27/Oct/2023:22:50:27 +0000] "GET /health HTTP/1.1" 200 0 "-" "kube-probe/1.27" "-"
  • Как правильно составить запрос ActiveQuery в Yii2?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Нет, просто не совсем точно скопировал.
  • Как в статических функциях абстрактного класса получить доступ к переменным дочернего класса?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Дмитрий, Задача тех методов - сделать простое действие, например создать индекс. Делаю их статичными, чтобы лишний раз не создавать экземпляр класса. Или это не особо правильно?
  • Как в статических функциях абстрактного класса получить доступ к переменным дочернего класса?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Да, действительно работает. Я про это читал, даже написал, но phpstorm почему-то подчеркивал этот код, и я даже не стал проверять его работоспособность.

    Пишет "field not found in Searcheable"
  • Yii2. Как реализовать реферальную систему?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Дмитрий, Если на главной странице, то вот так:
    $this->redirect(Yii::app()->getHomeUrl());
  • Как проверить было ли доставлено письмо в MailGun API?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    Это я понимаю. Но не могу разобраться с тем, как мне получить информацию по определенному письму. При отправке я заношу письма в свою бд, после чего мне необходимо отметить, было ли оно доставлено.
  • Почему время на сервере и при выполнении cron-задач отличается?

    DaniLaFokc
    @DaniLaFokc Автор вопроса
    DaniLaFokc, Нашёл, но там уже стоит date.timezone = Europe/Moscow