Name: calico-node-hxx4v
Namespace: kube-system
Priority: 2000001000
Priority Class Name: system-node-critical
Node: dev-kube-node01.tickeron.local/192.168.11.223
Start Time: Wed, 11 Aug 2021 12:57:46 +0400
Labels: controller-revision-hash=f68f5dfc5
k8s-app=calico-node
pod-template-generation=1
Annotations: <none>
Status: Running
IP: 192.168.11.223
IPs:
IP: 192.168.11.223
Controlled By: DaemonSet/calico-node
Init Containers:
upgrade-ipam:
Container ID: docker://c09e0def7db35ca134b7578477faf36f950b9c7ec684b6623a0ccb940440001b
Image: quay.io/calico/cni:v3.17.4
Image ID: docker-pullable://quay.io/calico/cni@sha256:5f463b8cfcf11b4e4b9e564c3faad85fe34df8ccf2f535f6a25829b01b40cb72
Port: <none>
Host Port: <none>
Command:
/opt/cni/bin/calico-ipam
-upgrade
State: Terminated
Reason: Completed
Exit Code: 0
Started: Wed, 11 Aug 2021 12:57:48 +0400
Finished: Wed, 11 Aug 2021 12:57:48 +0400
Ready: True
Restart Count: 0
Environment:
KUBERNETES_NODE_NAME: (v1:spec.nodeName)
CALICO_NETWORKING_BACKEND: <set to the key 'calico_backend' of config map 'calico-config'> Optional: false
Mounts:
/host/opt/cni/bin from cni-bin-dir (rw)
/var/lib/cni/networks from host-local-net-dir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from calico-node-token-5nctz (ro)
install-cni:
Container ID: docker://ded9533efa28717d6410c6ec4339f18f073a5d7b390d8dc1df899dd7b0018227
Image: quay.io/calico/cni:v3.17.4
Image ID: docker-pullable://quay.io/calico/cni@sha256:5f463b8cfcf11b4e4b9e564c3faad85fe34df8ccf2f535f6a25829b01b40cb72
Port: <none>
Host Port: <none>
Command:
/opt/cni/bin/install
State: Terminated
Reason: Completed
Exit Code: 0
Started: Wed, 11 Aug 2021 12:57:49 +0400
Finished: Wed, 11 Aug 2021 12:57:52 +0400
Ready: True
Restart Count: 0
Environment:
CNI_CONF_NAME: 10-calico.conflist
UPDATE_CNI_BINARIES: true
CNI_NETWORK_CONFIG_FILE: /host/etc/cni/net.d/calico.conflist.template
SLEEP: false
KUBERNETES_NODE_NAME: (v1:spec.nodeName)
Mounts:
/host/etc/cni/net.d from cni-net-dir (rw)
/host/opt/cni/bin from cni-bin-dir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from calico-node-token-5nctz (ro)
Containers:
calico-node:
Container ID: docker://17bd549a62fda5de6f1194c9974a3631a1a43a451c3fca07ca64a3bbd1991483
Image: quay.io/calico/node:v3.17.4
Image ID: docker-pullable://quay.io/calico/node@sha256:5ad7469e6015496ceb4a0d3f2132ed88e174b0cdb0efe9b998c59c466d95e346
Port: <none>
Host Port: <none>
State: Running
Started: Wed, 11 Aug 2021 12:57:53 +0400
Ready: True
Restart Count: 0
Limits:
cpu: 300m
memory: 500M
Requests:
cpu: 150m
memory: 64M
Liveness: exec [/bin/calico-node -felix-live -bird-live] delay=5s timeout=1s period=10s #success=1 #failure=6
Readiness: exec [/bin/calico-node -bird-ready -felix-ready] delay=0s timeout=1s period=10s #success=1 #failure=6
Environment:
DATASTORE_TYPE: kubernetes
WAIT_FOR_DATASTORE: true
CALICO_NETWORKING_BACKEND: <set to the key 'calico_backend' of config map 'calico-config'> Optional: false
CLUSTER_TYPE: <set to the key 'cluster_type' of config map 'calico-config'> Optional: false
CALICO_K8S_NODE_REF: (v1:spec.nodeName)
CALICO_DISABLE_FILE_LOGGING: true
FELIX_DEFAULTENDPOINTTOHOSTACTION: RETURN
FELIX_HEALTHHOST: localhost
FELIX_IPTABLESBACKEND: Legacy
FELIX_IPTABLESLOCKTIMEOUTSECS: 10
CALICO_IPV4POOL_IPIP: Off
FELIX_IPV6SUPPORT: False
FELIX_LOGSEVERITYSCREEN: info
CALICO_STARTUP_LOGLEVEL: error
FELIX_USAGEREPORTINGENABLED: False
FELIX_CHAININSERTMODE: Insert
FELIX_PROMETHEUSMETRICSENABLED: False
FELIX_PROMETHEUSMETRICSPORT: 9091
FELIX_PROMETHEUSGOMETRICSENABLED: True
FELIX_PROMETHEUSPROCESSMETRICSENABLED: True
NODEIP: (v1:status.hostIP)
IP_AUTODETECTION_METHOD: can-reach=$(NODEIP)
IP: autodetect
NODENAME: (v1:spec.nodeName)
FELIX_HEALTHENABLED: true
FELIX_IGNORELOOSERPF: False
CALICO_MANAGE_CNI: true
Mounts:
/host/etc/cni/net.d from cni-net-dir (rw)
/lib/modules from lib-modules (ro)
/run/xtables.lock from xtables-lock (rw)
/var/lib/calico from var-lib-calico (rw)
/var/run/calico from var-run-calico (rw)
/var/run/secrets/kubernetes.io/serviceaccount from calico-node-token-5nctz (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
lib-modules:
Type: HostPath (bare host directory volume)
Path: /lib/modules
HostPathType:
var-run-calico:
Type: HostPath (bare host directory volume)
Path: /var/run/calico
HostPathType:
var-lib-calico:
Type: HostPath (bare host directory volume)
Path: /var/lib/calico
HostPathType:
cni-net-dir:
Type: HostPath (bare host directory volume)
Path: /etc/cni/net.d
HostPathType:
cni-bin-dir:
Type: HostPath (bare host directory volume)
Path: /opt/cni/bin
HostPathType:
xtables-lock:
Type: HostPath (bare host directory volume)
Path: /run/xtables.lock
HostPathType: FileOrCreate
host-local-net-dir:
Type: HostPath (bare host directory volume)
Path: /var/lib/cni/networks
HostPathType:
calico-node-token-5nctz:
Type: Secret (a volume populated by a Secret)
SecretName: calico-node-token-5nctz
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: :NoSchedule op=Exists
node.kubernetes.io/disk-pressure:NoSchedule op=Exists
node.kubernetes.io/memory-pressure:NoSchedule op=Exists
node.kubernetes.io/network-unavailable:NoSchedule op=Exists
node.kubernetes.io/not-ready:NoExecute op=Exists
node.kubernetes.io/pid-pressure:NoSchedule op=Exists
node.kubernetes.io/unreachable:NoExecute op=Exists
node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning Unhealthy 19m (x769 over 26h) kubelet Readiness probe failed:
Warning Unhealthy 6m54s (x719 over 26h) kubelet Liveness probe failed:
2021-08-12 09:31:02.069 [INFO][1] main.go 88: Loaded configuration from environment config=&config.Config{LogLevel:"info", WorkloadEndpointWorkers:1, ProfileWorkers:1, PolicyWorkers:1, NodeWorkers:1, Kubeconfig:"", DatastoreType:"kubernetes"}
W0812 09:31:02.348153 1 client_config.go:543] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
2021-08-12 09:31:02.358 [INFO][1] main.go 109: Ensuring Calico datastore is initialized
2021-08-12 09:31:02.611 [INFO][1] main.go 149: Getting initial config snapshot from datastore
2021-08-12 09:31:02.871 [INFO][1] main.go 152: Got initial config snapshot
2021-08-12 09:31:02.872 [INFO][1] watchersyncer.go 89: Start called
2021-08-12 09:31:02.872 [INFO][1] main.go 169: Starting status report routine
2021-08-12 09:31:02.872 [INFO][1] main.go 402: Starting controller ControllerType="Node"
2021-08-12 09:31:02.872 [INFO][1] node_controller.go 138: Starting Node controller
2021-08-12 09:31:02.872 [INFO][1] resources.go 349: Main client watcher loop
2021-08-12 09:31:02.872 [INFO][1] watchersyncer.go 127: Sending status update Status=wait-for-ready
2021-08-12 09:31:02.872 [INFO][1] node_syncer.go 40: Node controller syncer status updated: wait-for-ready
2021-08-12 09:31:02.872 [INFO][1] watchersyncer.go 147: Starting main event processing loop
2021-08-12 09:31:02.872 [INFO][1] watchercache.go 174: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/nodes"
2021-08-12 09:31:03.260 [INFO][1] watchercache.go 271: Sending synced update ListRoot="/calico/resources/v3/projectcalico.org/nodes"
2021-08-12 09:31:03.260 [INFO][1] watchersyncer.go 127: Sending status update Status=resync
2021-08-12 09:31:03.260 [INFO][1] node_syncer.go 40: Node controller syncer status updated: resync
2021-08-12 09:31:03.260 [INFO][1] watchersyncer.go 209: Received InSync event from one of the watcher caches
2021-08-12 09:31:03.260 [INFO][1] watchersyncer.go 221: All watchers have sync'd data - sending data and final sync
2021-08-12 09:31:03.260 [INFO][1] watchersyncer.go 127: Sending status update Status=in-sync
2021-08-12 09:31:03.260 [INFO][1] node_syncer.go 40: Node controller syncer status updated: in-sync
2021-08-12 09:31:03.357 [INFO][1] node_controller.go 151: Node controller is now running
2021-08-12 09:31:03.357 [INFO][1] ipam.go 45: Synchronizing IPAM data
2021-08-12 09:31:03.458 [INFO][1] hostendpoints.go 90: successfully synced all hostendpoints
2021-08-12 09:31:03.776 [INFO][1] ipam.go 191: Node and IPAM data is in sync
2021-08-12 10:12:39.283 [INFO][1] watchercache.go 96: Watch channel closed by remote - recreate watcher ListRoot="/calico/resources/v3/projectcalico.org/nodes"
2021-08-12 10:27:02.590 [INFO][1] resources.go 377: Terminating main client watcher loop
2021-08-12 10:27:02.623 [INFO][1] resources.go 349: Main client watcher loop
root@~# cat /etc/apache2/sites-enabled/cmdb.conf
<VirtualHost *:80>
ServerName site.domain.local
ServerAdmin webmaster@localhost
DocumentRoot /var/www/itop
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
a2ensite cmdb.conf
172.16.11.70 - - [28/Jun/2021:10:45:46 +0000] "GET / HTTP/1.1" 200 3477 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36"