[root@k8s-master ~]# systemctl restart docker && systemctl enable docker
[root@k8s-master ~]# systemctl restart kubelet && systemctl enable kubelet
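On systemd hosts the restart-then-enable pair can also be collapsed into one step (same effect, assuming a reasonably recent systemd):
systemctl enable --now docker
systemctl enable --now kubelet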
swapoff -a
yum list --showduplicates kubeadm --disableexcludes=kubernetes
yum install -y kubeadm-1.17.0-0 --disableexcludes=kubernetes
kubeadm version
kubectl drain $CP_NODE --ignore-daemonsets
kubectl get nodes
kubectl drain expc2018
kubectl describe nodes
export kubever=$(kubectl version | base64 | tr -d '\n')
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
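To confirm the Weave Net pods came back up after the apply (a quick check, not part of the original history):
kubectl get pods -n kube-system -o wide | grep weave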
kubeadm token generate
kubeadm join 137.15.210.118:6443 --token ctrs8q.wr3dnzzs3awh1oz3 \
--discovery-token-ca-cert-hash sha256:78d1e52f37983d795be38ace45f8e1fa8d0eda2c8e9316b94268ad5cf0a8e980
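If the token above has expired or the CA cert hash is not at hand, kubeadm can print a complete, fresh join command in one step:
kubeadm token create --print-join-command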
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker
# Install Docker CE
## Set up the repository
### Install required packages.
yum install yum-utils device-mapper-persistent-data lvm2
### Add Docker repository.
yum-config-manager --add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
## Install Docker CE.
yum update && yum install \
containerd.io-1.2.10 \
docker-ce-19.03.4 \
docker-ce-cli-19.03.4
## Create /etc/docker directory.
mkdir /etc/docker
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart Docker
systemctl daemon-reload
systemctl restart docker
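A quick check that Docker picked up the systemd cgroup driver from daemon.json (otherwise kubeadm keeps warning about cgroupfs):
docker info | grep -i "cgroup driver"
# expected output: Cgroup Driver: systemd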
Tuesday, December 31, 2019
Kubernetes: Init Log
# kubeadm init
W1230 11:40:24.994597 2582 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1230 11:40:24.994684 2582 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.0
[preflight] Running pre-flight checks
[WARNING HTTPProxy]: Connection to "https://137.15.210.118" uses proxy "http://proxy.csd.toronto.ca:8888". If that is not intended, adjust your proxy settings
[WARNING HTTPProxyCIDR]: connection to "10.96.0.0/12" uses proxy "http://proxy.csd.toronto.ca:8888". This may lead to malfunctional cluster setup. Make sure that Pod and Services IP ranges specified correctly as exceptions in proxy configuration
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [expc2018 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 137.15.210.118]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [expc2018 localhost] and IPs [137.15.210.118 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [expc2018 localhost] and IPs [137.15.210.118 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W1230 11:41:11.035610 2582 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W1230 11:41:11.036793 2582 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.507597 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node expc2018 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node expc2018 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: ctrs8q.wr3dnzzs3awh1oz3
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 137.15.210.118:6443 --token ctrs8q.wr3dnzzs3awh1oz3 \
--discovery-token-ca-cert-hash sha256:78d1e52f37983d795be38ace45f8e1fa8d0eda2c8e9316b94268ad5cf0a8e980
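Once admin.conf is in place and a pod network has been applied, a couple of quick checks confirm the control plane is healthy (not part of the init output above):
kubectl get nodes
kubectl get pods -n kube-system -o wide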
Kubernetes Nodes: Ready,SchedulingDisabled
Check node status
# kubectl get nodes
NAME      STATUS                     ROLES    AGE   VERSION
srv2020   Ready                      master   24h   v1.17.0
srv2021   Ready,SchedulingDisabled   <none>   23h   v1.17.0
Remove a node from service
# kubectl drain srv2021
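If the node runs DaemonSet pods or pods using emptyDir volumes, drain usually needs extra flags (flag names as of kubectl 1.17):
# kubectl drain srv2021 --ignore-daemonsets --delete-local-data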
Put a node back to service
# kubectl uncordon srv2021
node/srv2021 uncordoned
Kubernetes: lookup registry-1.docker.io on 17.15.20.19:53: no such host
Problem:
Error response from daemon: Get https://registry-1.docker.io/v2/: dial tcp: lookup registry-1.docker.io on 17.15.20.19:53: no such host
Reason:
Docker running behind proxy
Solution:
https://docs.docker.com/config/daemon/systemd/
# cat /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://proxy.goweekend.ca:3288/"
Environment="HTTPS_PROXY=http://proxy.goweekend.ca:3288/"
failed to find subsystem mount for required subsystem: pids
Failed to start ContainerManager failed to initialize top level QOS containers: failed to update top level Burstable QOS cgroup : failed to set supported cgroup subsystems for cgroup [kubepods burstable]: failed to find subsystem mount for required subsystem: pids
# pwd
/var/lib/kubelet
# cat kubeadm-flags.env
KUBELET_KUBEADM_ARGS="--cgroup-driver=cgroupfs --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.1 --cgroups-per-qos=false --enforce-node-allocatable="
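Edits to kubeadm-flags.env only take effect after the kubelet is restarted (assuming the usual systemd-managed kubelet):
systemctl restart kubelet
systemctl status kubelet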
Monday, December 30, 2019
NetworkReady=false
Problem: runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized #1031
Fixed by downloading portmap to /opt/cni/bin
https://github.com/projectcalico/cni-plugin/releases/download/v1.9.1/portmap
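A sketch of that download step from behind a proxy (proxy value taken from the Docker proxy section above; adjust to your environment):
export https_proxy=http://proxy.goweekend.ca:3288/
curl -L -o /opt/cni/bin/portmap https://github.com/projectcalico/cni-plugin/releases/download/v1.9.1/portmap
chmod +x /opt/cni/bin/portmap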
Wednesday, December 18, 2019
Github Beginner
You have an empty repository
To get started you will need to run these commands in your terminal.
New to Git? Learn the basic Git commands
Configure Git for the first time
git config --global user.name "System Administrator"
git config --global user.email "gitadmin@goweekend.ca"
Working with your repository
I just want to clone this repository
If you want to simply clone this empty repository then run this command in your terminal.
git clone https://sysadmin@git.goweekend.ca/scm/ar/arttest.git
My code is ready to be pushed
If you already have code ready to be pushed to this repository then run this in your terminal.
cd existing-project
git init
git add --all
git commit -m "Initial Commit"
git remote add origin https://sysadmin@git.goweekend.ca/scm/ar/arttest.git
git push -u origin master
My code is already tracked by Git
If your code is already tracked by Git then set this repository as your "origin" to push to.
cd existing-project
git remote set-url origin https://sysadmin@git.goweekend.ca/scm/ar/arttest.git
git push -u origin --all
git push origin --tags
The git rm -r command will recursively remove your folder:
git rm -r folder-name
Commit the change:
git commit -m "Remove duplicated directory"
Push the change to your remote repository:
git push origin master
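To stop tracking the folder while keeping it on disk, the cached variant of the same command works:
git rm -r --cached folder-name
git commit -m "Stop tracking folder-name"
git push origin master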
Tuesday, December 17, 2019
Oracle RAC Troubleshooting
Login as root
crsctl check cluster -all
crsctl stat res -t -init
crsctl stop crs
crsctl start crs
srvctl config scan
srvctl config scan_listener
ps -elf |grep tns
script /var/tmp/`hostname`_listener_status.txt
lsnrctl status LISTENER
lsnrctl status LISTENER_SCAN1
lsnrctl status ASMNET1LSNR_ASM
lsnrctl service LISTENER
lsnrctl service LISTENER_SCAN1
lsnrctl service ASMNET1LSNR_ASM
exit
script /var/tmp/`hostname`_listener_status.txt
script /var/tmp/`hostname`_listener_status_`date +%s`.txt
find . -mmin -20 -type f -exec zip -r /var/tmp/`hostname`_logs.zip {} \;
zip -r /var/tmp/`hostname`_logs.zip $(find . -mmin -20 -type f)
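The same approach can be pointed at the ADR trace tree so only recent diagnostic files are collected (a sketch; the diag path under ADR_BASE is an assumption for this environment):
cd /usr2/app/oracle/diag   # ADR_BASE/diag -- adjust to your environment
zip -r /var/tmp/`hostname`_diag_`date +%s`.zip $(find . -mmin -20 -type f)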
Monday, December 16, 2019
SQLPLUS debug mode
Insert the entries below in sqlnet.ora on the client machine.
DIAG_ADR_ENABLED = OFF
TRACE_LEVEL_CLIENT = SUPPORT
TRACE_DIRECTORY_CLIENT = /var/tmp/sqlplus
TRACE_TIMESTAMP_CLIENT = ON
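The trace directory must exist and be writable before running the client; a minimal sketch (the connect string is a placeholder):
mkdir -p /var/tmp/sqlplus
chmod 777 /var/tmp/sqlplus
sqlplus scott/tiger@ORCL
# client trace files (*.trc) are written to /var/tmp/sqlplus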
Tuesday, December 10, 2019
Oracle Database Listener TCP Validation
https://blog.dbi-services.com/oracle-12cr2-dataguard-and-tcp-valid_node_checking/
Standalone $ORACLE_HOME/network/admin
NAMES.DIRECTORY_PATH= (TNSNAMES, ONAMES, HOSTNAME, EZCONNECT)
SQLNET.ALLOWED_LOGON_VERSION_SERVER = 11
ADR_BASE = /usr2/app/oracle
tcp.validnode_checking = yes
#tcp.invited_nodes = (8.5.2.163, 8.5.2.7,)
tcp.invited_nodes = (127.0.0.1, 8.5.2.163, 8.5.2.7, 8.5.19.50)
tcp.excluded_nodes = (8.5.2.165)
RAC $GRID_HOME/network/admin
NAMES.DIRECTORY_PATH= (TNSNAMES, ONAMES, HOSTNAME, EZCONNECT)
SQLNET.ALLOWED_LOGON_VERSION_SERVER = 11
ADR_BASE = /usr2/app/oracle
tcp.validnode_checking = yes
# in subnet fashion:
#tcp.invited_nodes = (8.5.2.163/24, 8.5.2.7/24,)
tcp.invited_nodes = (127.0.0.1, 8.5.2.163, 8.5.2.7, 8.5.19.50)
tcp.excluded_nodes = (8.5.2.165)
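The valid-node-checking parameters are read by the listener, so they normally take effect only after the listener is restarted (standalone shown first; on RAC the listener is managed through srvctl):
lsnrctl stop
lsnrctl start
lsnrctl status
# RAC:
srvctl stop listener
srvctl start listener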