# Debian 12 (codename "Bookworm") — switch APT to the Aliyun mirror.
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded;
# the file content is written literally, one repository entry per line.
cat > /etc/apt/sources.list << 'EOF'
deb https://mirrors.aliyun.com/debian/ bookworm main non-free non-free-firmware contrib
deb-src https://mirrors.aliyun.com/debian/ bookworm main non-free non-free-firmware contrib

deb https://mirrors.aliyun.com/debian-security/ bookworm-security main
deb-src https://mirrors.aliyun.com/debian-security/ bookworm-security main

deb https://mirrors.aliyun.com/debian/ bookworm-updates main non-free non-free-firmware contrib
deb-src https://mirrors.aliyun.com/debian/ bookworm-updates main non-free non-free-firmware contrib

deb https://mirrors.aliyun.com/debian/ bookworm-backports main non-free non-free-firmware contrib
deb-src https://mirrors.aliyun.com/debian/ bookworm-backports main non-free non-free-firmware contrib

# This system was installed using small removable media
# (e.g. netinst, live or single CD). The matching "deb cdrom"
# entries were disabled at the end of the installation process.
# For information about how to configure apt package sources,
# see the sources.list(5) manual.
EOF
执行 sysctl -p /etc/sysctl.d/kubernetes.conf 命令提示 “No such file or directory”:
sysctl: cannot stat /proc/sys/net/netfilter/nf_conntrack_max: No such file or directory
sysctl: cannot stat /proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established: No such file or directory
# 使用aliyuncs的镜像仓库 sed -i "s/registry.k8s.io\/pause:3.6/registry.aliyuncs.com\/google_containers\/pause:3.6/g" /etc/containerd/config.toml sed -i "s/registry.k8s.io\/pause:3.8/registry.aliyuncs.com\/google_containers\/pause:3.8/g" /etc/containerd/config.toml
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
root@k8s-master:~# kubectl get nodes -o wide NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME k8s-master NotReady control-plane 3m40s v1.28.2 10.2.102.241 <none> Debian GNU/Linux 12 (bookworm) 6.1.0-7-amd64 containerd://1.7.8
root@k8s-master:~# kubectl cluster-info Kubernetes control plane is running at https://k8s-master:6443 CoreDNS is running at https://k8s-master:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
6. 列出所有的 CRI 容器列表,且都为 Running 状态 root@k8s-master:~# crictl ps -a CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD b9ce7283ea12b c120fed2beb84 2 minutes ago Running kube-proxy 0 dbe51de138e00 kube-proxy-qjkfp 86347ca767e8c 7a5d9d67a13f6 3 minutes ago Running kube-scheduler 0 5ac0fb9aa591f kube-scheduler-k8s-master c4602ab9c2a32 cdcab12b2dd16 3 minutes ago Running kube-apiserver 0 35c1b0320b68f kube-apiserver-k8s-master b9c2ec66a3580 55f13c92defb1 3 minutes ago Running kube-controller-manager 0 40d312589fdfe kube-controller-manager-k8s-master 668707e9ab707 73deb9a3f7025 3 minutes ago Running etcd 0 ea104d6e8cef7 etcd-k8s-master
# 测试 API-Server 端口连通性 root@k8s-node1:~# nmap -p 6443 -Pn 10.2.102.241 Starting Nmap 7.93 ( https://nmap.org ) at 2023-11-13 02:33 CST Nmap scan report for k8s-master (10.2.102.241) Host is up (0.00026s latency).
PORT STATE SERVICE 6443/tcp open sun-sr-https MAC Address: 00:50:56:80:16:51 (VMware)
Nmap done: 1 IP address (1 host up) scanned in 0.15 seconds
# 从 “kubeadm init” 命令的输出中复制如下命令 root@k8s-node1:~# kubeadm join k8s-master:6443 --token nrd1gc.itd7fmgzfpznt1zx --discovery-token-ca-cert-hash sha256:3fa47c723879848c7ad77a4605569e9524914fa329cccbf4f6e20968c8bb67b2 [preflight] Running pre-flight checks [preflight] Reading configuration from the cluster... [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml' [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Starting the kubelet [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster: * Certificate signing request was sent to apiserver and a response was received. * The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
在 Master 节点上验证集群节点是否可用:
root@k8s-master:~# kubectl get nodes NAME STATUS ROLES AGE VERSION k8s-master NotReady control-plane 14m v1.28.2 k8s-node1 NotReady <none> 3m37s v1.28.2 k8s-node2 NotReady <none> 25s v1.28.2
root@k8s-master:~# kubectl get ns NAME STATUS AGE default Active 28m kube-node-lease Active 28m kube-public Active 28m kube-system Active 28m tigera-operator Active 43s
root@k8s-master:~# kubectl get pods -n tigera-operator NAME READY STATUS RESTARTS AGE tigera-operator-597bf4ddf6-l4j6n 1/1 Running 0 110s
# 修改 ip 池,需与初始化时一致
root@k8s-master:~# sed -i 's/192.168.0.0/10.244.0.0/' custom-resources.yaml
# 安装 Calico root@k8s-master:~# kubectl create -f custom-resources.yaml installation.operator.tigera.io/default created apiserver.operator.tigera.io/default created
root@k8s-master:~# kubectl get ns NAME STATUS AGE calico-system Active 20s default Active 33m kube-node-lease Active 33m kube-public Active 33m kube-system Active 33m tigera-operator Active 5m8s
root@k8s-master:~# kubectl apply -f kube-flannel.yml # flannel安装命令 namespace/kube-flannel created serviceaccount/flannel created clusterrole.rbac.authorization.k8s.io/flannel created clusterrolebinding.rbac.authorization.k8s.io/flannel created configmap/kube-flannel-cfg created daemonset.apps/kube-flannel-ds created
# 可以获取 Kubernetes 集群中 `kube-dns` 服务的 IP root@k8s-master:~# kubectl get svc -n kube-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 63m
# 使用 dig 命令通过指定 DNS 服务器(上面的 IP)来查询特定域名的解析 root@k8s-master:~# dig -t a www.baidu.com @10.96.0.10
4. 使用任一工作节点的主机名来访问 root@k8s-master:~# curl http://k8s-node1:31517 <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p> </body> </html>
5. 查看 Pod IP root@k8s-master:~# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-app-5777b5f95-lmrnk 1/1 Running 0 11m 10.244.169.134 k8s-node2 <none> <none> nginx-app-5777b5f95-pvkj2 1/1 Running 0 11m 10.244.36.67 k8s-node1 <none> <none>
6. 通过 IP 访问 root@k8s-master:~# nmap -p 80 -Pn 10.244.169.134 Starting Nmap 7.93 ( https://nmap.org ) at 2023-11-13 12:21 CST Nmap scan report for 10.244.169.134 Host is up (0.00029s latency).
PORT STATE SERVICE 80/tcp open http
Nmap done: 1 IP address (1 host up) scanned in 0.25 seconds
1. 使用 Helm 添加 MetalLB 的 chart 仓库 root@k8s-master:~# helm repo add metallb https://metallb.github.io/metallb "metallb" has been added to your repositories
2. 更新 Helm 的 chart 列表 root@k8s-master:~# helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "metallb" chart repository Update Complete. ⎈Happy Helming!⎈
3. 安装 MetalLB 到 metallb-system 命名空间下 root@k8s-master:~# helm install metallb metallb/metallb --namespace metallb-system --create-namespace NAME: metallb LAST DEPLOYED: Thu Dec 28 01:18:01 2023 NAMESPACE: metallb-system STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MetalLB is now running in the cluster.
Now you can configure it via its CRs. Please refer to the metallb official docs on how to use the CRs.
4. 查看 MetalLB 的 Pods 是否已成功运行 root@k8s-master:~# kubectl get pods -n metallb-system NAME READY STATUS RESTARTS AGE metallb-controller-5f9bb77dcd-m6n4r 1/1 Running 0 32s metallb-speaker-7s7m6 4/4 Running 0 32s metallb-speaker-7tbbp 4/4 Running 0 32s metallb-speaker-dmsng 4/4 Running 0 32s
# 创建的 MetalLB 的配置对象(IPAddressPool 和 L2Advertisement) root@k8s-master:~# kubectl apply -f metallb-config.yaml ipaddresspool.metallb.io/ip-pool created l2advertisement.metallb.io/l2-mode-config created
# 查看这些资源的状态 root@k8s-master:~# kubectl get ipaddresspool -n metallb-system NAME AGE ip-pool 59s root@k8s-master:~# kubectl get l2advertisement -n metallb-system NAME AGE l2-mode-config 64s