lsmod | grep br_netfilter
# br_netfilter           32768  0
# bridge                270336  1 br_netfilter

# Configure sysctl
# NOTE: replace the NIC in net.ipv4.conf.enp1s0.rp_filter=0 below with this node's interface name
cat > /etc/sysctl.conf << "EOF"
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendor settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.sysrq=0
net.ipv4.ip_forward=0
net.ipv4.conf.all.send_redirects=0
net.ipv4.conf.default.send_redirects=0
net.ipv4.conf.all.accept_source_route=0
net.ipv4.conf.default.accept_source_route=0
net.ipv4.conf.all.accept_redirects=0
net.ipv4.conf.default.accept_redirects=0
net.ipv4.conf.all.secure_redirects=0
net.ipv4.conf.default.secure_redirects=0
net.ipv4.icmp_echo_ignore_broadcasts=1
net.ipv4.icmp_ignore_bogus_error_responses=1
net.ipv4.conf.all.rp_filter=1
net.ipv4.conf.default.rp_filter=1
net.ipv4.tcp_syncookies=1
kernel.dmesg_restrict=1
net.ipv6.conf.all.accept_redirects=0
net.ipv6.conf.default.accept_redirects=0
#
# added by io.plus lmk
# The settings below fix these kubeadm preflight errors:
# [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
# [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-ip6tables]: /proc/sys/net/bridge/bridge-nf-call-ip6tables does not exist
net.bridge.bridge-nf-call-iptables=1
# (overrides the ip_forward=0 default above; the last value wins)
net.ipv4.ip_forward=1
# ip6
net.ipv6.conf.all.forwarding=1
net.bridge.bridge-nf-call-ip6tables=1
# fs inotify limits
fs.inotify.max_user_watches = 524288
fs.inotify.max_user_instances = 512
# Nodes with multiple interfaces on special VLANs need reverse-path filtering
# disabled so their traffic can still connect (overrides rp_filter=1 above).
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.lo.rp_filter=0
net.ipv4.conf.enp1s0.rp_filter=0
EOF
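# If the lsmod check above printed nothing, load the modules and make them
# persistent before applying sysctl; a minimal sketch following the standard
# kubeadm container-runtime prerequisite (the k8s.conf file name is an
# assumption, any name under modules-load.d works):
cat > /etc/modules-load.d/k8s.conf << "EOF"
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter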
sysctl -p

# yum install tar -y

# Install containerd
# https://github.com/containerd/containerd/blob/main/docs/getting-started.md
cd ${install_dir}
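# The containerd/runc download steps are not shown in this excerpt; a sketch
# following the upstream getting-started doc (the versions and file names
# below are assumptions; the mirror path matches the rest of this script):
f1='containerd-1.7.14-linux-amd64.tar.gz'   # assumed version
wget ${ioplus_repo}/k8s_deps/${f1} -O ${f1}
tar Cxzvf /usr/local ${f1}                  # unpacks bin/containerd, bin/ctr, ...
\rm ${f1}
f1='runc.amd64'                             # assumed file name
wget ${ioplus_repo}/k8s_deps/${f1} -O ${f1}
install -m 755 ${f1} /usr/local/sbin/runc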
which runc
# /usr/local/sbin/runc
\rm ${f1}

# Install CNI plugins
cd ${install_dir}
f1='cni-plugins-linux-amd64-v1.4.1.tgz'
wget ${ioplus_repo}/k8s_deps/${f1} -O ${f1}
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin ${f1}
chown -R root:root /opt/cni
\rm ${f1}

# Configure containerd
f1='config.toml'
mkdir -p /etc/containerd/
wget ${ioplus_repo}/k8s_deps/${f1} -O /etc/containerd/${f1}

# containerd service
# If you intend to start containerd via systemd, you should also download the
# containerd.service unit file from
# https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
# into /usr/local/lib/systemd/system/containerd.service, and run the following commands:
f1='containerd.service'
mkdir -p /usr/local/lib/systemd/system/
wget ${ioplus_repo}/k8s_deps/${f1} \
  -O /usr/local/lib/systemd/system/${f1}

# Start containerd
systemctl daemon-reload
systemctl enable --now containerd
systemctl status containerd
# Active: active (running)

# Load the k8s and Cilium images with ctr
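# Optional sanity check: kubeadm-managed kubelets expect the systemd cgroup
# driver, so the downloaded config.toml should enable it (an assumption about
# what this repo's config.toml contains):
grep SystemdCgroup /etc/containerd/config.toml
# SystemdCgroup = true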
ctr -n k8s.io i ls
# WARN[0000] DEPRECATION: The `mirrors` property of `[plugins."io.containerd.grpc.v1.cri".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.0. Use `config_path` instead.
# WARN[0000] DEPRECATION: The `configs` property of `[plugins."io.containerd.grpc.v1.cri".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.0. Use `config_path` instead.
# REF  TYPE  DIGEST  SIZE  PLATFORMS  LABELS
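# The warnings above can be silenced by moving registry settings to
# containerd's hosts.toml layout; a sketch (the registry and paths below are
# placeholders, not taken from this repo's config):
#
#   # /etc/containerd/config.toml
#   [plugins."io.containerd.grpc.v1.cri".registry]
#     config_path = "/etc/containerd/certs.d"
#
#   # /etc/containerd/certs.d/docker.io/hosts.toml
#   server = "https://registry-1.docker.io"
#   [host."https://registry-1.docker.io"]
#     capabilities = ["pull", "resolve"]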
cd ${install_dir}
f1='k8s_1_29_1_cilium_1_15_3_images.tar'
wget ${ioplus_repo}/k8s_deps/${f1} \
  -O ${f1}
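# Import the offline images into containerd's k8s.io namespace (assuming the
# tar was produced with `ctr images export`):
ctr -n k8s.io images import ${f1}
\rm ${f1}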
# Alternatively, if you are the root user, you can run:
# export KUBECONFIG=/etc/kubernetes/admin.conf
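# (For completeness, the same kubeadm output also prints the non-root variant:)
# mkdir -p $HOME/.kube
# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# sudo chown $(id -u):$(id -g) $HOME/.kube/config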
# You should now deploy a pod network to the cluster.
# Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
#   https://kubernetes.io/docs/concepts/cluster-administration/addons/
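# Given the Cilium 1.15.3 images bundled above, the pod network here is
# presumably Cilium; a minimal sketch using the cilium CLI (assumed to be
# installed separately):
cilium install --version 1.15.3
cilium status --wait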
# You can now join any number of the control-plane node running the following command on each as root:
# Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
# As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
# "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
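# The actual control-plane join command (with a live token, hash, and key) is
# printed by kubeadm init; its shape is as follows (all values below are
# placeholders):
# kubeadm join <control-plane-endpoint>:6443 --token <token> \
#   --discovery-token-ca-cert-hash sha256:<hash> \
#   --control-plane --certificate-key <certificate-key>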
# Then you can join any number of worker nodes by running the following on each as root:
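# Shape of the worker join command printed by kubeadm init (placeholders):
# kubeadm join <control-plane-endpoint>:6443 --token <token> \
#   --discovery-token-ca-cert-hash sha256:<hash>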