### Cluster network planning
- Alibaba Cloud VPC: 10.34.0.0/16
- cluster-ip (service CIDR): 10.35.0.0/16
- pod-ip (pod CIDR): 10.36.0.0/16
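These ranges differ from kubespray's defaults, so once the repository is cloned (see below) they need to be set in the inventory variables before running the playbook. A minimal sketch, assuming the group_vars layout used here; the exact file path differs between kubespray releases:

```sh
# Override kubespray's default service/pod CIDRs to match the plan above
# (inventory/group_vars/k8s-cluster.yml is an example path; it varies by kubespray version)
cat >> inventory/group_vars/k8s-cluster.yml <<EOF
kube_service_addresses: 10.35.0.0/16
kube_pods_subnet: 10.36.0.0/16
EOF
```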
### Node information
| IP address | Hostname |
| --- | --- |
| | |
| | |
### Kernel parameter changes
vi /etc/sysctl.conf
# docker
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
# swap off
vm.swappiness = 0
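The `bridge-nf-call-*` keys only exist once the `br_netfilter` module is loaded, and `vm.swappiness = 0` does not actually turn swap off, so after editing the file:

```sh
modprobe br_netfilter   # provides the net.bridge.bridge-nf-call-* keys
sysctl -p               # apply /etc/sysctl.conf without a reboot
swapoff -a              # disable swap now (also comment it out in /etc/fstab)
```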
## Configure kubespray
### Install base software
# install git
yum -y install git
# install the EPEL repository for extra CentOS packages
yum install -y epel-release
# rebuild the yum cache
yum clean all && yum makecache
# install the required packages
yum install -y python-pip python34 python-netaddr python34-pip ansible
# upgrade Jinja2
pip install --upgrade Jinja2
### Download the source
git clone https://github.com/kubernetes-incubator/kubespray
cd kubespray
# Install dependencies from ``requirements.txt``
pip install -r requirements.txt
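The actual deployment is driven by kubespray's `cluster.yml` playbook. A sketch of the remaining steps, assuming an inventory named `mycluster` and placeholder node IPs; file names such as `hosts.ini` and the sample inventory layout differ between kubespray releases:

```sh
# Build an inventory from the node IPs listed in the table above
cp -rfp inventory/sample inventory/mycluster
declare -a IPS=(10.34.0.11 10.34.0.12 10.34.0.13)   # example addresses, replace with the real nodes
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Deploy the cluster
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v
```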
### Change the image download source
sed -i 's/gcr\.io\/google_containers\//harbor-infra.aliyun-cn-shanghai-e.dr.dianrong.io\/google_containers\//g' roles/download/defaults/main.yml
sed -i 's/gcr\.io\/google_containers\//harbor-infra.aliyun-cn-shanghai-e.dr.dianrong.io\/google_containers\//g' roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
sed -i 's/gcr\.io\/google_containers\//harbor-infra.aliyun-cn-shanghai-e.dr.dianrong.io\/google_containers\//g' roles/kubernetes-apps/ansible/defaults/main.yml
sed -i 's/gcr\.io\/google-containers\//harbor-infra.aliyun-cn-shanghai-e.dr.dianrong.io\/google-containers\//g' roles/download/defaults/main.yml
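A quick sanity check that no gcr.io references remain in the three files that were edited:

```sh
# any remaining gcr.io reference means a file was missed
grep -n 'gcr\.io' \
  roles/download/defaults/main.yml \
  roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 \
  roles/kubernetes-apps/ansible/defaults/main.yml || echo "all image sources replaced"
```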
### Flannel configuration file should have been created at /run/flannel/subnet.env
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.0/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
> The flannel configuration file needs to be created manually on every node.
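A sketch of creating it by hand. FLANNEL_SUBNET is a per-node value, so the /24 shown above is only an example and must match the subnet flannel assigned to that host:

```sh
mkdir -p /run/flannel
# FLANNEL_SUBNET is the per-node /24; adjust it on each host
cat > /run/flannel/subnet.env <<EOF
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.0/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
EOF
```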
### Cleanup after a failed install
rm -rf /etc/kubernetes/
rm -rf /var/lib/kubelet
rm -rf /var/lib/etcd
rm -rf /usr/local/bin/kubectl
rm -rf /etc/systemd/system/calico-node.service
rm -rf /etc/systemd/system/kubelet.service
systemctl stop etcd.service
systemctl disable etcd.service
systemctl stop calico-node.service
systemctl disable calico-node.service
docker stop $(docker ps -q)
docker rm $(docker ps -a -q)
service docker restart
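As an alternative to cleaning up by hand, kubespray also ships a `reset.yml` playbook that automates most of this teardown (inventory path as in the deployment sketch above):

```sh
ansible-playbook -i inventory/mycluster/hosts.ini reset.yml -b -v
```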
### Remove the taint
kubectl taint node --all node.cloudprovider.kubernetes.io/uninitialized-
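To confirm the taint is gone on every node:

```sh
kubectl describe nodes | grep Taints
```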
### Change the ingress DNS policy
The ingress controller is deployed with host networking, so it inherits the host's DNS configuration and therefore cannot resolve cluster services; setting `dnsPolicy: ClusterFirstWithHostNet` fixes this.
cat ingress-nginx-controller-ds.yml
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ingress-nginx-controller
  namespace: kube-system
  labels:
    k8s-app: ingress-nginx
    version: v0.15.0
  annotations:
    prometheus.io/port: '10254'
    prometheus.io/scrape: 'true'
spec:
  selector:
    matchLabels:
      k8s-app: ingress-nginx
      version: v0.15.0
  template:
    metadata:
      labels:
        k8s-app: ingress-nginx
        version: v0.15.0
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet # change the DNS policy here
      nodeSelector:
        node-role.kubernetes.io/ingress: "true"
      terminationGracePeriodSeconds: 60
      containers:
        - name: ingress-nginx-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0
          imagePullPolicy: IfNotPresent
          args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend
            - --configmap=$(POD_NAMESPACE)/ingress-nginx
            - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services
            - --annotations-prefix=nginx.ingress.kubernetes.io
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
              hostPort: 80
            - name: https
              containerPort: 443
              hostPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          securityContext:
            runAsNonRoot: false
      serviceAccountName: ingress-nginx
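Apply the modified DaemonSet and check that the controller pods come back up with the new DNS policy:

```sh
kubectl apply -f ingress-nginx-controller-ds.yml
kubectl -n kube-system get pods -l k8s-app=ingress-nginx -o wide
```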
### Configure an HTTP proxy on Linux
The binary packages and the gcr.io images have to be downloaded through a proxy, so configure an HTTP proxy on the nodes.
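A minimal sketch; `PROXY_HOST:PORT` is a placeholder for the actual proxy address. The docker daemon pulls images itself, so it needs its own proxy settings via a systemd drop-in:

```sh
# shell-level proxy for curl/yum/git etc.
export http_proxy=http://PROXY_HOST:PORT
export https_proxy=http://PROXY_HOST:PORT
export no_proxy=localhost,127.0.0.1

# proxy for the docker daemon (used when pulling gcr.io/quay.io images)
mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
[Service]
Environment="HTTP_PROXY=http://PROXY_HOST:PORT" "HTTPS_PROXY=http://PROXY_HOST:PORT" "NO_PROXY=localhost,127.0.0.1"
EOF
systemctl daemon-reload && systemctl restart docker
```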
- Design plan
  - Deploy a k8s cluster with kubespray
  - Access dashboard v1.8.3 through ingress
  - Configure private DNS and upstream nameservers in Kubernetes (coredns forward)
  - Add taints to the master nodes
  - LimitRange
  - Configure a private registry
  - Dynamically provision Kubernetes storage volumes with NFS
  - Add HTTPS mutual (client) authentication to ingress
  - Application log collection
  - Migrate eureka to the k8s cluster
  - Image download locations
  - helm install
  - Install istio
  - zookeeper cluster deployment
  - HPA autoscaling based on custom rabbitmq-exporter metrics from prometheus
  - nacos huaweicloud