Component | Count | IP |
---|---|---|
etcd | 2 (etcd should have an odd number of members; with only two, losing either node loses quorum, so two members here is only for practice) | 192.168.144.128, 192.168.144.129 |
master | 2 | 192.168.144.128, 192.168.144.129 |
node | 2 | 192.168.144.128, 192.168.144.129 |
192.168.144.128 has Ansible installed and is used to manage the nodes.
Note: since the two masters are not fronted by haproxy/keepalived, the server address in kubeconfig is currently written directly as 192.168.144.128.
Note: some directories have to be created manually; check the corresponding roles.
Cloud-drive file: k8s+ansible notes. Role layout reference: https://github.com/easzlab/kubeasz
Reference: https://www.jianshu.com/p/fc88132924d5
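Before running any roles, it is worth confirming that Ansible on 192.168.144.128 can reach all hosts. A minimal sketch; the file name hosts.yml is an assumption, use whatever name the inventory below is saved as:

```bash
# Assumption: the YAML inventory below is saved as hosts.yml on the control node
ansible all -i hosts.yml -m ping
```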
all:
  children:
    etcd:
      hosts:
        192.168.144.128:
          ETCD_NODE_NAME: etcd1
        192.168.144.129:
          ETCD_NODE_NAME: etcd2
    k8s-master:
      hosts:
        192.168.144.128:
          API_NODE_NAME: k8s-master1
        192.168.144.129:
          API_NODE_NAME: k8s-master2
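With this inventory in place, the roles below would be applied with something like the following; the playbook name site.yml and the use of --limit are illustrative assumptions, not from the cloud-drive files:

```bash
# Assumption: site.yml maps the etcd / k8s-master groups to the roles shown below
ansible-playbook -i hosts.yml site.yml --limit etcd
ansible-playbook -i hosts.yml site.yml --limit k8s-master
```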
[root@k8s-master1 tasks]# cat main.yml
- name: print TMP_NODE value
  debug:
    msg: "{{ TMP_NODES }}"
- name: prepare some dirs
  file: name={{ item }} state=directory
  with_items:
    - "/var/lib/etcd"        # etcd working directory
- name: download etcd
  copy: src=../files/{{ item }} dest=/usr/bin/{{ item }} mode=0755
  with_items:
    - etcd
    - etcdctl
- name: create systemd unit
  template: src=etcd.service.j2 dest=/etc/systemd/system/etcd.service
- name: enable etcd boot
  shell: systemctl enable etcd
  ignore_errors: true
- name: restart etcd
  shell: systemctl daemon-reload && systemctl restart etcd
  ignore_errors: true
- name: wait etcd running
  shell: "systemctl status etcd.service|grep Active"
  register: etcd_status
  until: '"running" in etcd_status.stdout'
  retries: 8
  delay: 8
[root@k8s-master1 tasks]# etcdctl member list
27d9777c62a65711: name=etcd1 peerURLs=http://192.168.144.128:2380 clientURLs=http://192.168.144.128:2379 isLeader=false
a18ccad74b84a0ab: name=etcd2 peerURLs=http://192.168.144.129:2380 clientURLs=http://192.168.144.129:2379 isLeader=true
[root@k8s-master1 tasks]# etcdctl cluster-health
member 27d9777c62a65711 is healthy: got healthy result from http://192.168.144.128:2379
member a18ccad74b84a0ab is healthy: got healthy result from http://192.168.144.129:2379
cluster is healthy
More tasks, defaults, templates, and files are in the cloud drive.
---
- include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'
- name: Install Docker.
  package:
    name: "{{ docker_package }}"
    state: "{{ docker_package_state }}"
  notify: restart docker
- name: Ensure Docker is started and enabled at boot.
  service:
    name: docker
    state: "{{ docker_service_state }}"
    enabled: "{{ docker_service_enabled }}"
- name: Ensure handlers are notified now to avoid firewall conflicts.
  meta: flush_handlers
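A quick sanity check after the docker role has run (sketch only):

```bash
# verify docker is enabled at boot and currently running
systemctl is-enabled docker && systemctl is-active docker
docker info | grep -i 'server version'
```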
More tasks, defaults, templates, and files are in the cloud drive.
[root@k8s-master1 tasks]# cat main.yml
---
- include: openssl.yml
- include: setup.yml
[root@k8s-master1 tasks]# cat openssl.yml
- name: print node name11
  debug:
    msg: "{{ API_NODE_NAME }}"
- name: create ca dir
  file: name={{ item }} state=directory
  with_items:
    - /root/ssl
- name: distribute CA certificate
  copy: src=../files/{{ item }} dest=/root/ssl/{{ item }} mode=0755
  with_items:
    - ca.key
    - ca.crt
- name: print node name
  debug:
    msg: "{{ API_NODE_NAME }}"
- name: create kubernetes certificate signing request config
  template: src=apiserver_ssl.cnf.j2 dest=/root/ssl/apiserver_ssl.cnf
- name: create private key
  shell:
    chdir=/root/ssl
    openssl genrsa -out apiserver.key 2048
- name: create certificate signing request
  shell:
    chdir=/root/ssl
    openssl req -new -key apiserver.key -subj "/CN={{ API_NODE_NAME }}" -config apiserver_ssl.cnf -out apiserver.csr
- name: create certificate
  shell:
    chdir=/root/ssl
    openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -extensions v3_req -extfile apiserver_ssl.cnf -out apiserver.crt
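After these tasks run, the generated apiserver certificate can be checked against the distributed CA. A minimal sketch, run on the target host:

```bash
cd /root/ssl
openssl verify -CAfile ca.crt apiserver.crt             # should print apiserver.crt: OK
openssl x509 -in apiserver.crt -noout -subject -dates   # CN should match API_NODE_NAME
```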
More tasks, defaults, templates, and files are in the cloud drive.
[root@k8s-master1 tasks]# cat main.yml
- name: download kubelet, kube-proxy binaries and basic cni plugins
  copy: src=../files/{{ item }} dest=/usr/bin/{{ item }} mode=0755
  with_items:
    - kubectl
    - kubelet
    - kube-proxy
- name: distribute kubeconfig file
  template: src=kubeconfig.j2 dest=/root/kubernetes/kubelet.kubeconfig
- name: create kubelet key
  shell:
    chdir=/root/ssl
    openssl genrsa -out client.key
- name: create kubelet csr and crt
  shell:
    chdir=/root/ssl
    openssl req -new -key client.key -subj "/CN={{ inventory_hostname }}" -out client.csr &&
    openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 5000 -out client.crt
- name: create kubelet systemd unit file
  template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
- name: enable kubelet service at boot
  shell: systemctl enable kubelet
  ignore_errors: true
- name: start kubelet service
  shell: systemctl daemon-reload && systemctl restart kubelet
- name: create kube-proxy config
  template: src=kube-proxyconfig.j2 dest=/root/kubernetes/kube-proxyconfig
- name: create kube-proxy systemd unit
  template: src=kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
- name: enable kube-proxy service at boot
  shell: systemctl enable kube-proxy
  ignore_errors: true
- name: start kube-proxy service
  shell: systemctl daemon-reload && systemctl restart kube-proxy
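A quick check that the node role actually worked (sketch; run on a master with a working kubeconfig):

```bash
systemctl is-active kubelet kube-proxy   # both should report "active"
kubectl get node -o wide                 # the node should register and go Ready
```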
More tasks, defaults, templates, and files are in the cloud drive.
Reference: https://blog.csdn.net/wangzhkai/article/details/80294762. Out of laziness, flannel was not written as an Ansible role; it was installed manually on both machines.
Installation steps:
0. Store the flannel network config in etcd:
// 172.16.0.0/16 is the docker network segment
etcdctl set /kubernetes/network/config '{ "Network": "172.16.0.0/16", "Backend": { "Type": "vxlan" } }'
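You can read the key back to confirm it landed in etcd (sketch):

```bash
etcdctl get /kubernetes/network/config
```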
1. Upload flanneld and mk-docker-opts.sh to the servers and make them executable: chmod 777 flanneld and chmod 777 mk-docker-opts.sh.
2. Write the systemd unit file and enable it at boot:
[root@k8s-master1 tasks]# cat /etc/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
ExecStart=/root/flannel/flanneld \
-etcd-endpoints=http://192.168.144.128:2379,http://192.168.144.129:2379 \
-etcd-prefix=/kubernetes/network \
-iface=ens33
ExecStartPost=/root/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
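Enabling and starting the unit (step 2 above), a minimal sketch:

```bash
systemctl daemon-reload
systemctl enable flanneld
systemctl start flanneld
```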
3. Modify the docker configuration (docker.service drop-in):
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
4. Restart flannel, then restart docker.
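A sketch of what to check after both restarts, so docker really picked up the flannel subnet:

```bash
systemctl restart flanneld && systemctl restart docker
cat /run/flannel/subnet.env    # FLANNEL_SUBNET plus the options written by mk-docker-opts.sh
ip addr show flannel.1         # vxlan interface created by flanneld
ip addr show docker0           # docker0 should now sit inside the flannel subnet
```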
CA
One-way TLS: the server is configured with the CA certificate, the server certificate, and the server private key; the client configures nothing, or only the CA certificate.
Mutual TLS: the server is configured with the CA certificate, the server certificate, and the server private key; the client is configured with the CA certificate, and the client-side parameters must also include the client private key and client certificate.
With mutual authentication the server also needs to know who is calling it, so the client has to present its own certificate and private key.
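As a concrete illustration against the apiserver built above (a sketch; the endpoints and cert paths are taken from these notes, not guaranteed to work unchanged):

```bash
# one-way: the client only verifies the server against the CA
curl --cacert /root/ssl/ca.crt https://192.168.144.128:6443/healthz

# mutual: the client additionally proves its identity with its own cert + key
curl --cacert /root/ssl/ca.crt \
     --cert /root/ssl/client.crt --key /root/ssl/client.key \
     https://192.168.144.128:6443/api
```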
Pods get rebuilt and their IPs change,
so we cannot call a pod directly by its pod IP in our programs; when the pod fails and is recreated, its IP is different.
A Service wraps a set of backend pods (selected with a label selector),
so we access the pods through the Service instead. The Service gets a virtual IP (cluster IP) that does not change.
"Calling a Service" is really just a concept; concretely it is implemented by the kube-proxy component.
kube-proxy opens a socket server on the local node to accept requests and forwards them to a backend pod (round-robin load balancing; session affinity can be configured so that requests from one client IP always go to the same pod).
While running, kube-proxy dynamically creates the iptables rules for each service,
so that requests to node IP + nodePort or to cluster IP + service port
are ultimately redirected to that service's proxy port.
So inside the cluster we can reach a service through its cluster IP,
but the cluster IP is allocated by the cluster,
which means we cannot know it in advance; we have to wait until the service is created and look it up with kubectl get svc.
That is why we use kube-dns, so a service can be reached directly by its domain name.
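To see this concretely, the cluster IP and the iptables rules kube-proxy programmed can be inspected like this (sketch):

```bash
kubectl get svc                       # the CLUSTER-IP column is the virtual IP described above
iptables-save | grep KUBE-SERVICES    # chains kube-proxy maintains for service traffic
iptables-save | grep KUBE-NODEPORTS   # nodePort redirection rules
```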
DNS resolves a domain name to an IP address.
SkyDNS
etcd — stores the data SkyDNS needs, e.g. the domain-name-to-IP mappings
kube2sky — watches Kubernetes Service resources for changes and writes them into etcd (it calls the Kubernetes list and watch APIs through the kubernetes Service)
skydns — reads the data from etcd and does the actual name resolution
Domain name format:
<service_name>.<namespace>.svc.<cluster_domain>
Usually just the service_name is enough to call a service,
because the namespace defaults to default.
cluster_domain is configured on the kubelet with --cluster-domain,
and kube2sky's --domain must be set to the same value.
So that pods can call other Services, kubelet automatically writes the DNS configuration (/etc/resolv.conf) into each container.
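A quick way to see this from inside a pod (sketch; the pod name is just an example from later in these notes, cluster.local is assumed as --cluster-domain, and nslookup must exist in the image):

```bash
kubectl exec -it mysql-rc-9zgcp -- cat /etc/resolv.conf   # nameserver + search domains injected by kubelet
kubectl exec -it mysql-rc-9zgcp -- nslookup kubernetes    # short name, namespace defaults to "default"
kubectl exec -it mysql-rc-9zgcp -- nslookup kubernetes.default.svc.cluster.local
```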
Question: can the etcd container be left out? Can kube-dns share the etcd that the k8s cluster already has?
In theory they could share etcd. From an isolation point of view it is still better to keep them separate, so that the kube-dns service cannot affect the stability of the whole k8s cluster. Also, if kube-dns is viewed as a microservice, its internal components should not depend on anything external; it should be able to run on its own.
// resolved
Understanding CA certificates: see the separate skyDns article.
// resolved
// etcd reports the error below -- this is an etcd version issue and does not affect use; I installed etcd with yum install etcd
// reference: https://www.cnblogs.com/iiiiher/p/7879587.html
watch chan error: etcdserver: mvcc: required revision has been compacted
// resolved
// the pause image cannot be pulled
docker pull mirrorgooglecontainers/pause:3.1
// retag it as your own (xujinding:3.1) and configure that name in the kubelet start parameters
docker tag mirrorgooglecontainers/pause:3.1 xujinding:3.1
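The kubelet flag that points at the retagged image is --pod-infra-container-image; a sketch of the change (the unit path matches kubelet.service.j2 above):

```bash
# add to the kubelet start parameters in /etc/systemd/system/kubelet.service:
#   --pod-infra-container-image=xujinding:3.1
systemctl daemon-reload && systemctl restart kubelet
```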
// unresolved
//kube-controller-manager.ERROR
no cloud provider provided, services of type LoadBalancer will fail
kubelet.ERROR
E0523 11:04:06.491945 9040 kubelet.go:1252] Image garbage collection failed once. Stats initialization may not have completed yet: failed to get imageFs info: unable to find data for container /
// resolved
kubectl get node/pod works fine, but kubectl log <pod-name> fails with the error below, even when the full command with certificates shown below is used:
[root@k8s-master1 ~]# kubectl --server=https://192.168.144.128:6443 --certificate-authority=/root/ssl/ca.crt --client-certificate=/root/ssl/client.crt --client-key=/root/ssl/client.key log mysql-rc-9zgcp
error: You must be logged in to the server (the server has asked for the client to provide credentials ( pods/log mysql-rc-9zgcp))
The cause: anonymous access is disabled in kubelet.service (--anonymous-auth=false).
Changing it to true fixes it.
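A sketch of the change, editing the unit rendered from kubelet.service.j2 directly (adjusting the template and re-running the role would also work):

```bash
sed -i 's/--anonymous-auth=false/--anonymous-auth=true/' /etc/systemd/system/kubelet.service
systemctl daemon-reload && systemctl restart kubelet
```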
Resolved
[root@k8s-master2 ~]# kubectl exec -it mysql-rc-v2-l8wtj /bin/sh
error: unable to upgrade connection: Unauthorized
Cannot exec into the container.
Cause: same as the previous issue.
Open question??
Suppose I create a service on machine 128 and expose nodePort 33060.
Why can it also be reached through machine 129's IP?
// unresolved
ansible runs very slowly
// unresolved
Problem with reading variables from the ansible hosts file
all:
  hosts:
    49.108.221.144:
      api_node_name: "ddddddddddddddd"
      ansible_port: 22
      ansible_user: root
      ansible_ssh_pass: root
    blog:
      ansible_host: 49.108.221.144
      ansible_port: 22
      ansible_user: root
      ansible_ssh_pass: root
  children:
    node_testiii:
      hosts:
        39.108.231.124:
          api_node_name: "haha124124"
        49.108.221.144:
          api_node_name: "haha14444444"
    node_test:
      hosts:
        39.108.231.124:
          api_node_name: "haha22221241241234"
        49.108.221.144:
          api_node_name: "haha22224444444444444"
What does ansible print for api_node_name?? What is the output??
---
- hosts: node_test
  tasks:
    - name: test
      debug:
        msg: "{{ api_node_name }}"
    - name:
      debug:
        msg: "{{ api_node_name.second }}"
    - name: jjj
      debug:
        msg: "dddd"
All of these cases left me confused,
so in practice just give the variables different names in different groups.
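Rather than guessing which group's value wins, ansible can print the merged variables it actually computed for a host; a sketch against the inventory above (assuming it is saved as hosts.yml):

```bash
ansible-inventory -i hosts.yml --host 49.108.221.144   # merged host + group vars for one host
ansible-inventory -i hosts.yml --list                  # the whole inventory as ansible sees it
```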