Install kubelet and kube-proxy on the master, join the master to the cluster as a node, and then taint it so that pods are not scheduled onto it:
kubectl taint nodes k8s-master1 node-role.kubernetes.io/k8s-master=:NoSchedule
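The taint can be verified with:
kubectl describe node k8s-master1 | grep -i taint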
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml
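Whether metrics-server actually comes up, and what it is logging, can be checked with (the k8s-app=metrics-server label is the default from the release manifest):
kubectl get pods -n kube-system -l k8s-app=metrics-server
kubectl logs -n kube-system -l k8s-app=metrics-server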

Here the metrics-server container logs show:

The cause is that API aggregation is not enabled.
If the cluster was deployed with kubeadm or the official kube-up.sh script, the API aggregation layer is enabled by default; with a manual binary deployment you have to enable it yourself.
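One quick way to check whether the aggregation layer is already configured is to look for the requestheader/proxy-client flags on the running kube-apiserver, for example:
ps -ef | grep kube-apiserver | tr ' ' '\n' | grep -E 'requestheader|proxy-client|aggregator'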
Enable API aggregation:
Create the file proxy-client-csr.json:
{
  "CN": "kubernetes",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "SuZhou",
      "L": "SuZhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
Generate the certificate and key:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -profile=kubernetes proxy-client-csr.json |cfssljson -bare proxy-client
cp proxy-client.pem proxy-client-key.pem /etc/kubernetes/ssl/
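Note that -profile refers to a profile defined in a cfssl signing config passed via -config; assuming the signing profiles for this deployment live in ca-config.json (adjust the file name to your own layout), the full command would be:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client
A minimal sketch of such a ca-config.json, in case it does not exist yet:
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "87600h"
      }
    }
  }
}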
Add the aggregation layer configuration to /etc/kubernetes/kube-apiserver.conf (the --requestheader-* and --proxy-client-* flags at the end of the block below).
If kube-proxy is not running on the master nodes, you must also make sure the kube-apiserver startup arguments include --enable-aggregator-routing=true.
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address=192.168.2.135 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.255.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.2.135:2379,https://192.168.2.136:2379,https://192.168.2.140:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \
--proxy-client-cert-file=/etc/kubernetes/ssl/proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/ssl/proxy-client-key.pem \
--v=4"
Restart the kube-apiserver service:
systemctl restart kube-apiserver
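After the restart it is worth confirming the apiserver came back up cleanly:
systemctl status kube-apiserver
journalctl -u kube-apiserver --no-pager -n 50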
root@k8s-master1:~# kubectl get apiservice v1beta1.metrics.k8s.io -o yaml
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apiregistration.k8s.io/v1","kind":"APIService","metadata":{"annotations":{},"labels":{"k8s-app":"metrics-server"},"name":"v1beta1.metrics.k8s.io"},"spec":{"group":"metrics.k8s.io","groupPriorityMinimum":100,"insecureSkipTLSVerify":true,"service":{"name":"metrics-server","namespace":"kube-system"},"version":"v1beta1","versionPriority":100}}
  creationTimestamp: "2024-08-30T06:40:11Z"
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
  resourceVersion: "341124"
  uid: 168aea79-79fd-4345-ace1-8b27c195f68c
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
    port: 443
  version: v1beta1
  versionPriority: 100
status:
  conditions:
  - lastTransitionTime: "2024-08-30T09:28:21Z"
    message: all checks passed
    reason: Passed
    status: "True"
    type: Available
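With the APIService reporting Available, the resource metrics API should now answer queries, for example:
kubectl top nodes
kubectl top pods -A
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes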

php-apache.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: php-apache
spec:
  selector:
    matchLabels:
      run: php-apache
  template:
    metadata:
      labels:
        run: php-apache
    spec:
      containers:
      - name: php-apache
        image: registry.k8s.io/hpa-example
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 500m
          requests:
            cpu: 200m
---
apiVersion: v1
kind: Service
metadata:
  name: php-apache
  labels:
    run: php-apache
spec:
  ports:
  - port: 8082
    protocol: TCP
    targetPort: 80
  selector:
    run: php-apache
  type: NodePort
kubectl apply -f php-apache.yaml
root@k8s-master1:~# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.255.0.1      <none>        443/TCP          23d
php-apache   NodePort    10.255.180.35   <none>        8082:48581/TCP   109m
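A quick sanity check that the Service answers (the hpa-example image simply returns OK!):
curl http://10.255.180.35:8082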
Set up autoscaling:
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
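The kubectl autoscale command above is roughly equivalent to applying an autoscaling/v2 manifest like the following sketch (the file name php-apache-hpa.yaml is only an example):
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50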
Generate load:
kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://10.255.180.35:8082; done"
Watch the HPA:
root@k8s-master1:~# kubectl get hpa php-apache --watch
NAME         REFERENCE               TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   186%/50%   1         10        1          6m8s
php-apache   Deployment/php-apache   243%/50%   1         10        4          6m11s
php-apache   Deployment/php-apache   243%/50%   1         10        5          6m21s
php-apache   Deployment/php-apache   176%/50%   1         10        5          6m31s
php-apache   Deployment/php-apache   164%/50%   1         10        5          6m41s
php-apache   Deployment/php-apache   164%/50%   1         10        7          6m51s
php-apache   Deployment/php-apache   140%/50%   1         10        7          7m1s
php-apache   Deployment/php-apache   72%/50%    1         10        7          7m11s
php-apache   Deployment/php-apache   62%/50%    1         10        7          7m31s
php-apache   Deployment/php-apache   63%/50%    1         10        7          7m41s
php-apache   Deployment/php-apache   63%/50%    1         10        7          8m1s
php-apache   Deployment/php-apache   57%/50%    1         10        7          8m11s
php-apache   Deployment/php-apache   68%/50%    1         10        7          8m32s
php-apache   Deployment/php-apache   59%/50%    1         10        7          8m42s
php-apache   Deployment/php-apache   62%/50%    1         10        7          9m2s
php-apache   Deployment/php-apache   61%/50%    1         10        7          9m12s
php-apache   Deployment/php-apache   59%/50%    1         10        7          9m32s
php-apache   Deployment/php-apache   64%/50%    1         10        7          9m42s
php-apache   Deployment/php-apache   56%/50%    1         10        7          10m
php-apache   Deployment/php-apache   62%/50%    1         10        7          10m
php-apache   Deployment/php-apache   60%/50%    1         10        7          10m
php-apache   Deployment/php-apache   65%/50%    1         10        7          10m
php-apache   Deployment/php-apache   59%/50%    1         10        7          11m
php-apache   Deployment/php-apache   64%/50%    1         10        7          11m
php-apache   Deployment/php-apache   61%/50%    1         10        7          11m
php-apache   Deployment/php-apache   47%/50%    1         10        7          11m
php-apache   Deployment/php-apache   3%/50%     1         10        7          12m
php-apache   Deployment/php-apache   0%/50%     1         10        7          12m
php-apache   Deployment/php-apache   1%/50%     1         10        7          12m
php-apache   Deployment/php-apache   0%/50%     1         10        7          13m
php-apache   Deployment/php-apache   0%/50%     1         10        7          16m
php-apache   Deployment/php-apache   0%/50%     1         10        4          17m
php-apache   Deployment/php-apache   0%/50%     1         10        4          17m
php-apache   Deployment/php-apache   0%/50%     1         10        3          17m
php-apache   Deployment/php-apache   0%/50%     1         10        3          17m
php-apache   Deployment/php-apache   0%/50%     1         10        1          18m
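Note that even after the load generator is stopped (Ctrl+C; the pod is removed automatically because of --rm) and CPU drops to 0%, the replica count stays at 7 for several minutes: the HPA waits out its downscale stabilization window (5 minutes by default) before scaling back down to 1. The scaling decisions and events can be inspected with:
kubectl describe hpa php-apache
kubectl get deployment php-apache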
- kubectl get apiservice v1beta1.metrics.k8s.io -o yaml reports an error:

kubectl describe svc metrics-server -n kube-system shows that the Endpoints are missing (the Service only gets endpoints once the metrics-server pod becomes Ready):

root@m1:/data/work# kubectl describe pod metrics-server-85d6fcf458-rhn7p -n kube-system shows the pod events:

The fix is to add one line to components.yaml: - --kubelet-insecure-tls
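The flag goes into the args list of the metrics-server container in the Deployment inside components.yaml, roughly like this (all the args shipped with the release manifest stay as they are):
      containers:
      - name: metrics-server
        args:
        # ...existing args from the release manifest...
        - --kubelet-insecure-tls    # skip verification of the kubelets' serving certificates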

Then run: kubectl delete -f components.yaml; kubectl apply -f components.yaml
kubectl describe pod metrics-server-f95dcdc9c-55b58 -n kube-system: the warning is gone.
kubectl get apiservice v1beta1.metrics.k8s.io -o yaml now shows everything is normal:
