未完待续

港控/mmm° 2021-12-08 19:25 554阅读 0赞

1 机器规划
























机器规划 角色 部署服务
wyl01 master

kube-apiserver

kube-controller-manager
kube-scheduler
etcd
wyl02 node kubelet
kube-proxy
docker
flannel
etcd
wyl03 node kubelet
kube-proxy
docker
flannel
etcd

2 安装docker并启动

配置阿里云的docker镜像yum源,然后进行安装,三台机器都要安装。

  1. [root@wyl01 opt]# yum install docker-ce -y
  2. [root@wyl01 opt]# systemctl start docker.service

3 创建TLS证书
































组件 使用的证书
etcd ca.pem, server.pem, server-key.pem
flannel ca.pem, server.pem, server-key.pem
kube-apiserver ca.pem, server.pem, server-key.pem
kubelet ca.pem, ca-key.pem
kube-proxy ca.pem, kube-proxy.pem, kube-proxy-key.pem
kubectl ca.pem, admin.pem, admin-key.pem
  1. mkdir /opt/kubernetes/{bin,cfg,ssl} -p
  2. cd /opt/kubernetes/ssl
  3. # 下载证书工具
  4. wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  5. wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  6. wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  7. #改变指令权限
  8. chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
  9. # 放到bin目录下
  10. mv cfssl_linux-amd64 /usr/local/bin/cfssl
  11. mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
  12. mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
  13. [root@wyl01 ssl]# ls
  14. certificate.sh cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 cfssl_linux-amd64

4 生成证书

在master节点上操作

  1. [root@wyl01 ssl]# cat certificate.sh
  2. cat > ca-config.json <<EOF
  3. {
  4. "signing": {
  5. "default": {
  6. "expiry": "87600h"
  7. },
  8. "profiles": {
  9. "kubernetes": {
  10. "expiry": "87600h",
  11. "usages": [
  12. "signing",
  13. "key encipherment",
  14. "server auth",
  15. "client auth"
  16. ]
  17. }
  18. }
  19. }
  20. }
  21. EOF
  22. cat > ca-csr.json <<EOF
  23. {
  24. "CN": "kubernetes",
  25. "key": {
  26. "algo": "rsa",
  27. "size": 2048
  28. },
  29. "names": [
  30. {
  31. "C": "CN",
  32. "L": "Beijing",
  33. "ST": "Beijing",
  34. "O": "k8s",
  35. "OU": "System"
  36. }
  37. ]
  38. }
  39. EOF
  40. cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
  41. #-----------------------
  42. cat > server-csr.json <<EOF
  43. {
  44. "CN": "kubernetes",
  45. "hosts": [
  46. "127.0.0.1",
  47. "192.168.52.128",
  48. "192.168.52.129",
  49. "192.168.52.130",
  50. "10.10.10.1",
  51. "kubernetes",
  52. "kubernetes.default",
  53. "kubernetes.default.svc",
  54. "kubernetes.default.svc.cluster",
  55. "kubernetes.default.svc.cluster.local"
  56. ],
  57. "key": {
  58. "algo": "rsa",
  59. "size": 2048
  60. },
  61. "names": [
  62. {
  63. "C": "CN",
  64. "L": "BeiJing",
  65. "ST": "BeiJing",
  66. "O": "k8s",
  67. "OU": "System"
  68. }
  69. ]
  70. }
  71. EOF
  72. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
  73. #-----------------------
  74. cat > admin-csr.json <<EOF
  75. {
  76. "CN": "admin",
  77. "hosts": [],
  78. "key": {
  79. "algo": "rsa",
  80. "size": 2048
  81. },
  82. "names": [
  83. {
  84. "C": "CN",
  85. "L": "BeiJing",
  86. "ST": "BeiJing",
  87. "O": "system:masters",
  88. "OU": "System"
  89. }
  90. ]
  91. }
  92. EOF
  93. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
  94. #-----------------------
  95. cat > kube-proxy-csr.json <<EOF
  96. {
  97. "CN": "system:kube-proxy",
  98. "hosts": [],
  99. "key": {
  100. "algo": "rsa",
  101. "size": 2048
  102. },
  103. "names": [
  104. {
  105. "C": "CN",
  106. "L": "BeiJing",
  107. "ST": "BeiJing",
  108. "O": "k8s",
  109. "OU": "System"
  110. }
  111. ]
  112. }
  113. EOF
  114. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
  115. # 执行脚本
  116. [root@wyl01 ssl]# sh certificate.sh
  117. # 删除其他文件,只留下证书的文件
  118. [root@wyl01 ssl]# ls |grep -v pem|xargs -i rm {}
  119. [root@wyl01 ssl]# ll
  120. total 32
  121. -rw------- 1 root root 1679 Jul 19 09:51 admin-key.pem
  122. -rw-r--r-- 1 root root 1399 Jul 19 09:51 admin.pem
  123. -rw------- 1 root root 1679 Jul 19 09:51 ca-key.pem
  124. -rw-r--r-- 1 root root 1359 Jul 19 09:51 ca.pem
  125. -rw------- 1 root root 1679 Jul 19 09:51 kube-proxy-key.pem
  126. -rw-r--r-- 1 root root 1403 Jul 19 09:51 kube-proxy.pem
  127. -rw------- 1 root root 1679 Jul 19 09:51 server-key.pem
  128. -rw-r--r-- 1 root root 1627 Jul 19 09:51 server.pem

5 部署etcd服务(三台)

  1. [root@wyl01 software]# tar -xf etcd-v3.2.12-linux-amd64.tar.gz
  2. [root@wyl01 software]# ls
  3. etcd.sh etcd-v3.2.12-linux-amd64 etcd-v3.2.12-linux-amd64.tar.gz
  4. [root@wyl01 software]# cd etcd-v3.2.12-linux-amd64/
  5. [root@wyl01 etcd-v3.2.12-linux-amd64]# ls
  6. Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
  7. [root@wyl01 etcd-v3.2.12-linux-amd64]# cp -r etcd* /opt/kubernetes/bin/
  8. [root@wyl01 etcd-v3.2.12-linux-amd64]# ll /opt/kubernetes/bin/
  9. total 32284
  10. -rwxr-xr-x 1 root root 17817664 Jul 15 14:47 etcd
  11. -rwxr-xr-x 1 root root 15234432 Jul 15 14:47 etcdctl
  12. [root@wyl01 software]# vim /opt/kubernetes/cfg/etcd
  13. # 另外2台node节点,只要修改第2行到第9行的ip为ip为本机的ip,ETCD_NAME的值分别为etcd02,etcd03.
  14. #[Member]
  15. ETCD_NAME="etcd01"
  16. ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  17. ETCD_LISTEN_PEER_URLS="https://192.168.52.128:2380"
  18. ETCD_LISTEN_CLIENT_URLS="https://192.168.52.128:2379"
  19. #[Clustering]
  20. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.52.128:2380"
  21. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.52.128:2379"
  22. ETCD_INITIAL_CLUSTER="etcd01=https://192.168.52.128:2380,etcd02=https://192.168.52.129:2380,etcd03=https://192.168.52.130:2380"
  23. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  24. ETCD_INITIAL_CLUSTER_STATE="new"
  25. [root@wyl01 software]# vim /etc/systemd/system/etcd.service
  26. [Unit]
  27. Description=Etcd Server
  28. After=network.target
  29. After=network-online.target
  30. Wants=network-online.target
  31. [Service]
  32. Type=notify
  33. EnvironmentFile=-/opt/kubernetes/cfg/etcd
  34. ExecStart=/opt/kubernetes/bin/etcd \
  35. --name=${ETCD_NAME} \
  36. --data-dir=${ETCD_DATA_DIR} \
  37. --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
  38. --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  39. --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
  40. --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
  41. --initial-cluster=${ETCD_INITIAL_CLUSTER} \
  42. --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
  43. --initial-cluster-state=new \
  44. --cert-file=/opt/kubernetes/ssl/server.pem \
  45. --key-file=/opt/kubernetes/ssl/server-key.pem \
  46. --peer-cert-file=/opt/kubernetes/ssl/server.pem \
  47. --peer-key-file=/opt/kubernetes/ssl/server-key.pem \
  48. --trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
  49. --peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
  50. Restart=on-failure
  51. LimitNOFILE=65536
  52. [Install]
  53. WantedBy=multi-user.target

启动并查看服务

  1. [root@wyl01 software]# systemctl daemon-reload
  2. [root@wyl01 software]# systemctl start etcd.service

20190719103547339.png

6 部署Flannel容器集群网络

部署master节点服务

部署kube-apiserver服务:

  1. vim kube-apiserver
  2. #!/bin/bash
  3. MASTER_ADDRESS=${1:-"192.168.52.128"}
  4. ETCD_SERVERS=${2:-"https://192.168.52.128:2379,https://192.168.52.129:2379,https://192.168.52.130:2379"}
  5. cat <<EOF >/opt/kubernetes/config/kube-apiserver
  6. KUBE_APISERVER_OPTS="--logtostderr=true \\
  7. --v=4 \\
  8. --etcd-servers=${ETCD_SERVERS} \\
  9. --insecure-bind-address=127.0.0.1 \\
  10. --bind-address=${MASTER_ADDRESS} \\
  11. --insecure-port=8080 \\
  12. --secure-port=6443 \\
  13. --advertise-address=${MASTER_ADDRESS} \\
  14. --allow-privileged=true \\
  15. --service-cluster-ip-range=10.10.10.0/24 \\
  16. --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
  17. --authorization-mode=RBAC,Node \\
  18. --kubelet-https=true \\
  19. --enable-bootstrap-token-auth \\
  20. --token-auth-file=/opt/kubernetes/config/token.csv \\
  21. --service-node-port-range=30000-50000 \\
  22. --tls-cert-file=/opt/kubernetes/ssl/server.pem \\
  23. --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
  24. --client-ca-file=/opt/kubernetes/ssl/ca.pem \\
  25. --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  26. --etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
  27. --etcd-certfile=/opt/kubernetes/ssl/server.pem \\
  28. --etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
  29. EOF
  30. cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
  31. [Unit]
  32. Description=Kubernetes API Server
  33. Documentation=https://github.com/kubernetes/kubernetes
  34. [Service]
  35. EnvironmentFile=/opt/kubernetes/config/kube-apiserver
  36. ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
  37. Restart=on-failure
  38. [Install]
  39. WantedBy=multi-user.target
  40. EOF
  41. systemctl daemon-reload
  42. systemctl enable kube-apiserver
  43. systemctl restart kube-apiserver

这里启动报错,将token文件拷贝到上面/opt/kubernetes/config目录下

  1. cp /opt/kubernetes/ssl/token.csv /opt/kubernetes/config/token.csv
  2. systemctl start kube-apiserver #启动服务

watermark_type_ZmFuZ3poZW5naGVpdGk_shadow_10_text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3d5bDk1Mjc_size_16_color_FFFFFF_t_70

部署kube-controller

  1. [root@wyl01 master]# vim controller-manager.sh #不需要修改
  2. #!/bin/bash
  3. MASTER_ADDRESS=${1:-"127.0.0.1"}
  4. cat <<EOF >/opt/kubernetes/config/kube-controller-manager
  5. KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
  6. --v=4 \\
  7. --master=${MASTER_ADDRESS}:8080 \\
  8. --leader-elect=true \\
  9. --address=127.0.0.1 \\
  10. --service-cluster-ip-range=10.10.10.0/24 \\
  11. --cluster-name=kubernetes \\
  12. --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
  13. --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  14. --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  15. --root-ca-file=/opt/kubernetes/ssl/ca.pem"
  16. EOF
  17. cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
  18. [Unit]
  19. Description=Kubernetes Controller Manager
  20. Documentation=https://github.com/kubernetes/kubernetes
  21. [Service]
  22. EnvironmentFile=-/opt/kubernetes/config/kube-controller-manager
  23. ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
  24. Restart=on-failure
  25. [Install]
  26. WantedBy=multi-user.target
  27. EOF
  28. systemctl daemon-reload
  29. systemctl enable kube-controller-manager
  30. systemctl restart kube-controller-manager
  31. [root@wyl01 master]# sh controller-manager.sh 127.0.0.1

watermark_type_ZmFuZ3poZW5naGVpdGk_shadow_10_text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3d5bDk1Mjc_size_16_color_FFFFFF_t_70 1

  1. [root@wyl01 master]# vim scheduler.sh
  2. #!/bin/bash
  3. MASTER_ADDRESS=${1:-"127.0.0.1"}
  4. cat <<EOF >/opt/kubernetes/config/kube-scheduler
  5. KUBE_SCHEDULER_OPTS="--logtostderr=true \\
  6. --v=4 \\
  7. --master=${MASTER_ADDRESS}:8080 \\
  8. --leader-elect"
  9. EOF
  10. cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
  11. [Unit]
  12. Description=Kubernetes Scheduler
  13. Documentation=https://github.com/kubernetes/kubernetes
  14. [Service]
  15. EnvironmentFile=-/opt/kubernetes/config/kube-scheduler
  16. ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
  17. Restart=on-failure
  18. [Install]
  19. WantedBy=multi-user.target
  20. EOF
  21. systemctl daemon-reload
  22. systemctl enable kube-scheduler
  23. systemctl restart kube-scheduler
  24. [root@wyl01 master]# sh scheduler.sh 127.0.0.1
  25. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
  26. [root@wyl01 master]#

watermark_type_ZmFuZ3poZW5naGVpdGk_shadow_10_text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3d5bDk1Mjc_size_16_color_FFFFFF_t_70 2

检查

20190715201832228.png

部署node节点组件:

在node节点上我们要部署2个服务,一个kubelet,还有一个是kube-proxy,先在wyl02,wyl03上运行kubelet.sh脚本,部署kubelet服务,脚本内容如下:

  1. [root@wyl03 software]# sh kubelet.sh 192.168.52.130 10.10.10.2
  2. 脚本内容:
  3. #!/bin/bash
  4. NODE_ADDRESS=${1:-"192.168.52.130"}
  5. DNS_SERVER_IP=${2:-"10.10.10.2"}
  6. cat <<EOF >/opt/kubernetes/config/kubelet
  7. KUBELET_OPTS="--logtostderr=true \\
  8. --v=4 \\
  9. --address=${NODE_ADDRESS} \\
  10. --hostname-override=${NODE_ADDRESS} \\
  11. --kubeconfig=/opt/kubernetes/config/kubelet.kubeconfig \\
  12. --experimental-bootstrap-kubeconfig=/opt/kubernetes/config/bootstrap.kubeconfig \\
  13. --cert-dir=/opt/kubernetes/ssl \\
  14. --allow-privileged=true \\
  15. --cluster-dns=${DNS_SERVER_IP} \\
  16. --cluster-domain=cluster.local \\
  17. --fail-swap-on=false \\
  18. --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
  19. EOF
  20. cat <<EOF >/usr/lib/systemd/system/kubelet.service
  21. [Unit]
  22. Description=Kubernetes Kubelet
  23. After=docker.service
  24. Requires=docker.service
  25. [Service]
  26. EnvironmentFile=-/opt/kubernetes/config/kubelet
  27. ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
  28. Restart=on-failure
  29. KillMode=process
  30. [Install]
  31. WantedBy=multi-user.target
  32. EOF
  33. systemctl daemon-reload
  34. systemctl enable kubelet
  35. systemctl restart kubelet

运行中可能会报以下错误:

  1. error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope

解决方案:

创建角色用户,在master节点创建下面这个用户

watermark_type_ZmFuZ3poZW5naGVpdGk_shadow_10_text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3d5bDk1Mjc_size_16_color_FFFFFF_t_70 3

  1. [root@wyl01 ssl]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
  2. clusterrolebinding "kubelet-bootstrap" created

创建完,我们再执行脚本,查看进程。

watermark_type_ZmFuZ3poZW5naGVpdGk_shadow_10_text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3d5bDk1Mjc_size_16_color_FFFFFF_t_70 4

部署kubelet-proxy服务,直接运行proxy.sh脚本即可,ps查看进程。

  1. [root@wyl02 software]# sh proxy.sh 192.168.52.129

启动后,一切都正常的话,也要在wyl03节点上启动这2个服务。

master节点认证

  1. [root@wyl01 ssl]# kubectl get csr
  2. NAME AGE REQUESTOR CONDITION
  3. node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8 1m kubelet-bootstrap Pending
  4. node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY 3m kubelet-bootstrap Pending
  5. # 认证
  6. [root@wyl01 ssl]# kubectl certificate approve node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8
  7. certificatesigningrequest "node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8" approved
  8. [root@wyl01 ssl]# kubectl certificate approve node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY
  9. certificatesigningrequest "node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY" approved
  10. [root@wyl01 ssl]# kubectl get csr
  11. NAME AGE REQUESTOR CONDITION
  12. node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8 3m kubelet-bootstrap Approved,Issued
  13. node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY 4m kubelet-bootstrap Approved,Issued
  14. [root@wyl01 ssl]# kubectl get node
  15. NAME STATUS ROLES AGE VERSION
  16. 192.168.52.129 Ready <none> 54s v1.9.0
  17. 192.168.52.130 Ready <none> 1m v1.9.0

运行一个nginx实例

  1. [root@wyl01 ssl]# kubectl run nginx --image=nginx --replicas=3
  2. deployment "nginx" created
  3. [root@wyl01 ssl]# kubectl get pod
  4. NAME READY STATUS RESTARTS AGE
  5. nginx-8586cf59-c7bmj 1/1 Running 0 1m
  6. nginx-8586cf59-ntwgf 1/1 Running 0 1m
  7. nginx-8586cf59-pj45w 1/1 Running 0 1m
  8. [root@wyl01 ssl]# kubectl get pod -o wide
  9. NAME READY STATUS RESTARTS AGE IP NODE
  10. nginx-8586cf59-c7bmj 1/1 Running 0 1m 172.17.57.2 192.168.52.130
  11. nginx-8586cf59-ntwgf 1/1 Running 0 1m 172.17.51.2 192.168.52.129
  12. nginx-8586cf59-pj45w 1/1 Running 0 1m 172.17.57.3 192.168.52.130
  13. [root@wyl01 ssl]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
  14. service "nginx" exposed
  15. [root@wyl01 ssl]# kubectl get svc nginx
  16. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  17. nginx NodePort 10.10.10.132 <none> 88:39327/TCP 1h
  18. node节点输入curl 10.10.10.132:88或者在pc端输入本机node节点ip:39327访问

部署dashboard管理页面

  1. [root@wyl01 ui]# ll
  2. total 12
  3. -rw-r--r-- 1 root root 1148 Jul 13 17:16 dashboard-deployment.yaml
  4. -rw-r--r-- 1 root root 612 Jul 13 17:16 dashboard-rbac.yaml
  5. -rw-r--r-- 1 root root 338 Jul 13 17:16 dashboard-service.yaml
  6. [root@wyl01 ui]# kubectl create -f dashboard-rbac.yaml
  7. serviceaccount "kubernetes-dashboard" created
  8. clusterrolebinding "kubernetes-dashboard-minimal" created
  9. [root@wyl01 ui]# kubectl create -f dashboard-deployment.yaml
  10. deployment "kubernetes-dashboard" created
  11. [root@wyl01 ui]# kubectl create -f dashboard-service.yaml
  12. service "kubernetes-dashboard" created

发表评论

表情:
评论列表 (有 0 条评论,554人围观)

还没有评论,来说两句吧...

相关阅读

    相关 17,18待续

    一个很短的序 昨天,在 2017 年最后一天跑了人生中第一次 10 公里,留下了 17 年最后的纪念。今天,在 2018 年第一天写下第一篇博客文章,开始新的人生旅程。

    相关 List接口(待续

    List接口 List集合代表一个有序集合,集合中的每个元素都有其对应的顺序索引。 List集合允许使用重复元素,可以通过索引来访问指定位置的集合的元素。List集合默认按