Fork me on GitHub

使用Helm安装harbor到Kubernetes集群

Harbor

Harbor是一个开源的镜像中心,用于存储、管理容器镜像。

添加harbor的chart仓库

1
2
3

helm repo add harbor https://helm.goharbor.io
helm repo update

获取harbor的chart压缩包

1
2
3
helm fetch harbor/harbor
tar xzvf harbor-1.1.1.tgz
cd harbor

查看chart包的values.yaml

安装 Helm Chart 包最重要的当然是values.yaml文件了,我们可以通过覆盖该文件中的属性来改变配置:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355

expose:
  # 设置暴露服务的方式。将类型设置为 ingress、clusterIP 或 nodePort 并补充对应部分的信息。
  type: ingress
  tls:
    # 是否开启 tls,注意:如果类型是 ingress 并且 tls 被禁用,则在 pull/push 镜像时必须包含端口。详细查看文档:https://github.com/goharbor/harbor/issues/5291
    enabled: true
    # 如果你想使用自己的 TLS 证书和私钥,请填写这个 secret 的名称,这个 secret 必须包含名为 tls.crt 和 tls.key 的证书和私钥文件,如果没有设置则会自动生成证书和私钥文件。
    secretName: ""
    # 默认 Notary 服务会使用上面相同的证书和私钥文件,如果你想用一个独立的则填充下面的字段,注意只有类型是 ingress 的时候才需要。
    notarySecretName: ""
    # common name 是用于生成证书的,当类型是 clusterIP 或者 nodePort 并且 secretName 为空的时候才需要
    commonName: ""
  ingress:
    hosts:
      core: core.harbor.domain
      notary: notary.harbor.domain
    annotations:
      ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
  clusterIP:
    # ClusterIP 服务的名称
    name: harbor
    ports:
      httpPort: 80
      httpsPort: 443
      # Notary 服务监听端口,只有当 notary.enabled 设置为 true 的时候有效
      notaryPort: 4443
  nodePort:
    # NodePort 服务名称
    name: harbor
    ports:
      http:
        port: 80
        nodePort: 30002
      https:
        port: 443
        nodePort: 30003
      notary:
        port: 4443
        nodePort: 30004

# Harbor 核心服务外部访问 URL。主要用于:
# 1) 补全 portal 页面上面显示的 docker/helm 命令
# 2) 补全返回给 docker/notary 客户端的 token 服务 URL
#
# 格式:protocol://domain[:port]
# 1) 如果 expose.type=ingress,"domain"的值就是 expose.ingress.hosts.core 的值
# 2) 如果 expose.type=clusterIP,"domain"的值就是 expose.clusterIP.name 的值
# 3) 如果 expose.type=nodePort,"domain"的值就是 k8s 节点的 IP 地址
#
# 如果在代理后面部署 Harbor,请将其设置为代理的 URL
externalURL: https://core.harbor.domain

# 默认情况下开启数据持久化,在 k8s 集群中动态挂载卷默认需要一个 StorageClass 对象。
# 如果你有已经存在可以使用的持久卷,需要在"storageClass"中指定你的 storageClass 或者设置 "existingClaim"。
#
# 对于存储 docker 镜像和 Helm charts 包,你也可以用 "azure"、"gcs"、"s3"、"swift" 或者 "oss",直接在 "imageChartStorage" 区域设置即可
persistence:
  enabled: true
  # 设置成"keep"避免在执行 helm 删除操作期间移除 PVC,留空则在 chart 被删除后删除 PVC
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # 使用一个存在的 PVC(必须在绑定前先手动创建)
      existingClaim: ""
      # 指定"storageClass",或者使用默认的 StorageClass 对象,设置成"-"禁用动态分配挂载卷
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    chartmuseum:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    jobservice:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    # 如果使用外部的数据库服务,下面的设置将会被忽略
    database:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    # 如果使用外部的 Redis 服务,下面的设置将会被忽略
    redis:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
  # 定义使用什么存储后端来存储镜像和 charts 包,详细文档地址:https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
  imageChartStorage:
    # 针对镜像和 chart 存储是否禁用跳转,对于一些不支持的后端(例如对于使用 minio 的`s3`存储),需要禁用它。为了禁止跳转,只需要设置`disableredirect=true`即可,详细文档地址:https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect
    disableredirect: false
    # 指定存储类型:"filesystem", "azure", "gcs", "s3", "swift", "oss",在相应的区域填上对应的信息。
    # 如果你想使用 pv 则必须设置成"filesystem"类型
    type: filesystem
    filesystem:
      rootdirectory: /storage
      #maxthreads: 100
    azure:
      accountname: accountname
      accountkey: base64encodedaccountkey
      container: containername
      #realm: core.windows.net
    gcs:
      bucket: bucketname
      # The base64 encoded json file which contains the key
      encodedkey: base64-encoded-json-key-file
      #rootdirectory: /gcs/object/name/prefix
      #chunksize: "5242880"
    s3:
      region: us-west-1
      bucket: bucketname
      #accesskey: awsaccesskey
      #secretkey: awssecretkey
      #regionendpoint: http://myobjects.local
      #encrypt: false
      #keyid: mykeyid
      #secure: true
      #v4auth: true
      #chunksize: "5242880"
      #rootdirectory: /s3/object/name/prefix
      #storageclass: STANDARD
    swift:
      authurl: https://storage.myprovider.com/v3/auth
      username: username
      password: password
      container: containername
      #region: fr
      #tenant: tenantname
      #tenantid: tenantid
      #domain: domainname
      #domainid: domainid
      #trustid: trustid
      #insecureskipverify: false
      #chunksize: 5M
      #prefix:
      #secretkey: secretkey
      #accesskey: accesskey
      #authversion: 3
      #endpointtype: public
      #tempurlcontainerkey: false
      #tempurlmethods:
    oss:
      accesskeyid: accesskeyid
      accesskeysecret: accesskeysecret
      region: regionname
      bucket: bucketname
      #endpoint: endpoint
      #internal: false
      #encrypt: false
      #secure: true
      #chunksize: 10M
      #rootdirectory: rootdirectory

imagePullPolicy: IfNotPresent

logLevel: debug
# Harbor admin 初始密码,Harbor 启动后通过 Portal 修改该密码
harborAdminPassword: "Harbor12345"
# 用于加密的一个 secret key,必须是一个16位的字符串
secretKey: "not-a-secure-key"

# 如果你通过"ingress"暴露服务,则下面的 Nginx 不会被使用
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v1.7.0
  replicas: 1
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## 额外的 Deployment 的一些 annotations
  podAnnotations: {}

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v1.7.0
  replicas: 1
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

core:
  image:
    repository: goharbor/harbor-core
    tag: v1.7.0
  replicas: 1
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

adminserver:
  image:
    repository: goharbor/harbor-adminserver
    tag: v1.7.0
  replicas: 1
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v1.7.0
  replicas: 1
  maxJobWorkers: 10
  # jobs 的日志收集器:"file", "database" or "stdout"
  jobLogger: file
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

registry:
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.6.2-v1.7.0
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v1.7.0
  replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

chartmuseum:
  enabled: true
  image:
    repository: goharbor/chartmuseum-photon
    tag: v0.7.1-v1.7.0
  replicas: 1
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

clair:
  enabled: true
  image:
    repository: goharbor/clair-photon
    tag: v2.0.7-v1.7.0
  replicas: 1
  # 用于从 Internet 更新漏洞数据库的 http(s) 代理
  httpProxy:
  httpsProxy:
  # clair 更新程序的间隔,单位为小时,设置为0来禁用
  updatersInterval: 12
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

notary:
  enabled: true
  server:
    image:
      repository: goharbor/notary-server-photon
      tag: v0.6.1-v1.7.0
    replicas: 1
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
  signer:
    image:
      repository: goharbor/notary-signer-photon
      tag: v0.6.1-v1.7.0
    replicas: 1
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}

database:
  # 如果使用外部的数据库,则设置 type=external,然后填写 external 区域的一些连接信息
  type: internal
  internal:
    image:
      repository: goharbor/harbor-db
      tag: v1.7.0
    # 内部的数据库的初始化超级用户的密码
    password: "changeit"
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
  external:
    host: "192.168.0.1"
    port: "5432"
    username: "user"
    password: "password"
    coreDatabase: "registry"
    clairDatabase: "clair"
    notaryServerDatabase: "notary_server"
    notarySignerDatabase: "notary_signer"
    sslmode: "disable"
  podAnnotations: {}

redis:
  # 如果使用外部的 Redis 服务,设置 type=external,然后补充 external 部分的连接信息。
  type: internal
  internal:
    image:
      repository: goharbor/redis-photon
      tag: v1.7.0
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
  external:
    host: "192.168.0.2"
    port: "6379"
    # coreDatabaseIndex 必须设置为0
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    chartmuseumDatabaseIndex: "3"
    password: ""
  podAnnotations: {}

创建动态存储StorageClass

10.1.80.72是nfs服务器的ip地址,/var/nfs/k8s是在nfs服务器上的共享目录

nfs-client.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
kind: Deployment
# NOTE(review): extensions/v1beta1 的 Deployment 在 Kubernetes 1.16+ 已被移除,
# 新集群请改用 apps/v1(需要额外补充 spec.selector.matchLabels)
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.azk8s.cn/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # 该值必须与 StorageClass 的 provisioner 字段一致
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: "10.1.80.72"
            - name: NFS_PATH
              value: /var/nfs/k8s
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.1.80.72
            path: /var/nfs/k8s

nfs-client-sa.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

应用nfs-client-sa.yaml

1
kubectl apply -f nfs-client-sa.yaml

harbor-data-sc.yaml 创建名为harbor-data的StorageClass

1
2
3
4
5
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: harbor-data
# 必须与 nfs-client-provisioner 的 PROVISIONER_NAME 环境变量一致
provisioner: fuseim.pri/ifs

应用harbor-data-sc.yaml

1
kubectl apply -f harbor-data-sc.yaml

生成证书

用之前部署kubernetes的ca证书生成 harbor的 tls.key,tls.crt,10.1.80.77是VIP

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# 生成 cfssl 的 CSR 配置文件。
# 注意 heredoc 的正确写法是 `cat << EOF > 文件名`(原文的 `cat EOF << 文件名` 是无效语法)。
cat << EOF > registry.jaychang.cn-csr.json
{
  "CN": "harbor",
  "hosts": [
    "10.1.80.71",
    "10.1.80.72",
    "10.1.80.73",
    "10.1.80.77",
    "registry.jaychang.cn"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "HZ",
      "L": "HangZhou"
    }
  ]
}
EOF

生成harbor.pem,harbor-key.pem

1
2
3
4
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes registry.jaychang.cn-csr.json | cfssljson -bare harbor

创建tls类型的secret

使用tr -d ‘\r\n’去掉回车换行符

1
2
3
4
5
6
# ca.crt
tr -d '\r\n' < /etc/kubernetes/cert/ca.pem |base64
# tls.crt
tr -d '\r\n' < ./harbor.pem |base64
# tls.key
tr -d '\r\n' < ./harbor-key.pem |base64

根据上一步返回的结果,替换harbor-secret.yaml文件中的{ca.crt},{tls.crt},{tls.key}

1
2
3
4
5
6
7
8
9
10
apiVersion: v1
kind: Secret
metadata:
  name: harbor-secret
  namespace: kube-ops
type: kubernetes.io/tls
data:
  # 下面三个占位符需替换为前一步 base64 编码后的证书/私钥内容
  ca.crt: {ca.crt}
  tls.crt: {tls.crt}
  tls.key: {tls.key}

创建名为harbor-secret的secret

1
kubectl apply -f harbor-secret.yaml

创建harbor-values.yaml文件

其中需要修改的内容很少,我们这里主要是用到了自定义的secret,所以需要指定secretName,使用默认的ingress暴露服务,其他需要我们修改的就是数据持久化,我们需要提前为上面这些服务创建好PVC或StorageClass。比如我们这里就是使用了一个名为harbor-data的StorageClass资源对象,当然也可以根据我们实际的需求修改 accessMode 或者存储容量:(harbor-data-sc.yaml)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
expose:
  type: ingress
  tls:
    enabled: true
    # 前面创建的 tls 类型 secret
    secretName: harbor-secret
  ingress:
    hosts:
      core: registry.jaychang.cn
      notary: notary.jaychang.cn
    annotations:
      kubernetes.io/ingress.class: "nginx"
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"

externalURL: https://registry.jaychang.cn

persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      storageClass: "harbor-data"
    chartmuseum:
      storageClass: "harbor-data"
    jobservice:
      storageClass: "harbor-data"
    database:
      storageClass: "harbor-data"
    redis:
      storageClass: "harbor-data"

安装

使用上面定义的harbor-values.yaml进行安装
在解压出来的目录里面执行,注意这里的.表示当前所在目录。

1
helm install . --name harbor --namespace kube-ops -f harbor-values.yaml

查看ingress资源对象

1
2
3
$ kubectl get ing -o wide -n kube-ops
NAME HOSTS ADDRESS PORTS AGE
harbor-harbor-ingress registry.jaychang.cn,notary.jaychang.cn 80, 443 77m

查看Harbor Portal

添加完成后,在浏览器中输入registry.jaychang.cn就可以打开熟悉的 Harbor 的 Portal 界面了,当然我们配置的 Ingress 中会强制跳转到 https,所以如果你的浏览器有什么安全限制的话,需要信任我们这里 Ingress 对应的证书,证书文件可以通过查看 Secret 资源对象获取。

upload successful
原图

然后输入用户名:admin,密码:Harbor12345(当然我们也可以通过 Helm 安装的时候自己覆盖 harborAdminPassword)即可登录进入 Portal 首页:

upload successful

与此同时我们可以看到,证书起作用了(当然先要将自建的ca证书导入到受信任根证书)
upload successful

我们可以看到有很多功能,默认情况下会有一个名叫library的项目,该项目默认是公开访问权限的,进入项目可以看到里面还有 Helm Chart 包的管理,可以手动在这里上传,也可以对该项目里面的镜像进行一些配置,比如是否开启自动扫描镜像功能:

upload successful

docker cli测试login,pull,push

然后我们来测试下使用 docker cli 来进行 pull/push 镜像,由于上面我们安装的时候通过 Ingress 来暴露的 Harbor 的服务,而且强制使用了 https,所以如果我们要在终端中使用我们这里的私有仓库的话,就需要配置上相应的证书:

1
2
3
4
$ docker login registry.jaychang.cn
Username: admin
Password:
Error response from daemon: Get https://registry.jaychang.cn/v2/: x509: certificate signed by unknown authority

这是因为我们没有提供证书文件,我们将使用到的ca.crt文件复制到/etc/docker/certs.d/registry.jaychang.cn目录下面,如果该目录不存在,则创建它。ca.crt 这个证书文件我们可以通过 Ingress 中使用的 Secret 资源对象来提供:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
root@host00:/opt/k8s/test/harbor-helm# kubectl get secret harbor-secret -n kube-ops -o yaml
apiVersion: v1
data:
ca.crt: {ca.crt}
tls.crt: {tls.crt}
tls.key: {tls.key}
kind: Secret
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","data":{"ca.crt":"ca的内容"},"kind":"Secret","metadata":{"annotations":{},"name":"harbor-secret","namespace":"kube-ops"},"type":"kubernetes.io/tls"}
creationTimestamp: "2019-07-09T02:49:17Z"
name: harbor-secret
namespace: kube-ops
resourceVersion: "422332"
selfLink: /api/v1/namespaces/kube-ops/secrets/harbor-secret
uid: 24a65b58-a1f4-11e9-adf7-0800276ce4ac
type: kubernetes.io/tls

其中 data 区域中 ca.crt 对应的值就是我们需要证书,不过需要注意还需要做一个 base64 的解码,这样证书配置上以后就可以正常访问了。

将我们自建的ca.crt拷贝到/etc/docker/certs.d/registry.jaychang.cn目录后,再次login果然就可以了

1
2
3
4
5
6
7
8
$ docker login registry.jaychang.cn
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

不过由于上面的方法较为繁琐,所以一般情况下面我们在使用 docker cli 的时候是在 docker 启动参数后面添加一个–insecure-registry参数来忽略证书的校验的,在 docker 启动配置文件docker.service中修改ExecStart的启动参数:

1
ExecStart=/usr/bin/dockerd --insecure-registry registry.jaychang.cn

修改/etc/docker/daemon.json

1
2
3
4

省略...
"insecure-registries": ["registry.jaychang.cn"],
省略...

然后保存重启 docker,再使用 docker cli 就没有任何问题了

下面我们测试下docker push ,我们先pull一个busybox镜像

1
2
3
4
5
6
7
8
$ docker pull busybox
Using default tag: latest


latest: Pulling from library/busybox
8e674ad76dce: Pull complete
Digest: sha256:c94cf1b87ccb80f2e6414ef913c748b105060debda482058d2b8d0fce39f11b9
Status: Downloaded newer image for busybox:latest

然后重命名tag

1
2
3
$ docker  images
REPOSITORY TAG IMAGE ID CREATED SIZE
busybox latest e4db68de4ff2 3 weeks ago 1.22MB

1
2
3
4
5
6
$ docker tag busybox:latest registry.jaychang.cn/library/busybox:latest

$ docker push registry.jaychang.cn/library/busybox:latest
The push refers to repository [registry.jaychang.cn/library/busybox]
6194458b07fc: Pushed
latest: digest: sha256:bf510723d2cd2d4e3f5ce7e93bf1e52c8fd76831995ac3bd3f90ecc866643aff size: 527

我们去harbor portal的library下看看,busybox已经成功上传到harbor了
upload successful

镜像 push 成功,同样可以测试下 pull,为了测试我们可以先删除busybox这个镜像:

1
2
3
4
5
$ docker rmi e4db68de4ff2
Untagged: busybox:latest
Untagged: busybox@sha256:c94cf1b87ccb80f2e6414ef913c748b105060debda482058d2b8d0fce39f11b9
Deleted: sha256:e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b
Deleted: sha256:6194458b07fcf01f1483d96cd6c34302ffff7f382bb151a6d023c4e80ba3050a

pull也成功了

1
2
3
4
5
$ docker pull registry.jaychang.cn/library/busybox:latest
latest: Pulling from library/busybox
8e674ad76dce: Pull complete
Digest: sha256:bf510723d2cd2d4e3f5ce7e93bf1e52c8fd76831995ac3bd3f90ecc866643aff
Status: Downloaded newer image for registry.jaychang.cn/library/busybox:latest

到这里证明上面我们的私有 docker 仓库搭建成功了,大家可以尝试去创建一个私有的项目,然后创建一个新的用户,使用这个用户来进行 pull/push 镜像,Harbor 还具有其他的一些功能,比如镜像复制,大家可以自行测试,感受下 Harbor 和官方自带的 registry 仓库的差别。

查看harbor-secret以及自动创建的pv,pvc

查看kubernetes dashboard可以看到我们创建的harbor-secret
upload successful

可以看到由StorageClass自动创建的pv,pvc
upload successful

Q&A

如何使用PV,PVC来搞定数据持久存储

在nfs服务器上创建相应目录

1
2
3
4
5
mkdir -p /var/nfs/k8s/harbor/registry
mkdir -p /var/nfs/k8s/harbor/chartmuseum
mkdir -p /var/nfs/k8s/harbor/jobservice
mkdir -p /var/nfs/k8s/harbor/database
mkdir -p /var/nfs/k8s/harbor/redis

只需要将上面步骤中的创建动态存储StorageClass的步骤,改为使用harbor-pvc.yaml来创建pv,pvc

harbor-pvc.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
# NOTE(review): 静态创建的 NFS PV 配合 persistentVolumeReclaimPolicy: Delete 通常无法自动清理,
# 若需保留数据建议改为 Retain —— 请按实际需求确认
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-registry-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    server: 10.1.80.72
    path: /var/nfs/k8s/harbor/registry

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-registry-pvc
  namespace: kube-ops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-chartmuseum-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    server: 10.1.80.72
    path: /var/nfs/k8s/harbor/chartmuseum

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-chartmuseum-pvc
  namespace: kube-ops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-jobservice-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    server: 10.1.80.72
    path: /var/nfs/k8s/harbor/jobservice

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-jobservice-pvc
  namespace: kube-ops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-database-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    server: 10.1.80.72
    path: /var/nfs/k8s/harbor/database

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-database-pvc
  namespace: kube-ops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-redis-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    server: 10.1.80.72
    path: /var/nfs/k8s/harbor/redis

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-redis-pvc
  namespace: kube-ops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

创建pv,pvc

1
$ kubectl apply -f harbor-pvc.yaml

并修改harbor-values.yaml,指定各个persistence.persistentVolumeClaim

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
expose:
  type: ingress
  tls:
    enabled: true
    secretName: "harbor-secret"
  ingress:
    hosts:
      core: registry.jaychang.cn
      notary: notary.jaychang.cn
    annotations:
      kubernetes.io/ingress.class: "nginx"
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"

externalURL: https://registry.jaychang.cn

persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:
    # 指定前面通过 harbor-pvc.yaml 手动创建的各个 PVC
    registry:
      existingClaim: "harbor-registry-pvc"
    chartmuseum:
      existingClaim: "harbor-chartmuseum-pvc"
    jobservice:
      existingClaim: "harbor-jobservice-pvc"
    database:
      existingClaim: "harbor-database-pvc"
    redis:
      existingClaim: "harbor-redis-pvc"

安装harbor到kubernetes集群中

1
$ helm install . --name harbor --namespace kube-ops -f harbor-values.yaml

如何从kubernetes集群中卸载harbor

1
2
3
4
5
6
7
$ helm delete harbor --purge
These resources were kept due to the resource policy:
[PersistentVolumeClaim] harbor-harbor-chartmuseum
[PersistentVolumeClaim] harbor-harbor-jobservice
[PersistentVolumeClaim] harbor-harbor-registry

release "harbor" deleted

如果想保留数据的话,下面的步骤就不要执行了

删除StorageClass

1
2
$ kubectl delete -f harbor-data-sc.yaml
storageclass.storage.k8s.io "harbor-data" deleted

删除ServiceAccount,删除nfs-provisioner

1
2
3
4
5
6
7
$ kubectl delete -f nfs-client-sa.yaml
serviceaccount "nfs-client-provisioner" deleted
clusterrole.rbac.authorization.k8s.io "nfs-client-provisioner-runner" deleted
clusterrolebinding.rbac.authorization.k8s.io "run-nfs-client-provisioner" deleted

$ kubectl delete -f nfs-client.yaml
deployment.extensions "nfs-client-provisioner" deleted

kubernetes dashboard上的持久化存储卷,持久化存储卷声明里删除自动创建的pv,pvc

总结

本文介绍了如何使用helm将harbor的chart安装包下载到本地,然后根据values.yaml可配置项,用自己的自定义配置覆盖values.yaml的默认配置项,首先是介绍了如何使用StorageClass来作为数据持久化存储,在QA中又介绍了如何使用pv,pvc来作为数据持久化存储(实际使用中感觉还是pv,pvc作为数据持久化存储比较好),以及介绍了如何从kubernetes集群中卸载harbor

参考

https://www.qikqiak.com/post/harbor-quick-install/

https://github.com/goharbor/harbor-helm