# values.yaml (forked from goharbor/harbor-helm)
expose:
# Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
# and fill the information in the corresponding section
type: ingress
tls:
# Enable TLS or not.
# Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
# Note: if the "expose.type" is "ingress" and TLS is disabled,
# the port must be included in the command when pulling/pushing images.
# Refer to https://github.com/goharbor/harbor/issues/5291 for details.
enabled: true
# The source of the tls certificate. Set as "auto", "secret"
# or "none" and fill the information in the corresponding section
# 1) auto: generate the tls certificate automatically
# 2) secret: read the tls certificate from the specified secret.
# The tls certificate can be generated manually or by cert manager
# 3) none: configure no tls certificate for the ingress. If the default
# tls certificate is configured in the ingress controller, choose this option
certSource: auto
auto:
# The common name used to generate the certificate. It is required
# when "expose.type" isn't "ingress"
commonName: ""
secret:
# The name of the secret which contains keys named:
# "tls.crt" - the certificate
# "tls.key" - the private key
secretName: ""
ingress:
hosts:
core: core.harbor.domain
# set to the type of ingress controller if it has specific requirements.
# leave as `default` for most ingress controllers.
# set to `gce` if using the GCE ingress controller
# set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
# set to `alb` if using the ALB ingress controller
# set to `f5-bigip` if using the F5 BIG-IP ingress controller
controller: default
## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
kubeVersionOverride: ""
className: ""
annotations:
# note different ingress controllers may require a different ssl-redirect annotation
# for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
ingress.kubernetes.io/ssl-redirect: "true"
ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
# ingress-specific labels
labels: {}
clusterIP:
# The name of ClusterIP service
name: harbor
# The IP address of the ClusterIP service (leave empty to acquire a dynamic IP)
staticClusterIP: ""
ports:
# The service port Harbor listens on when serving HTTP
httpPort: 80
# The service port Harbor listens on when serving HTTPS
httpsPort: 443
# Annotations on the ClusterIP service
annotations: {}
# ClusterIP-specific labels
labels: {}
nodePort:
# The name of NodePort service
name: harbor
ports:
http:
# The service port Harbor listens on when serving HTTP
port: 80
# The node port Harbor listens on when serving HTTP
nodePort: 30002
https:
# The service port Harbor listens on when serving HTTPS
port: 443
# The node port Harbor listens on when serving HTTPS
nodePort: 30003
# Annotations on the nodePort service
annotations: {}
# nodePort-specific labels
labels: {}
loadBalancer:
# The name of LoadBalancer service
name: harbor
# Set the IP if the LoadBalancer supports assigning IP
IP: ""
ports:
# The service port Harbor listens on when serving HTTP
httpPort: 80
# The service port Harbor listens on when serving HTTPS
httpsPort: 443
# Annotations on the loadBalancer service
annotations: {}
# loadBalancer-specific labels
labels: {}
sourceRanges: []
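# For example, to restrict access to a set of CIDRs (illustrative values):
# sourceRanges:
#   - 10.0.0.0/8
#   - 192.168.0.0/16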
# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands shown on the portal
# 2) populate the token service URL returned to the docker client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind the proxy, set it as the URL of proxy
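# For example (assuming "expose.type" is "nodePort" and the default HTTPS node port 30003 above):
# externalURL: https://<node-ip>:30003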
externalURL: https://core.harbor.domain
# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
enabled: true
# Set it to "keep" to avoid removing PVCs during a helm delete
# operation. Leaving it empty will delete PVCs after the chart is deleted
# (this does not apply to PVCs that are created for the internal database
# and redis components, i.e. they are never deleted automatically)
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
# Use an existing PVC, which must be created manually before it can be bound;
# specify "subPath" if the PVC is shared with other components
existingClaim: ""
# Specify the "storageClass" used to provision the volume. Or the default
# StorageClass will be used (the default).
# Set it to "-" to disable dynamic provisioning
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
annotations: {}
jobservice:
jobLog:
existingClaim: ""
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
# If external database is used, the following settings for database will
# be ignored
database:
existingClaim: ""
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
# If external Redis is used, the following settings for Redis will
# be ignored
redis:
existingClaim: ""
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
trivy:
existingClaim: ""
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
annotations: {}
# Define which storage backend is used by the registry to store
# images and charts. Refer to
# https://github.com/distribution/distribution/blob/release/2.8/docs/configuration.md#storage
# for details.
imageChartStorage:
# Specify whether to disable `redirect` for images and chart storage. For
# backends that do not support it (such as MinIO used as the `s3` storage type),
# set `disableredirect` to `true` to disable redirects.
# Refer to
# https://github.com/distribution/distribution/blob/release/2.8/docs/configuration.md#redirect
# for details.
disableredirect: false
# Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
# The secret must contain keys named "ca.crt" which will be injected into the trust store
# of registry's containers.
# caBundleSecretName:
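# For illustration only (the secret name and path are placeholders), such a secret
# could be created with, e.g.:
#   kubectl create secret generic storage-ca-bundle --from-file=ca.crt=path/to/ca.crt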
# Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift" or
# "oss", and fill in the information needed in the corresponding section. The type
# must be "filesystem" if you want to use persistent volumes for the registry
type: filesystem
filesystem:
rootdirectory: /storage
#maxthreads: 100
azure:
accountname: accountname
accountkey: base64encodedaccountkey
container: containername
#realm: core.windows.net
# To use existing secret, the key must be AZURE_STORAGE_ACCESS_KEY
existingSecret: ""
gcs:
bucket: bucketname
# The base64 encoded json file which contains the key
encodedkey: base64-encoded-json-key-file
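# For illustration, the encoded value could be produced from a service account JSON key
# file with, e.g., `base64 -w0 keyfile.json` (GNU coreutils; the file name is a placeholder)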
#rootdirectory: /gcs/object/name/prefix
#chunksize: "5242880"
# To use existing secret, the key must be GCS_KEY_DATA
existingSecret: ""
useWorkloadIdentity: false
s3:
# Set an existing secret for S3 accesskey and secretkey
# keys in the secret should be REGISTRY_STORAGE_S3_ACCESSKEY and REGISTRY_STORAGE_S3_SECRETKEY for registry
#existingSecret: ""
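# For illustration only (the secret name is a placeholder), such a secret could be created with, e.g.:
#   kubectl create secret generic s3-creds \
#     --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=<access-key> \
#     --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=<secret-key>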
region: us-west-1
bucket: bucketname
#accesskey: awsaccesskey
#secretkey: awssecretkey
#regionendpoint: http://myobjects.local
#encrypt: false
#keyid: mykeyid
#secure: true
#skipverify: false
#v4auth: true
#chunksize: "5242880"
#rootdirectory: /s3/object/name/prefix
#storageclass: STANDARD
#multipartcopychunksize: "33554432"
#multipartcopymaxconcurrency: 100
#multipartcopythresholdsize: "33554432"
swift:
authurl: https://storage.myprovider.com/v3/auth
username: username
password: password
container: containername
# keys in existing secret must be REGISTRY_STORAGE_SWIFT_PASSWORD, REGISTRY_STORAGE_SWIFT_SECRETKEY, REGISTRY_STORAGE_SWIFT_ACCESSKEY
existingSecret: ""
#region: fr
#tenant: tenantname
#tenantid: tenantid
#domain: domainname
#domainid: domainid
#trustid: trustid
#insecureskipverify: false
#chunksize: 5M
#prefix:
#secretkey: secretkey
#accesskey: accesskey
#authversion: 3
#endpointtype: public
#tempurlcontainerkey: false
#tempurlmethods:
oss:
accesskeyid: accesskeyid
accesskeysecret: accesskeysecret
region: regionname
bucket: bucketname
# key in existingSecret must be REGISTRY_STORAGE_OSS_ACCESSKEYSECRET
existingSecret: ""
#endpoint: endpoint
#internal: false
#encrypt: false
#secure: true
#chunksize: 10M
#rootdirectory: rootdirectory
# The initial password of the Harbor admin. Change it from the portal after launching Harbor,
# or provide an existing secret for it.
# The key within the secret is set via "existingSecretAdminPasswordKey" (defaults to HARBOR_ADMIN_PASSWORD)
# existingSecretAdminPassword:
existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
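# For illustration only (the secret name is a placeholder), such a secret could be created with, e.g.:
#   kubectl create secret generic harbor-admin --from-literal=HARBOR_ADMIN_PASSWORD=<password>
# and referenced via existingSecretAdminPassword: harbor-admin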
harborAdminPassword: "Harbor12345"
# The internal TLS used to secure communication between Harbor components. In order to enable https
# in each component, TLS cert files need to be provided in advance.
internalTLS:
# If internal TLS enabled
enabled: false
# enable strong ssl ciphers (default: false)
strong_ssl_ciphers: false
# There are three ways to provide the TLS certificates:
# 1) "auto": generate the certs automatically
# 2) "manual": provide the cert contents manually in the values below
# 3) "secret": read the internal certificates from secrets
certSource: "auto"
# The content of trust ca, only available when `certSource` is "manual"
trustCa: ""
# core related cert configuration
core:
# secret name for core's tls certs
secretName: ""
# Content of core's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of core's TLS key file, only available when `certSource` is "manual"
key: ""
# jobservice related cert configuration
jobservice:
# secret name for jobservice's tls certs
secretName: ""
# Content of jobservice's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of jobservice's TLS key file, only available when `certSource` is "manual"
key: ""
# registry related cert configuration
registry:
# secret name for registry's tls certs
secretName: ""
# Content of registry's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of registry's TLS key file, only available when `certSource` is "manual"
key: ""
# portal related cert configuration
portal:
# secret name for portal's tls certs
secretName: ""
# Content of portal's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of portal's TLS key file, only available when `certSource` is "manual"
key: ""
# trivy related cert configuration
trivy:
# secret name for trivy's tls certs
secretName: ""
# Content of trivy's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of trivy's TLS key file, only available when `certSource` is "manual"
key: ""
ipFamily:
# set to true if IPv6 is enabled in the cluster; currently this only affects the nginx-related components
ipv6:
enabled: true
# set to true if IPv4 is enabled in the cluster; currently this only affects the nginx-related components
ipv4:
enabled: true
imagePullPolicy: IfNotPresent
# Use this to assign a list of default imagePullSecrets
imagePullSecrets:
# - name: docker-registry-secret
# - name: internal-registry-secret
# The update strategy for deployments with persistent volumes (jobservice, registry): "RollingUpdate" or "Recreate"
# Set it to "Recreate" when ReadWriteMany (RWX) access for volumes isn't supported
updateStrategy:
type: RollingUpdate
# debug, info, warning, error or fatal
logLevel: info
# The name of the secret which contains a key named "ca.crt". Setting this enables the
# download link on the portal to download the CA certificate when the certificate isn't
# generated automatically
caSecretName: ""
# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"
# If using existingSecretSecretKey, the key must be secretKey
existingSecretSecretKey: ""
# The proxy settings for updating the Trivy vulnerability database from the Internet and for replicating
# artifacts from/to registries that cannot be reached directly
proxy:
httpProxy:
httpsProxy:
noProxy: 127.0.0.1,localhost,.local,.internal
components:
- core
- jobservice
- trivy
# Run the migration job via helm hook
enableMigrateHelmHook: false
# The custom ca bundle secret, the secret must contain key named "ca.crt"
# which will be injected into the trust store for core, jobservice, registry, trivy components
# caBundleSecretName: ""
## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:
metrics:
enabled: false
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
## Create prometheus serviceMonitor to scrape harbor metrics.
## This requires the monitoring.coreos.com/v1 CRD. Please see
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
##
serviceMonitor:
enabled: false
additionalLabels: {}
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# Metric relabel configs to apply to samples before ingestion.
metricRelabelings:
[]
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# Relabel configs to apply to samples before ingestion.
relabelings:
[]
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
trace:
enabled: false
# trace provider: jaeger or otel
# jaeger should be 1.26+
provider: jaeger
# set sample_rate to 1 to sample 100% of trace data; set it to 0.5 to sample 50% of trace data, and so forth
sample_rate: 1
# namespace used to differentiate different harbor services
# namespace:
# attributes is a key/value dict of user-defined attributes used to initialize the trace provider
# attributes:
# application: harbor
jaeger:
# jaeger supports two modes:
# collector mode(uncomment endpoint and uncomment username, password if needed)
# agent mode(uncomment agent_host and agent_port)
endpoint: http://hostname:14268/api/traces
# username:
# password:
# agent_host: hostname
# export trace data by jaeger.thrift in compact mode
# agent_port: 6831
otel:
endpoint: hostname:4318
url_path: /v1/traces
compression: false
insecure: true
# timeout is in seconds
timeout: 10
# cache layer configurations
# if this feature is enabled, harbor will cache the resources
# `project/project_metadata/repository/artifact/manifest` in redis,
# which helps to improve performance under highly concurrent manifest pulling.
cache:
# default is not enabled.
enabled: false
# keep the cache for one day by default.
expireHours: 24
## set the Container Security Context to comply with the PSP restricted policy if necessary
## each of the containers will apply the same security context
## containerSecurityContext: {} is initially an empty yaml that you can edit on demand; we just filled it with a common template for convenience
containerSecurityContext:
privileged: false
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
capabilities:
drop:
- ALL
# If the service is exposed via "ingress", Nginx will not be used
nginx:
image:
repository: goharbor/nginx-photon
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
portal:
image:
repository: goharbor/harbor-portal
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## Additional service annotations
serviceAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
core:
image:
repository: goharbor/harbor-core
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
## Startup probe values
startupProbe:
enabled: true
initialDelaySeconds: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## Additional service annotations
serviceAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
## User settings configuration json string
configureUserSettings:
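# For illustration only; the exact keys depend on your Harbor version, so treat this as a sketch:
# configureUserSettings: |
#   {
#     "auth_mode": "oidc_auth"
#   }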
# The provider for updating project quota (usage); there are 2 options, redis or db.
# By default it is implemented by db, but you can configure it to redis, which
# can improve the performance of highly concurrent pushing to the same project
# and reduce database connection spikes and usage.
# Using redis introduces some delay in updating the quota usage shown on the portal, so only
# switch the provider to redis if you run into db connection spikes caused by
# highly concurrent pushing to the same project; it brings no improvement for other scenarios.
quotaUpdateProvider: db # Or redis
# Secret is used when core server communicates with other components.
# If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret
# Must be a string of 16 chars.
secret: ""
# Fill in the name of a kubernetes secret if you want to use your own
# If using existingSecret, the key must be secret
existingSecret: ""
# Fill in the name of a kubernetes secret if you want to use your own
# TLS certificate and private key for token encryption/decryption.
# The secret must contain keys named:
# "tls.key" - the private key
# "tls.crt" - the certificate
secretName: ""
# If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
# If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
# tokenKey and tokenCert must BOTH be set or BOTH unset.
# The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
tokenKey: |
# If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
tokenCert: |
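# An illustrative (non-functional) sketch of the expected multiline format:
# tokenKey: |
#   -----BEGIN RSA PRIVATE KEY-----
#   <PEM-encoded private key material>
#   -----END RSA PRIVATE KEY-----
# tokenCert: |
#   -----BEGIN CERTIFICATE-----
#   <PEM-encoded certificate material>
#   -----END CERTIFICATE-----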
# The XSRF key. It will be generated automatically if not specified.
# If you do specify it, make sure it is 32 characters long; otherwise validation will fail at harbor-core runtime
# https://github.com/goharbor/harbor/pull/21154
xsrfKey: ""
# If using existingSecret, the key is defined by core.existingXsrfSecretKey
existingXsrfSecret: ""
# If using existingSecret, the key
existingXsrfSecretKey: CSRF_KEY
# The duration, in seconds, for asynchronously updating artifact pull_time and repository
# pull_count. Defaults to 10 seconds if it isn't set.
# eg. artifactPullAsyncFlushDuration: 10
artifactPullAsyncFlushDuration:
gdpr:
deleteUser: false
auditLogsCompliant: false
jobservice:
image:
repository: goharbor/harbor-jobservice
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
maxJobWorkers: 10
# The logger for jobs: "file", "database" or "stdout"
jobLoggers:
- file
# - database
# - stdout
# The jobLoggers sweeper duration (ignored if `jobLoggers` is `stdout`)
loggerSweeperDuration: 14 #days
notification:
webhook_job_max_retry: 3
webhook_job_http_client_timeout: 3 # in seconds
reaper:
# the max time to wait for a task to finish; if it is unfinished after max_update_hours, the task will be marked as error, but it will continue to run. The default value is 24
max_update_hours: 24
# the max time for execution in running state without new task created
max_dangling_hours: 168
# Secret is used when job service communicates with other components.
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# Use an existing secret resource
existingSecret: ""
# Key within the existing secret for the job service secret
existingSecretKey: JOBSERVICE_SECRET
registry:
registry:
image:
repository: goharbor/registry-photon
tag: dev
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
controller:
image:
repository: goharbor/harbor-registryctl
tag: dev
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# Secret is used to secure the upload state from client
# and registry storage backend.
# See: https://github.com/distribution/distribution/blob/release/2.8/docs/configuration.md#http
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# Use an existing secret resource
existingSecret: ""
# Key within the existing secret for the registry service secret
existingSecretKey: REGISTRY_HTTP_SECRET
# If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
relativeurls: false
credentials:
username: "harbor_registry_user"
password: "harbor_registry_password"
# If using existingSecret, the keys must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
existingSecret: ""
# Login and password in htpasswd string format. Mutually exclusive with `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like ArgoCD or Flux: it allows the same line to be generated each time the template is rendered, unlike the `htpasswd` function from Helm, which generates a different line each time because of the salt.
# htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
htpasswdString: ""
middleware:
enabled: false
type: cloudFront
cloudFront:
baseurl: example.cloudfront.net
keypairid: KEYPAIRID
duration: 3000s
ipfilteredby: none
# The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
# that allows access to CloudFront
privateKeySecret: "my-secret"
# enable purging of _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
trivy:
# enabled the flag to enable Trivy scanner
enabled: true
image:
# repository the repository for Trivy adapter image
repository: goharbor/trivy-adapter-photon
# tag the tag for Trivy adapter image
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# replicas the number of Pod replicas
replicas: 1
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1
memory: 1Gi
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# debugMode the flag to enable Trivy debug mode with more verbose scanning log
debugMode: false
# vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
vulnType: "os,library"
# severity a comma-separated list of severities to be checked
severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
# ignoreUnfixed the flag to display only fixed vulnerabilities
ignoreUnfixed: false
# insecure the flag to skip verifying registry certificate
insecure: false
# gitHubToken the GitHub access token to download Trivy DB
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
# timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
# Currently, the database is updated every 12 hours and published as a new release to GitHub.
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
gitHubToken: ""
# skipUpdate the flag to disable Trivy DB downloads from GitHub
#
# You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
# `/home/scanner/.cache/trivy/db/trivy.db` path.
skipUpdate: false
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
#
skipJavaDBUpdate: false
# The offlineScan option prevents Trivy from sending API requests to identify dependencies.
#
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. This means the number of detected vulnerabilities might be lower in offline mode.
# It works well if all the dependencies are available locally.
# This option doesn’t affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
offlineScan: false
# Comma-separated list of what security issues to detect. Defaults to `vuln`.
securityCheck: "vuln"
# The duration to wait for scan completion
timeout: 5m0s
database:
# if external database is used, set "type" to "external"
# and fill the connection information in "external" section
type: internal
internal:
image:
repository: goharbor/harbor-db
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
# The timeout used in livenessProbe; 1 to 5 seconds
livenessProbe:
timeoutSeconds: 1
# The timeout used in readinessProbe; 1 to 5 seconds
readinessProbe:
timeoutSeconds: 1
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
extrInitContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# The initial superuser password for internal database
password: "changeit"
# The size limit for shared memory; PostgreSQL uses it for shared_buffers
# More details see:
# https://github.com/goharbor/harbor/issues/15034
shmSizeLimit: 512Mi
initContainer:
migrator: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
permissions: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
external:
host: "192.168.0.1"
port: "5432"
username: "user"
password: "password"
coreDatabase: "registry"
# if using existing secret, the key must be "password"
existingSecret: ""
# "disable" - No SSL
# "require" - Always SSL (skip verification)
# "verify-ca" - Always SSL (verify that the certificate presented by the
# server was signed by a trusted CA)
# "verify-full" - Always SSL (verify that the certification presented by the
# server was signed by a trusted CA and the server host name matches the one
# in the certificate)
sslmode: "disable"
# The maximum number of connections in the idle connection pool per pod (core+exporter).
# If it <=0, no idle connections are retained.
maxIdleConns: 100
# The maximum number of open connections to the database per pod (core+exporter).
# If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for harbor's postgres.
maxOpenConns: 900
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
redis:
# if external Redis is used, set "type" to "external"
# and fill the connection information in "external" section
type: internal
internal:
image:
repository: goharbor/redis-photon
tag: dev
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# # jobserviceDatabaseIndex defaults to "1"
# # registryDatabaseIndex defaults to "2"
# # trivyAdapterIndex defaults to "5"
# # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
# # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
jobserviceDatabaseIndex: "1"
registryDatabaseIndex: "2"