
Commit f8bbb58

Merge branch 'binlog-config' of https://github.com/weekface/tidb-operator into binlog-config

2 parents: 9531a77 + 051876c

12 files changed: +289 -3 lines

charts/tidb-backup/templates/backup-job.yaml (+4)

@@ -26,6 +26,10 @@ spec:
       - name: backup
         image: {{ .Values.image.backup }}
         imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
+        {{- if .Values.resources }}
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+        {{- end }}
         command:
         - /bin/sh
         - -c
charts/tidb-backup/values.yaml (+8)

@@ -23,6 +23,14 @@ extraLabels: {}
 # kubectl create secret generic backup-secret --namespace=<namespace> --from-literal=user=root --from-literal=password=<password>
 secretName: backup-secret
 
+resources:
+  limits:
+    cpu: 4000m
+    memory: 8Gi
+  requests:
+    cpu: 2000m
+    memory: 4Gi
+
 storage:
   className: local-storage
   size: 100Gi
charts/tidb-drainer/.helmignore (new file, +21)

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
charts/tidb-drainer/Chart.yaml (new file, +13)

@@ -0,0 +1,13 @@
+apiVersion: v1
+description: A Helm chart for TiDB Binlog drainer.
+name: tidb-drainer
+version: dev
+home: https://github.com/pingcap/tidb-operator
+sources:
+  - https://github.com/pingcap/tidb-operator
+keywords:
+  - newsql
+  - htap
+  - database
+  - mysql
+  - cdc
charts/tidb-drainer/templates/NOTES.txt (new file, +6)

@@ -0,0 +1,6 @@
+StatefulSet {{ include "drainer.name" . }} installed.
+
+1. Watch if the drainer instance gets created:
+   watch kubectl get pod -n {{ .Release.Namespace }} {{ include "drainer.name" . }}-0
+2. Check if the drainer instance works properly:
+   kubectl logs -f -n {{ .Release.Namespace }} {{ include "drainer.name" . }}-0
charts/tidb-drainer/templates/_helpers.tpl (new file, +25)

@@ -0,0 +1,25 @@
+{{- define "drainer.name" -}}
+{{ .Values.clusterName }}-{{ .Release.Name }}-drainer
+{{- end -}}
+
+{{/*
+Encapsulate config data for consistent digest calculation
+*/}}
+{{- define "drainer-configmap.data" -}}
+config-file: |-
+{{- if .Values.config }}
+{{ .Values.config | indent 2 }}
+{{- end -}}
+{{- end -}}
+
+{{- define "drainer-configmap.name" -}}
+{{ include "drainer.name" . }}-{{ include "drainer-configmap.data" . | sha256sum | trunc 8 }}
+{{- end -}}
+
+{{- define "helm-toolkit.utils.template" -}}
+{{- $name := index . 0 -}}
+{{- $context := index . 1 -}}
+{{- $last := base $context.Template.Name }}
+{{- $wtf := $context.Template.Name | replace $last $name -}}
+{{ include $wtf $context }}
+{{- end -}}
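
The drainer-configmap.name helper suffixes the ConfigMap name with the first 8 characters of the SHA-256 digest of the rendered config, so a changed config produces a new ConfigMap name instead of mutating the existing object. A rough sketch of the same scheme in plain shell (the exact string Helm hashes depends on template rendering, so the digest shown is illustrative only):

    printf 'config-file: |-\n  [syncer]\n  worker-count = 16\n' \
      | sha256sum | cut -c1-8
    # prints an 8-hex-char suffix such as 0a1b2c3d (illustrative); a different
    # config yields a different suffix and hence a different ConfigMap name

Because the StatefulSet below references the ConfigMap by this digest-suffixed name, a config change on helm upgrade rolls the drainer pod.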
charts/tidb-drainer/templates/configmap.yaml (new file, +12)

@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "drainer-configmap.name" . }}
+  labels:
+    app.kubernetes.io/name: {{ include "drainer-configmap.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: drainer
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+data:
+{{ include "drainer-configmap.data" . | indent 2 }}
charts/tidb-drainer/templates/service.yaml (new file, +19)

@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "drainer.name" . }}
+  labels:
+    app.kubernetes.io/name: {{ include "drainer.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: drainer
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+spec:
+  clusterIP: None
+  ports:
+  - name: drainer
+    port: 8249
+  selector:
+    app.kubernetes.io/name: {{ include "drainer.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: drainer
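
clusterIP: None makes this a headless Service, so each pod of the StatefulSet gets a stable per-pod DNS record of the form <pod>.<service>. A sketch of the lookup the start script later performs (assuming clusterName=demo and a release named drainer, both hypothetical):

    nslookup demo-drainer-drainer-0.demo-drainer-drainer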
charts/tidb-drainer/templates/statefulset.yaml (new file, +78)

@@ -0,0 +1,78 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "drainer.name" . }}
+  labels:
+    app.kubernetes.io/name: {{ include "drainer.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: drainer
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "drainer.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+      app.kubernetes.io/managed-by: {{ .Release.Service }}
+      app.kubernetes.io/component: drainer
+  serviceName: {{ include "drainer.name" . }}
+  replicas: 1
+  template:
+    metadata:
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/path: "/metrics"
+        prometheus.io/port: "8249"
+      labels:
+        app.kubernetes.io/name: {{ include "drainer.name" . }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
+        app.kubernetes.io/managed-by: {{ .Release.Service }}
+        app.kubernetes.io/component: drainer
+    spec:
+      containers:
+      - name: drainer
+        image: {{ .Values.baseImage }}:{{ .Values.clusterVersion }}
+        imagePullPolicy: {{ .Values.imagePullPolicy | default "IfNotPresent" }}
+        command:
+        - /bin/sh
+        - -c
+        - |-
+{{ tuple "scripts/_start_drainer.sh.tpl" . | include "helm-toolkit.utils.template" | indent 10 }}
+        ports:
+        - containerPort: 8249
+          name: drainer
+        volumeMounts:
+        - name: data
+          mountPath: /data
+        - name: config
+          mountPath: /etc/drainer
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+      volumes:
+      - name: config
+        configMap:
+          name: {{ include "drainer-configmap.name" . }}
+          items:
+          - key: config-file
+            path: drainer.toml
+    {{- with .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with .Values.affinity }}
+      affinity:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with .Values.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: {{ .Values.storageClassName }}
+      resources:
+        requests:
+          storage: {{ .Values.storage }}
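
To check what the chart renders before installing it, helm template can be pointed at a single file; a sketch using helm 2's -x flag (the flag and values shown are assumptions, adjust for your helm version):

    helm template charts/tidb-drainer -x templates/statefulset.yaml \
      --set clusterName=demo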
charts/tidb-drainer/templates/scripts/_start_drainer.sh.tpl (new file, +34)

@@ -0,0 +1,34 @@
+set -euo pipefail
+
+domain=`echo ${HOSTNAME}`.{{ include "drainer.name" . }}
+
+elapseTime=0
+period=1
+threshold=30
+while true; do
+  sleep ${period}
+  elapseTime=$(( elapseTime+period ))
+
+  if [[ ${elapseTime} -ge ${threshold} ]]
+  then
+    echo "waiting for drainer domain ready timeout" >&2
+    exit 1
+  fi
+
+  if nslookup ${domain} 2>/dev/null
+  then
+    echo "nslookup domain ${domain} success"
+    break
+  else
+    echo "nslookup domain ${domain} failed" >&2
+  fi
+done
+
+/drainer \
+-L={{ .Values.logLevel | default "info" }} \
+-pd-urls=http://{{ .Values.clusterName }}-pd:2379 \
+-addr=`echo ${HOSTNAME}`.{{ include "drainer.name" . }}:8249 \
+-config=/etc/drainer/drainer.toml \
+-disable-detect={{ .Values.disableDetect | default false }} \
+-initial-commit-ts={{ .Values.initialCommitTs | default 0 }} \
+-log-file=""

charts/tidb-drainer/values.yaml (new file, +52)

@@ -0,0 +1,52 @@
+# Default values for tidb-drainer.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# clusterName is the name of the TiDB cluster to drain binlogs from.
+clusterName: demo
+clusterVersion: v3.0.1
+
+baseImage: pingcap/tidb-binlog
+imagePullPolicy: IfNotPresent
+
+logLevel: info
+# storageClassName refers to a StorageClass, which provides a way for administrators
+# to describe the "classes" of storage they offer. Different classes might map to
+# quality-of-service levels, backup policies, or arbitrary policies determined by
+# the cluster administrators. Refer to https://kubernetes.io/docs/concepts/storage/storage-classes
+storageClassName: local-storage
+storage: 10Gi
+# disable causality detection
+disableDetect: false
+# if the drainer doesn't have a checkpoint, use this initial commit TS as the checkpoint
+initialCommitTs: 0
+
+# Refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/drainer/drainer.toml
+config: |
+  [syncer]
+  worker-count = 16
+  detect-interval = 10
+  disable-dispatch = false
+  ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
+  safe-mode = false
+  txn-batch = 20
+  db-type = "pb"
+  [syncer.to]
+  dir = "/data/pb"
+  compression = "gzip"
+
+resources: {}
+# We usually recommend not to specify default resources and to leave this as a conscious
+# choice for the user. This also increases the chances the chart runs on environments with
+# little resources, such as Minikube. If you do want to specify resources, uncomment the
+# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+# limits:
+#   cpu: 100m
+#   memory: 128Mi
+# requests:
+#   cpu: 100m
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
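
A sketch of installing the chart against an existing TiDB cluster and later changing the sink config (helm 2 syntax; the release, namespace, and my-values.yaml names are hypothetical):

    helm install charts/tidb-drainer --name demo-drainer --namespace tidb \
      --set clusterName=demo --set clusterVersion=v3.0.1
    # editing config in my-values.yaml and upgrading renders a new
    # digest-suffixed ConfigMap, which rolls the drainer pod
    helm upgrade demo-drainer charts/tidb-drainer -f my-values.yaml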

tests/actions.go (+17 -3)

@@ -2359,13 +2359,22 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
 		return false, nil
 	}
 
+	// v1.0.0 doesn't have the affinity test case
+	// https://github.com/pingcap/tidb-operator/pull/746
+	isv1 := info.OperatorTag == "v1.0.0"
+
 	for _, pod := range pods.Items {
 		if !oa.pumpHealth(info, pod.Spec.Hostname) {
 			glog.Errorf("some pods is not health %s", pumpStatefulSetName)
 			// return false, nil
 		}
+
+		if isv1 {
+			continue
+		}
+
 		glog.Info(pod.Spec.Affinity)
-		if len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
+		if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
 			return true, fmt.Errorf("pump pod %s/%s should have affinity set", pod.Namespace, pod.Name)
 		}
 		glog.Info(pod.Spec.Tolerations)
@@ -2415,8 +2424,13 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
 			glog.Errorf("some pods is not health %s", drainerStatefulSetName)
 			// return false, nil
 		}
+
+		if isv1 {
+			continue
+		}
+
 		glog.Info(pod.Spec.Affinity)
-		if len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
+		if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
 			return true, fmt.Errorf("drainer pod %s/%s should have spec.affinity set", pod.Namespace, pod.Name)
 		}
 		glog.Info(pod.Spec.Tolerations)
@@ -2705,7 +2719,7 @@ func (oa *operatorActions) CheckManualPauseTiDB(info *TidbClusterConfig) error {
 	}
 
 	// wait for the tidb statefulset is upgrade to the protect one
-	if err = wait.Poll(DefaultPollInterval, DefaultPollTimeout, fn); err != nil {
+	if err = wait.Poll(DefaultPollInterval, 30*time.Minute, fn); err != nil {
 		return fmt.Errorf("fail to upgrade to annotation TiDB pod : %v", err)
 	}
