Skip to content

Commit 913310d

Browse files
Ben Howard (merged by openshift-merge-robot)
Ben Howard
authored and committed Nov 17, 2020
Gangplank/ocp: use per-function kubernetes client
The use of a package-level client for Kubernetes restricts running Gangplank in a cluster. To make testing easier, func `k8sAPIClient()` has been renamed to `k8sInClusterClient()`, which is called by code that requires an in-cluster client. The goal of this refactor is to allow for: - mocking the Kubernetes APIs for testing - making testing of a buildapiv1.Build object easier - allowing `cosa remote` to directly build worker pods without going through an intermediary pod - reducing the use of package-scope variables Signed-off-by: Ben Howard <[email protected]>
1 parent e2864c4 commit 913310d

File tree

8 files changed

+194
-135
lines changed

8 files changed

+194
-135
lines changed
 

‎gangplank/cmd/ci.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ func init() {
3232
// useful.
3333
func runCI(c *cobra.Command, args []string) {
3434
defer cancel()
35-
m, err := ocp.NewCIBuilder(ctx, cosaOverrideImage, serviceAccount, specFile)
35+
m, err := ocp.NewCIBuilder(ctx, true, cosaOverrideImage, serviceAccount, specFile)
3636
if err != nil {
3737
log.Fatalf("failed to define CI builder: %v", err)
3838
}

‎gangplank/ocp/bc.go

+18-14
Original file line numberDiff line numberDiff line change
@@ -74,13 +74,8 @@ func newBC() (*buildConfig, error) {
7474
}
7575
}
7676

77-
// Init the API Client for k8s itself
78-
// The API client requires that the pod/buildconfig include a service account.
79-
k8sAPIErr := k8sAPIClient()
80-
if k8sAPIErr != nil && k8sAPIErr != ErrNotInCluster {
81-
log.Errorf("Failed to initalized Kubernetes in cluster API client: %v", k8sAPIErr)
82-
return nil, k8sAPIErr
83-
}
77+
// Open the Kubernetes Client
78+
ac, pn, kubeErr := k8sInClusterClient()
8479

8580
// Init the OpenShift Build API Client.
8681
buildAPIErr := ocpBuildClient()
@@ -89,6 +84,10 @@ func newBC() (*buildConfig, error) {
8984
return nil, buildAPIErr
9085
}
9186

87+
if kubeErr != nil && buildAPIErr != nil {
88+
return nil, ErrInvalidOCPMode
89+
}
90+
9291
// Query Kubernetes to find out what this pods network identity is.
9392
// TODO: remove this CI exception once we have a kubernetes mock
9493
if !forceNotInCluster {
@@ -102,7 +101,7 @@ func newBC() (*buildConfig, error) {
102101
v.HostIP = apiBuild.Annotations[fmt.Sprintf(ciAnnotation, "IP")]
103102
} else {
104103
log.Info("Querying for pod ID")
105-
hIP, err := getPodIP(v.HostPod)
104+
hIP, err := getPodIP(ac, pn, v.HostPod)
106105
if err != nil {
107106
log.Errorf("Failed to determine buildconfig's pod")
108107
}
@@ -117,11 +116,6 @@ func newBC() (*buildConfig, error) {
117116
}).Info("found build.openshift.io/buildconfig identity")
118117
}
119118

120-
// Build requires either a Build API Client or Kuberneres Cluster client.
121-
if k8sAPIErr != nil && buildAPIErr != nil {
122-
return nil, ErrInvalidOCPMode
123-
}
124-
125119
if _, err := os.Stat(cosaSrvDir); os.IsNotExist(err) {
126120
return nil, fmt.Errorf("Context dir %q does not exist", cosaSrvDir)
127121
}
@@ -158,6 +152,11 @@ func (bc *buildConfig) Exec(ctx context.Context) error {
158152
return err
159153
}
160154

155+
ac, pn, err := k8sInClusterClient()
156+
if err != nil {
157+
return fmt.Errorf("failed create a kubernetes client: %w", err)
158+
}
159+
161160
// Define, but do not start minio.
162161
m := newMinioServer()
163162
m.dir = cosaSrvDir
@@ -297,7 +296,12 @@ binary build interface.`)
297296
}
298297

299298
index := n + 1
300-
if err := createWorkerPod(ctx, index, eVars); err != nil {
299+
cpod, err := NewCosaPodder(ctx, apiBuild, ac, pn, index)
300+
if err != nil {
301+
log.Errorf("FAILED TO CREATE POD DEFINITION: %v", err)
302+
continue
303+
}
304+
if err := cpod.WorkerRunner(eVars); err != nil {
301305
log.Errorf("FAILED stage: %v", err)
302306
}
303307
}

‎gangplank/ocp/bc_ci_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ func init() {
1919

2020
func TestNoEnv(t *testing.T) {
2121
if _, err := newBC(); err != ErrInvalidOCPMode {
22-
t.Errorf("failed to raise: %v", ErrInvalidOCPMode)
22+
t.Errorf("failed to raise error\n want: %v\n got: %v", ErrInvalidOCPMode, err)
2323
}
2424
}
2525

‎gangplank/ocp/ci.go

+78-53
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,18 @@ import (
3131
*/
3232

3333
type ci struct {
34-
jobSpecFile string
35-
bc *buildConfig
36-
ciBuild string
34+
apibuild *buildapiv1.Build
35+
bc *buildConfig
36+
js *spec.JobSpec
37+
38+
pod *v1.Pod
39+
40+
hostname string
41+
image string
42+
ipaddr string
43+
jobSpecFile string
44+
projectNamespace string
45+
serviceAccount string
3746
}
3847

3948
// CIBuilder is the manual/unbounded Build interface.
@@ -57,16 +66,14 @@ const (
5766

5867
func (c *ci) Exec(ctx context.Context) error {
5968
log.Info("Executing unbounded builder")
69+
if c.bc == nil {
70+
return errors.New("buildconfig is nil")
71+
}
6072
return c.bc.Exec(ctx)
6173
}
6274

6375
// NewCIBuilder returns a CIBuilder ready for execution.
64-
func NewCIBuilder(ctx context.Context, image, serviceAccount, jsF string) (CIBuilder, error) {
65-
// Require a Kubernetes Service Account Client
66-
if err := k8sAPIClient(); err != nil {
67-
return nil, fmt.Errorf("failed create a kubernetes client: %w", err)
68-
}
69-
76+
func NewCIBuilder(ctx context.Context, inCluster bool, image, serviceAccount, jsF string) (CIBuilder, error) {
7077
// Directly inject the jobspec
7178
js, err := spec.JobSpecFromFile(jsF)
7279
if err != nil {
@@ -75,56 +82,71 @@ func NewCIBuilder(ctx context.Context, image, serviceAccount, jsF string) (CIBui
7582
if js.Recipe.GitURL == "" {
7683
return nil, errors.New("JobSpec does inclue a Git Recipe")
7784
}
78-
os.Setenv("SOURCE_REF", js.Recipe.GitRef)
79-
os.Setenv("SOURCE_URI", js.Recipe.GitURL)
8085

81-
log.WithFields(log.Fields{
82-
"override image": image,
83-
"source ref": js.Recipe.GitRef,
84-
"source uri": js.Recipe.GitURL,
85-
}).Info("jobspec defined source")
86+
c := &ci{
87+
js: &js,
88+
jobSpecFile: jsF,
89+
serviceAccount: serviceAccount,
90+
image: image,
91+
}
8692

87-
// Get the ci API
88-
ciBuild, err := ciAPIBuild(&js, image, serviceAccount)
93+
// TODO: implement out-of-cluster
94+
if err := c.setInCluster(); err != nil {
95+
return nil, fmt.Errorf("failed setting incluster options: %v", err)
96+
}
97+
98+
// Generate the build.openshift.io/v1 object
99+
if err := c.generateAPIBuild(); err != nil {
100+
return nil, fmt.Errorf("failed to generate api build: %v", err)
101+
}
102+
ciBuild, err := c.encodeAPIBuild()
89103
if err != nil {
90-
return nil, fmt.Errorf("failed to create a ci API build object")
104+
return nil, fmt.Errorf("failed to encode apibuild: %v", err)
91105
}
92-
os.Setenv("BUILD", ciBuild)
93106

94107
// Create the buildConfig object
108+
os.Setenv("BUILD", ciBuild)
109+
os.Setenv("SOURCE_REF", js.Recipe.GitRef)
110+
os.Setenv("SOURCE_URI", js.Recipe.GitURL)
95111
bc, err := newBC()
96112
if err != nil {
97113
return nil, err
98114
}
99115
bc.JobSpec = js
100116
bc.JobSpecFile = jsF
117+
c.bc = bc
101118

102-
c := &ci{
103-
jobSpecFile: jsF,
104-
bc: bc,
105-
ciBuild: ciBuild,
106-
}
107119
return c, nil
108120
}
109121

110-
func ciAPIBuild(js *spec.JobSpec, image, serviceAccount string) (string, error) {
122+
func (c *ci) setInCluster() error {
111123
// Dig deep and query find out what Kubernetes thinks this pod
112124
// Discover where this running
113125
hostname, ok := os.LookupEnv("HOSTNAME")
114126
if !ok {
115-
return "", errors.New("Unable to find hostname")
127+
return errors.New("Unable to find hostname")
128+
}
129+
c.hostname = hostname
130+
131+
// Open the Kubernetes Client
132+
ac, pn, err := k8sInClusterClient()
133+
if err != nil {
134+
return fmt.Errorf("failed create a kubernetes client: %w", err)
116135
}
136+
c.projectNamespace = pn
117137

118-
myIP, err := getPodIP(hostname)
138+
myIP, err := getPodIP(ac, pn, hostname)
119139
if err != nil {
120-
return "", fmt.Errorf("failed to query my hostname: %w", err)
140+
return fmt.Errorf("failed to query my hostname: %w", err)
121141
}
142+
c.ipaddr = myIP
122143

123144
// Discover where this running
124-
myPod, err := apiClient.Pods(projectNamespace).Get(hostname, metav1.GetOptions{})
145+
myPod, err := ac.CoreV1().Pods(pn).Get(hostname, metav1.GetOptions{})
125146
if err != nil {
126-
return "", err
147+
return err
127148
}
149+
c.pod = myPod
128150

129151
// find the running pod this is running on.
130152
var myContainer *v1.Container = nil
@@ -139,24 +161,19 @@ func ciAPIBuild(js *spec.JobSpec, image, serviceAccount string) (string, error)
139161
}
140162

141163
// allow both the service account and the image to be overriden
142-
if serviceAccount == "" {
143-
serviceAccount = myPod.Spec.ServiceAccountName
164+
if c.serviceAccount == "" {
165+
c.serviceAccount = myPod.Spec.ServiceAccountName
144166
}
145-
if image == "" {
146-
image = myContainer.Image
167+
if c.image == "" {
168+
c.image = myContainer.Image
147169
}
148-
if serviceAccount == "" || image == "" {
149-
return "", errors.New("serviceAccount and image must be defined by running pod or via overrides")
170+
if c.serviceAccount == "" || c.image == "" {
171+
return errors.New("serviceAccount and image must be defined by running pod or via overrides")
150172
}
173+
return nil
174+
}
151175

152-
l := log.WithFields(log.Fields{
153-
"ip": myIP,
154-
"image": image,
155-
"serviceAccount": serviceAccount,
156-
"host": myContainer.Name,
157-
})
158-
l.Info("identified pod")
159-
176+
func (c *ci) generateAPIBuild() error {
160177
// Create just _enough_ of the OpenShift BuildConfig spec
161178
// Create a "ci" build.openshift.io/v1 specification.
162179
ciBuildNumber := time.Now().Format("20060102150405")
@@ -167,7 +184,7 @@ func ciAPIBuild(js *spec.JobSpec, image, serviceAccount string) (string, error)
167184
// ciRunnerTag is tested for to determine if this is
168185
// a buildconfig or a faked one
169186
ciRunnerTag: "true",
170-
fmt.Sprintf(ciAnnotation, "IP"): myIP,
187+
fmt.Sprintf(ciAnnotation, "IP"): c.ipaddr,
171188
// Required Labels
172189
buildapiv1.BuildConfigAnnotation: "ci-cosa-bc",
173190
buildapiv1.BuildNumberAnnotation: ciBuildNumber,
@@ -180,25 +197,33 @@ func ciAPIBuild(js *spec.JobSpec, image, serviceAccount string) (string, error)
180197

181198
// Populate the Spec
182199
a.Spec = buildapiv1.BuildSpec{}
183-
a.Spec.ServiceAccount = myPod.Spec.ServiceAccountName
200+
a.Spec.ServiceAccount = c.serviceAccount
184201
a.Spec.Strategy = buildapiv1.BuildStrategy{}
185202
a.Spec.Strategy.CustomStrategy = new(buildapiv1.CustomBuildStrategy)
186203
a.Spec.Strategy.CustomStrategy.From = corev1.ObjectReference{
187-
Name: image,
204+
Name: c.image,
188205
}
189206
a.Spec.Source = buildapiv1.BuildSource{
190207
ContextDir: cosaSrvDir,
191208
Git: &buildapiv1.GitBuildSource{
192-
Ref: js.Recipe.GitRef,
193-
URI: js.Recipe.GitURL,
209+
Ref: c.js.Recipe.GitRef,
210+
URI: c.js.Recipe.GitURL,
194211
},
195212
}
196213

197-
// Render the ci buildapiv1 object to a JSON object.
198-
// JSON is the messaginging interface for Kubernetes.
214+
c.apibuild = &a
215+
return nil
216+
}
217+
218+
// encodeAPIBuilder the ci buildapiv1 object to a JSON object.
219+
// JSON is the messaginging interface for Kubernetes.
220+
func (c *ci) encodeAPIBuild() (string, error) {
221+
if c.apibuild == nil {
222+
return "", errors.New("apibuild is not defined yet")
223+
}
199224
aW := bytes.NewBuffer([]byte{})
200225
s := json.NewYAMLSerializer(json.DefaultMetaFactory, buildScheme, buildScheme)
201-
if err := s.Encode(&a, aW); err != nil {
226+
if err := s.Encode(c.apibuild, aW); err != nil {
202227
return "", err
203228
}
204229
d, err := ioutil.ReadAll(aW)

‎gangplank/ocp/cosa-pod.go

+76-34
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ import (
2222
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2323
"k8s.io/apimachinery/pkg/fields"
2424
"k8s.io/apimachinery/pkg/labels"
25+
"k8s.io/client-go/kubernetes"
2526
)
2627

2728
const (
@@ -89,48 +90,88 @@ var (
8990
}
9091
)
9192

92-
// setPodDefaults checks the Kubernetes version to determine if
93-
// we're on OCP 3.11 (v1.11) or 4.2(v1.15) or later.
94-
// https://docs.openshift.com/container-platform/4.2/release_notes/ocp-4-2-release-notes.html#ocp-4-2-about-this-release
95-
func setPodDefaults() error {
96-
vi, err := apiClientSet.DiscoveryClient.ServerVersion()
93+
// cosaPod is a COSA pod
94+
type cosaPod struct {
95+
apiBuild *buildapiv1.Build
96+
apiClientSet *kubernetes.Clientset
97+
project string
98+
99+
ocpInitCommand []string
100+
ocpRequirements v1.ResourceList
101+
ocpSecContext *v1.SecurityContext
102+
volumes []v1.Volume
103+
volumeMounts []v1.VolumeMount
104+
105+
ctx context.Context
106+
index int
107+
pod *v1.Pod
108+
}
109+
110+
// CosaPodder create COSA capable pods.
111+
type CosaPodder interface {
112+
WorkerRunner(envVar []v1.EnvVar) error
113+
}
114+
115+
// a cosaPod is a CosaPodder
116+
var _ = CosaPodder(&cosaPod{})
117+
118+
// NewCosaPodder creates a CosaPodder
119+
func NewCosaPodder(
120+
ctx context.Context,
121+
apiBuild *buildapiv1.Build,
122+
apiClientSet *kubernetes.Clientset,
123+
project string,
124+
index int) (CosaPodder, error) {
125+
126+
cp := &cosaPod{
127+
apiBuild: apiBuild,
128+
apiClientSet: apiClientSet,
129+
project: project,
130+
ctx: ctx,
131+
index: index,
132+
133+
// Set defaults for OCP4.x
134+
ocpRequirements: ocpRequirements,
135+
ocpSecContext: ocpSecContext,
136+
ocpInitCommand: ocpInitCommand,
137+
138+
volumes: volumes,
139+
volumeMounts: volumeMounts,
140+
}
141+
142+
vi, err := cp.apiClientSet.DiscoveryClient.ServerVersion()
97143
if err != nil {
98-
return fmt.Errorf("failed to query the kubernetes version: %w", err)
144+
return nil, fmt.Errorf("failed to query the kubernetes version: %w", err)
99145
}
100146

101147
minor, err := strconv.Atoi(strings.TrimRight(vi.Minor, "+"))
102148
log.Infof("Kubernetes version of cluster is %s %s.%d", vi.String(), vi.Major, minor)
103149
if err != nil {
104-
return fmt.Errorf("failed to detect OCP cluster version: %v", err)
150+
return nil, fmt.Errorf("failed to detect OCP cluster version: %v", err)
105151
}
106152
if minor >= 15 {
107153
log.Info("Detected OpenShift 4.x cluster")
108-
return nil
154+
return cp, nil
109155
}
110156

111157
log.Infof("Creating container with Openshift v3.x defaults")
112-
ocpRequirements = ocp3Requirements
113-
ocpInitCommand = ocp3InitCommand
114-
ocpSecContext = ocp3SecContext
115-
return nil
158+
cp.ocpRequirements = ocp3Requirements
159+
cp.ocpSecContext = ocp3SecContext
160+
cp.ocpInitCommand = ocp3InitCommand
161+
162+
return cp, nil
116163
}
117164

118165
func ptrInt(i int64) *int64 { return &i }
119166

120167
func ptrBool(b bool) *bool { return &b }
121168

122-
// Create creates a pod and ensures that the pod is cleaned up when the
123-
// process finishes
124-
// Inspired by https://github.com/kubernetes/client-go/blob/master/examples/create-update-delete-deployment/main.go
125-
func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
126-
if err := setPodDefaults(); err != nil {
127-
log.WithField("err", err).Error("assuming OpenShift version v4.x")
128-
}
129-
169+
// WorkerRunner runs a worker pod and watches until finished
170+
func (cp *cosaPod) WorkerRunner(envVars []v1.EnvVar) error {
130171
podName := fmt.Sprintf("%s-%s-worker-%d",
131-
apiBuild.Annotations[buildapiv1.BuildConfigAnnotation],
132-
apiBuild.Annotations[buildapiv1.BuildNumberAnnotation],
133-
index,
172+
cp.apiBuild.Annotations[buildapiv1.BuildConfigAnnotation],
173+
cp.apiBuild.Annotations[buildapiv1.BuildNumberAnnotation],
174+
cp.index,
134175
)
135176
log.Infof("Creating pod %s", podName)
136177

@@ -144,10 +185,10 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
144185
"/usr/bin/gangplank",
145186
"builder",
146187
},
147-
Env: eVars,
188+
Env: envVars,
148189
WorkingDir: "/srv",
149-
VolumeMounts: volumeMounts,
150-
SecurityContext: ocpSecContext,
190+
VolumeMounts: cp.volumeMounts,
191+
SecurityContext: cp.ocpSecContext,
151192
Resources: v1.ResourceRequirements{
152193
Limits: ocpRequirements,
153194
Requests: ocpRequirements,
@@ -187,15 +228,16 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
187228
},
188229
}
189230

190-
resp, err := apiClient.Pods(projectNamespace).Create(req)
231+
ac := cp.apiClientSet.CoreV1()
232+
resp, err := ac.Pods(cp.project).Create(req)
191233
if err != nil {
192234
return fmt.Errorf("failed to create pod %s: %w", podName, err)
193235
}
194-
195236
log.Infof("Pod created: %s", podName)
237+
cp.pod = req
196238

197239
status := resp.Status
198-
w, err := apiClient.Pods(projectNamespace).Watch(
240+
w, err := ac.Pods(cp.project).Watch(
199241
metav1.ListOptions{
200242
Watch: true,
201243
ResourceVersion: resp.ResourceVersion,
@@ -213,7 +255,7 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
213255
// ender is our clean-up that kill our pods
214256
ender := func() {
215257
l.Infof("terminating")
216-
if err := apiClient.Pods(projectNamespace).Delete(podName, &metav1.DeleteOptions{}); err != nil {
258+
if err := ac.Pods(cp.project).Delete(podName, &metav1.DeleteOptions{}); err != nil {
217259
l.WithField("err", err).Error("Failed delete on pod, yolo.")
218260
}
219261
}
@@ -244,7 +286,7 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
244286
return nil
245287
case v1.PodRunning:
246288
l.Infof("Pod successfully completed")
247-
if err := streamPodLogs(&logStarted, req); err != nil {
289+
if err := cp.streamPodLogs(&logStarted, req); err != nil {
248290
l.WithField("err", err).Error("failed to open logging")
249291
}
250292
case v1.PodFailed:
@@ -261,7 +303,7 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
261303
case <-sigs:
262304
ender()
263305
return errors.New("Termination requested")
264-
case <-ctx.Done():
306+
case <-cp.ctx.Done():
265307
return nil
266308
}
267309
}
@@ -271,15 +313,15 @@ func createWorkerPod(ctx context.Context, index int, eVars []v1.EnvVar) error {
271313
// pods are responsible for their work, but not for their logs.
272314
// To make streamPodLogs thread safe and non-blocking, it expects
273315
// a pointer to a bool. If that pointer is nil or true, then we return
274-
func streamPodLogs(logging *bool, pod *v1.Pod) error {
316+
func (cp *cosaPod) streamPodLogs(logging *bool, pod *v1.Pod) error {
275317
if logging != nil && *logging {
276318
return nil
277319
}
278320
*logging = true
279321
podLogOpts := v1.PodLogOptions{
280322
Follow: true,
281323
}
282-
req := apiClient.Pods(projectNamespace).GetLogs(pod.Name, &podLogOpts)
324+
req := cp.apiClientSet.CoreV1().Pods(cp.project).GetLogs(pod.Name, &podLogOpts)
283325
podLogs, err := req.Stream()
284326
if err != nil {
285327
return err

‎gangplank/ocp/k8s.go

+11-23
Original file line numberDiff line numberDiff line change
@@ -11,64 +11,52 @@ import (
1111
"k8s.io/apimachinery/pkg/fields"
1212
"k8s.io/apimachinery/pkg/labels"
1313
"k8s.io/client-go/kubernetes"
14-
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
1514
"k8s.io/client-go/rest"
1615
)
1716

1817
const clusterNamespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
1918

2019
var (
21-
// apiClient is v1 Client Interface for interacting Kubernetes
22-
apiClient corev1.CoreV1Interface
23-
24-
// apiClientSet is a generic Kubernetes Client Set
25-
apiClientSet *kubernetes.Clientset
26-
27-
// projectNamespace is the current namespace
28-
projectNamespace string
29-
3020
// forceNotInCluster is used for testing. This is set to
3121
// true for when testing is run with `-tag ci`
3222
forceNotInCluster = false
3323
)
3424

35-
// k8sAPIClient opens an in-cluster Kubernetes API client.
25+
// k8sInClusterClient opens an in-cluster Kubernetes API client.
3626
// The running pod must have a service account defined in the PodSpec.
37-
func k8sAPIClient() error {
27+
func k8sInClusterClient() (*kubernetes.Clientset, string, error) {
3828
_, kport := os.LookupEnv("KUBERNETES_SERVICE_PORT")
3929
_, khost := os.LookupEnv("KUBERNETES_SERVICE_HOST")
4030
if !khost || !kport || forceNotInCluster {
41-
return ErrNotInCluster
31+
return nil, "", ErrNotInCluster
4232
}
4333

4434
// creates the in-cluster config
4535
cc, err := rest.InClusterConfig()
4636
if err != nil {
47-
return err
37+
return nil, "", err
4838
}
4939

5040
// creates the clientset
5141
nc, err := kubernetes.NewForConfig(cc)
5242
if err != nil {
53-
return err
43+
return nil, "", err
5444
}
55-
apiClient = nc.CoreV1()
56-
apiClientSet = nc
5745

5846
pname, err := ioutil.ReadFile(clusterNamespaceFile)
5947
if err != nil {
60-
return fmt.Errorf("Failed determining the current namespace: %v", err)
48+
return nil, "", fmt.Errorf("Failed determining the current namespace: %v", err)
6149
}
62-
projectNamespace = string(pname)
50+
pn := string(pname)
6351

64-
log.Infof("Current project/namespace is %s", projectNamespace)
65-
return nil
52+
log.Infof("Current project/namespace is %s", pn)
53+
return nc, pn, nil
6654
}
6755

6856
// getPodiP returns the IP of a pod. getPodIP blocks pending until the podIP
6957
// is recieved.
70-
func getPodIP(podName string) (string, error) {
71-
w, err := apiClient.Pods(projectNamespace).Watch(
58+
func getPodIP(cs *kubernetes.Clientset, podNamespace, podName string) (string, error) {
59+
w, err := cs.CoreV1().Pods(podNamespace).Watch(
7260
metav1.ListOptions{
7361
Watch: true,
7462
FieldSelector: fields.Set{"metadata.name": podName}.AsSelector().String(),

‎gangplank/ocp/sa_secrets.go

+3-2
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99

1010
log "github.com/sirupsen/logrus"
1111
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12+
"k8s.io/client-go/kubernetes"
1213
)
1314

1415
/*
@@ -155,15 +156,15 @@ func (sm *secretMap) writeSecretFiles(toDir, name string, d map[string][]byte, r
155156
// 'coreos-assembler.coreos.com/secret=k' and then maps the secret
156157
// automatically in. "k" must be in the "known" secrets type to be mapped
157158
// automatically.
158-
func kubernetesSecretsSetup(toDir string) ([]string, error) {
159+
func kubernetesSecretsSetup(ac *kubernetes.Clientset, ns, toDir string) ([]string, error) {
159160
lo := metav1.ListOptions{
160161
LabelSelector: secretLabelName,
161162
Limit: 100,
162163
}
163164

164165
var ret []string
165166

166-
secrets, err := apiClient.Secrets(projectNamespace).List(lo)
167+
secrets, err := ac.CoreV1().Secrets(ns).List(lo)
167168
if err != nil {
168169
return ret, err
169170
}

‎gangplank/ocp/worker.go

+6-7
Original file line numberDiff line numberDiff line change
@@ -49,12 +49,6 @@ func newWorkSpec(ctx context.Context) (*workSpec, error) {
4949
if err := ws.Unmarshal(r); err != nil {
5050
return nil, err
5151
}
52-
53-
// Require a Kubernetes Service Account Client
54-
if err := k8sAPIClient(); err != nil {
55-
return nil, fmt.Errorf("failed create a kubernetes client: %w", err)
56-
}
57-
5852
if _, err := os.Stat(cosaSrvDir); os.IsNotExist(err) {
5953
return nil, fmt.Errorf("Context dir %q does not exist", cosaSrvDir)
6054
}
@@ -85,12 +79,17 @@ func (ws *workSpec) Marshal() ([]byte, error) {
8579

8680
// Exec executes the work spec tasks.
8781
func (ws *workSpec) Exec(ctx context.Context) error {
82+
ac, pn, err := k8sInClusterClient()
83+
if err != nil {
84+
return fmt.Errorf("failed create a kubernetes client: %w", err)
85+
}
86+
8887
// Workers always will use /srv
8988
if err := os.Chdir(cosaSrvDir); err != nil {
9089
return fmt.Errorf("unable to switch to %s: %w", cosaSrvDir, err)
9190
}
9291

93-
ks, err := kubernetesSecretsSetup(cosaSrvDir)
92+
ks, err := kubernetesSecretsSetup(ac, pn, cosaSrvDir)
9493
if err != nil {
9594
log.Errorf("Failed to setup Service Account Secrets: %v", err)
9695
}

0 commit comments

Comments
 (0)
Please sign in to comment.