Skip to content

app_manager

SparkAppManager

Bases: LoggingMixin

Manage Spark apps on Kubernetes.

Examples:

>>> from spark_on_k8s.utils.app_manager import SparkAppManager
>>> app_manager = SparkAppManager()
>>> app_manager.stream_logs(
...     namespace="spark",
...     pod_name="20240114225118-driver",
...     should_print=True,
... )

Parameters:

Name Type Description Default
k8s_client_manager KubernetesClientManager

Kubernetes client manager. Defaults to None.

None
logger_name str

logger name. Defaults to "SparkAppManager".

None
Source code in spark_on_k8s/utils/app_manager.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
class SparkAppManager(LoggingMixin):
    """Manage Spark apps on Kubernetes.

    Examples:
        >>> from spark_on_k8s.utils.app_manager import SparkAppManager
        >>> app_manager = SparkAppManager()
        >>> app_manager.stream_logs(
        ...     namespace="spark",
        ...     pod_name="20240114225118-driver",
        ...     should_print=True,
        ... )

    Args:
        k8s_client_manager (KubernetesClientManager, optional): Kubernetes client manager. Defaults to None.
        logger_name (str, optional): logger name. Defaults to "SparkAppManager".
    """

    def __init__(
        self,
        *,
        k8s_client_manager: KubernetesClientManager | None = None,
        logger_name: str | None = None,
    ):
        super().__init__(logger_name=logger_name or "SparkAppManager")
        self.k8s_client_manager = k8s_client_manager or KubernetesClientManager()

    def app_status(
        self,
        *,
        namespace: str,
        pod_name: str | None = None,
        app_id: str | None = None,
        client: k8s.CoreV1Api | None = None,
    ) -> SparkAppStatus:
        """Get app status.

        Args:
            namespace (str): Namespace.
            pod_name (str): Pod name. Defaults to None.
            app_id (str): App ID. Defaults to None.
            client (k8s.CoreV1Api, optional): Kubernetes client. Defaults to None.

        Returns:
            SparkAppStatus: App status.

        Raises:
            ValueError: If neither pod_name nor app_id is specified, or if no pod
                is found for the given app_id.
        """

        def _app_status(_client: k8s.CoreV1Api) -> SparkAppStatus:
            if pod_name is None and app_id is None:
                raise ValueError("Either pod_name or app_id must be specified")
            if pod_name is not None:
                _pod = _client.read_namespaced_pod(
                    namespace=namespace,
                    name=pod_name,
                )
            else:
                _pods = _client.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=f"spark-app-id={app_id}",
                ).items
                # Raise a descriptive error (consistent with kill_app/delete_app/
                # stream_logs) instead of an opaque IndexError when no pod matches.
                if len(_pods) == 0:
                    raise ValueError(f"No pods found for app {app_id}")
                _pod = _pods[0]
            return get_app_status(_pod)

        # Open a managed client only when the caller did not supply one.
        if client is None:
            with self.k8s_client_manager.client() as client:
                api = k8s.CoreV1Api(client)
                return _app_status(api)
        return _app_status(client)

    def wait_for_app(
        self,
        *,
        namespace: str,
        pod_name: str | None = None,
        app_id: str | None = None,
        poll_interval: float = 10,
        should_print: bool = False,
        startup_timeout: float = 0,
    ):
        """Wait for a Spark app to finish.

        Args:
            namespace (str): Namespace.
            pod_name (str): Pod name.
            app_id (str): App ID.
            poll_interval (float, optional): Poll interval in seconds. Defaults to 10.
            should_print (bool, optional): Whether to print logs instead of logging them.
            startup_timeout (float, optional): Timeout in seconds to wait for the app to start.
                Defaults to 0 (no timeout).

        Raises:
            TimeoutError: If the app is still pending after startup_timeout seconds.
            ApiException: If the Kubernetes API fails with anything other than 404.
        """
        start_time = time.time()
        termination_statuses = {SparkAppStatus.Succeeded, SparkAppStatus.Failed, SparkAppStatus.Unknown}
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            while True:
                try:
                    status = self.app_status(
                        namespace=namespace, pod_name=pod_name, app_id=app_id, client=api
                    )
                except ApiException as e:
                    if e.status == 404:
                        # The driver pod is gone; nothing left to wait for.
                        self.log(
                            msg=f"Pod {pod_name} was deleted", level=logging.INFO, should_print=should_print
                        )
                        return
                    # Any other API error is unexpected: re-raise instead of silently
                    # swallowing it (swallowing would also leave `status` unbound below).
                    raise
                if status in termination_statuses:
                    break
                if status == SparkAppStatus.Pending:
                    if startup_timeout and start_time + startup_timeout < time.time():
                        raise TimeoutError("App startup timeout")
                self.log(
                    msg=f"Pod {pod_name} status is {status}, sleep {poll_interval}s",
                    level=logging.INFO,
                    should_print=should_print,
                )
                time.sleep(poll_interval)
            self.log(
                msg=f"Pod {pod_name} finished with status {status.value}",
                level=logging.INFO,
                should_print=should_print,
            )

    def stream_logs(
        self,
        *,
        namespace: str,
        pod_name: str | None = None,
        app_id: str | None = None,
        startup_timeout: float = 0,
        should_print: bool = False,
    ):
        """Stream logs from a Spark app.

        Args:
            namespace (str): Namespace.
            pod_name (str): Pod name.
            app_id (str): App ID.
            startup_timeout (float, optional): Timeout in seconds to wait for the app to start.
                Defaults to 0 (no timeout).
            should_print (bool, optional): Whether to print logs instead of logging them.

        Raises:
            ValueError: If neither pod_name nor app_id is specified, or if no pod
                is found for the given app_id.
            TimeoutError: If the app is still pending after startup_timeout seconds.
        """
        start_time = time.time()
        if pod_name is None and app_id is None:
            raise ValueError("Either pod_name or app_id must be specified")
        # Resolve the driver pod name from the app id when needed.
        if pod_name is None:
            with self.k8s_client_manager.client() as client:
                api = k8s.CoreV1Api(client)
                pods = api.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=f"spark-app-id={app_id}",
                ).items
                if len(pods) == 0:
                    raise ValueError(f"No pods found for app {app_id}")
                pod_name = pods[0].metadata.name
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            # Logs are not available while the pod is Pending; poll until it starts.
            while True:
                pod = api.read_namespaced_pod(
                    namespace=namespace,
                    name=pod_name,
                )
                if pod.status.phase != "Pending":
                    break
                if startup_timeout and start_time + startup_timeout < time.time():
                    raise TimeoutError("App startup timeout")
                time.sleep(5)
            watcher = watch.Watch()
            for line in watcher.stream(
                api.read_namespaced_pod_log,
                namespace=namespace,
                name=pod_name,
            ):
                self.log(msg=line, level=logging.INFO, should_print=should_print)
            watcher.stop()

    def list_apps(self, namespace: str) -> list[str]:
        """List apps.

        Args:
            namespace (str): Namespace.

        Returns:
            list[str]: Spark apps in the namespace.
        """
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            # Driver pods carry the `spark-role=driver` label; each one identifies an app.
            return [
                pod.metadata.labels["spark-app-id"]
                for pod in api.list_namespaced_pod(
                    namespace=namespace,
                    label_selector="spark-role=driver",
                ).items
            ]

    def kill_app(
        self,
        namespace: str,
        pod_name: str | None = None,
        app_id: str | None = None,
        should_print: bool = False,
    ):
        """Kill an app.

        Args:
            namespace (str): Namespace.
            pod_name (str): Pod name.
            app_id (str): App ID.
            should_print (bool, optional): Whether to print logs instead of logging them.

        Raises:
            ValueError: If neither pod_name nor app_id is specified, or if no pod
                is found for the given app_id.
        """
        if pod_name is None and app_id is None:
            raise ValueError("Either pod_name or app_id must be specified")
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            if pod_name is None:
                pods = api.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=f"spark-app-id={app_id}",
                ).items
                if len(pods) == 0:
                    raise ValueError(f"No pods found for app {app_id}")
                pod = pods[0]
            else:
                pod = api.read_namespaced_pod(
                    namespace=namespace,
                    name=pod_name,
                )
            container_name = pod.spec.containers[0].name
            if pod.status.phase != "Running":
                self.log(
                    f"App is not running, it is {get_app_status(pod).value}",
                    level=logging.INFO,
                    should_print=should_print,
                )
                return
            # Kill PID 1 (the driver process) inside the container to stop the app.
            stream(
                api.connect_get_namespaced_pod_exec,
                pod.metadata.name,
                namespace,
                command=["/bin/sh", "-c", "kill 1"],
                container=container_name,
                stderr=True,
                stdin=False,
                stdout=True,
                tty=False,
                _preload_content=False,
            )

    def delete_app(
        self, namespace: str, pod_name: str | None = None, app_id: str | None = None, force: bool = False
    ):
        """Delete an app.

        Args:
            namespace (str): Namespace.
            pod_name (str): Pod name.
            app_id (str): App ID.
            force (bool, optional): Whether to force delete the app. Defaults to False.

        Raises:
            ValueError: If neither pod_name nor app_id is specified, or if no pod
                is found for the given app_id.
        """
        if pod_name is None and app_id is None:
            raise ValueError("Either pod_name or app_id must be specified")
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            if app_id:
                # we don't use `delete_collection_namespaced_pod` to know if the app exists or not
                pods = api.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=f"spark-app-id={app_id}",
                ).items
                if len(pods) == 0:
                    raise ValueError(f"No pods found for app {app_id}")
                pod_name = pods[0].metadata.name
            api.delete_namespaced_pod(
                name=pod_name,
                namespace=namespace,
                body=k8s.V1DeleteOptions(
                    # grace_period_seconds=0 forces immediate termination.
                    grace_period_seconds=0 if force else None,
                    propagation_policy="Foreground",
                ),
            )

    @staticmethod
    def create_spark_pod_spec(
        *,
        app_name: str,
        app_id: str,
        image: str,
        namespace: str = "default",
        service_account: str = "spark",
        container_name: str = "driver",
        env_variables: dict[str, str] | None = None,
        pod_resources: dict[str, dict[str, str]] | None = None,
        args: list[str] | None = None,
        image_pull_policy: Literal["Always", "Never", "IfNotPresent"] = "IfNotPresent",
        extra_labels: dict[str, str] | None = None,
        annotations: dict[str, str] | None = None,
        env_from_secrets: list[str] | None = None,
        volumes: list[k8s.V1Volume] | None = None,
        volume_mounts: list[k8s.V1VolumeMount] | None = None,
        node_selector: dict[str, str] | None = None,
        tolerations: list[k8s.V1Toleration] | None = None,
    ) -> k8s.V1PodTemplateSpec:
        """Create a pod spec for a Spark application

        Args:
            app_name: Name of the Spark application
            app_id: ID of the Spark application
            image: Docker image to use for the Spark driver and executors
            namespace: Kubernetes namespace to use, defaults to "default"
            service_account: Kubernetes service account to use for the Spark driver, defaults to "spark"
            container_name: Name of the container, defaults to "driver"
            env_variables: Dictionary of environment variables to pass to the container
            pod_resources: Dictionary of resources to request for the container
            args: List of arguments to pass to the container
            image_pull_policy: Image pull policy for the driver and executors, defaults to "IfNotPresent"
            extra_labels: Dictionary of extra labels to add to the pod template
            annotations: Dictionary of annotations to add to the pod template
            env_from_secrets: List of secrets to load environment variables from
            volumes: List of volumes to mount in the pod
            volume_mounts: List of volume mounts to mount in the container
            node_selector: Node selector to use for the pod
            tolerations: List of tolerations to use for the pod

        Returns:
            Pod template spec for the Spark application
        """
        pod_metadata = k8s.V1ObjectMeta(
            name=f"{app_id}-driver",
            namespace=namespace,
            labels=SparkAppManager.spark_app_labels(
                app_name=app_name,
                app_id=app_id,
                extra_labels=extra_labels,
            ),
            annotations=annotations,
        )
        pod_spec = k8s.V1PodSpec(
            service_account_name=service_account,
            # The driver pod must not be restarted; app lifecycle is managed explicitly.
            restart_policy="Never",
            containers=[
                SparkAppManager.create_driver_container(
                    image=image,
                    container_name=container_name,
                    env_variables=env_variables,
                    pod_resources=pod_resources,
                    args=args,
                    image_pull_policy=image_pull_policy,
                    env_from_secrets=env_from_secrets,
                    volume_mounts=volume_mounts,
                )
            ],
            volumes=volumes,
            node_selector=node_selector,
            tolerations=tolerations,
        )
        template = k8s.V1PodTemplateSpec(
            metadata=pod_metadata,
            spec=pod_spec,
        )
        return template

    @staticmethod
    def create_driver_container(
        *,
        image: str,
        container_name: str = "driver",
        env_variables: dict[str, str] | None = None,
        pod_resources: dict[str, dict[str, str]] | None = None,
        args: list[str] | None = None,
        image_pull_policy: Literal["Always", "Never", "IfNotPresent"] = "IfNotPresent",
        env_from_secrets: list[str] | None = None,
        volume_mounts: list[k8s.V1VolumeMount] | None = None,
    ) -> k8s.V1Container:
        """Create a container spec for the Spark driver

        Args:
            image: Docker image to use for the Spark driver and executors
            container_name: Name of the container, defaults to "driver"
            env_variables: Dictionary of environment variables to pass to the container
            pod_resources: Dictionary of resources to request for the container
            args: List of arguments to pass to the container
            image_pull_policy: Image pull policy for the driver and executors, defaults to "IfNotPresent"
            env_from_secrets: List of secrets to load environment variables from
            volume_mounts: List of volume mounts to mount in the container

        Returns:
            Container spec for the Spark driver
        """
        return k8s.V1Container(
            name=container_name,
            image=image,
            image_pull_policy=image_pull_policy,
            # SPARK_DRIVER_BIND_ADDRESS is resolved at runtime from the pod IP
            # via the downward API.
            env=[k8s.V1EnvVar(name=key, value=value) for key, value in (env_variables or {}).items()]
            + [
                k8s.V1EnvVar(
                    name="SPARK_DRIVER_BIND_ADDRESS",
                    value_from=k8s.V1EnvVarSource(
                        field_ref=k8s.V1ObjectFieldSelector(
                            field_path="status.podIP",
                        )
                    ),
                ),
            ],
            resources=k8s.V1ResourceRequirements(
                **(pod_resources or {}),
            ),
            args=args or [],
            ports=[
                k8s.V1ContainerPort(
                    container_port=7077,
                    name="driver-port",
                ),
                k8s.V1ContainerPort(
                    container_port=4040,
                    name="ui-port",
                ),
            ],
            env_from=[
                k8s.V1EnvFromSource(
                    secret_ref=k8s.V1SecretEnvSource(
                        name=secret_name,
                    ),
                )
                for secret_name in (env_from_secrets or [])
            ],
            volume_mounts=volume_mounts,
        )

    @staticmethod
    def spark_app_labels(
        *,
        app_name: str,
        app_id: str,
        extra_labels: dict[str, str] | None = None,
    ) -> dict[str, str]:
        """Create labels for a Spark application

        Args:
            app_name: Name of the Spark application
            app_id: ID of the Spark application
            extra_labels: Dictionary of extra labels to add to the labels

        Returns:
            Dictionary of labels for the Spark application resources
        """
        return {
            "spark-app-name": app_name,
            "spark-app-id": app_id,
            "spark-role": "driver",
            **(extra_labels or {}),
        }

    @staticmethod
    def create_headless_service_object(
        *,
        app_name: str,
        app_id: str,
        namespace: str = "default",
        pod_owner_uid: str | None = None,
        extra_labels: dict[str, str] | None = None,
    ) -> k8s.V1Service:
        """Create a headless service for a Spark application

        Args:
            app_name: Name of the Spark application
            app_id: ID of the Spark application
            namespace: Kubernetes namespace to use, defaults to "default"
            pod_owner_uid: UID of the pod to use as owner reference for the service
            extra_labels: Dictionary of extra labels to add to the service

        Returns:
            The created headless service for the Spark application
        """
        labels = SparkAppManager.spark_app_labels(
            app_name=app_name,
            app_id=app_id,
            extra_labels=extra_labels,
        )
        # Owning the service by the driver pod makes Kubernetes garbage-collect
        # the service when the pod is deleted.
        owner = (
            [
                k8s.V1OwnerReference(
                    api_version="v1",
                    kind="Pod",
                    name=f"{app_id}-driver",
                    uid=pod_owner_uid,
                )
            ]
            if pod_owner_uid
            else None
        )
        return k8s.V1Service(
            metadata=k8s.V1ObjectMeta(
                name=app_id,
                labels=labels,
                namespace=namespace,
                owner_references=owner,
            ),
            spec=k8s.V1ServiceSpec(
                selector=labels,
                ports=[
                    k8s.V1ServicePort(
                        port=7077,
                        name="driver-port",
                    ),
                    k8s.V1ServicePort(
                        port=4040,
                        name="ui-port",
                    ),
                ],
                # cluster_ip="None" makes the service headless.
                type="ClusterIP",
                cluster_ip="None",
            ),
        )

    @staticmethod
    def create_secret_object(
        *,
        app_name: str,
        app_id: str,
        secrets_values: dict[str, str],
        namespace: str = "default",
    ) -> k8s.V1Secret:
        """Create a secret for a Spark application to store secrets values

        Args:
            app_name: Name of the Spark application
            app_id: ID of the Spark application
            secrets_values: Dictionary of secrets values
            namespace: Kubernetes namespace to use, defaults to "default"

        Returns:
            The created secret for the Spark application
        """
        return k8s.V1Secret(
            metadata=k8s.V1ObjectMeta(
                name=app_id,
                namespace=namespace,
                labels=SparkAppManager.spark_app_labels(
                    app_name=app_name,
                    app_id=app_id,
                ),
            ),
            string_data=secrets_values,
        )

    @staticmethod
    def create_configmap_objects(
        *,
        app_name: str,
        app_id: str,
        configmaps: list[ConfigMap],
        namespace: str = "default",
    ) -> list[k8s.V1ConfigMap]:
        """Create configmaps for a Spark application to mount in the driver
         and executors containers as volumes

        Args:
            app_name: Name of the Spark application
            app_id: ID of the Spark application
            configmaps: List of configmaps to create
            namespace: Kubernetes namespace to use, defaults to "default"

        Returns:
            The created configmaps objects for the Spark application

        Raises:
            ValueError: If a configmap name or mount path is duplicated.
        """
        k8s_configmaps = []
        configmap_names = set()
        configmap_mount_paths = set()
        for index, configmap in enumerate(configmaps):
            configmap_name = configmap.get("name", f"{app_id}-{index}")
            if configmap_name in configmap_names:
                raise ValueError(f"Configmap name {configmap_name} is duplicated")
            configmap_mount_path = configmap["mount_path"]
            if configmap_mount_path in configmap_mount_paths:
                raise ValueError(f"Configmap mount path {configmap_mount_path} is duplicated")
            configmap_names.add(configmap_name)
            configmap_mount_paths.add(configmap_mount_path)
            data = {}
            source: ConfigMapSource
            # Each source contributes one key: inline text takes precedence over
            # a text file path.
            for source in configmap["sources"]:
                if "text" in source:
                    data[source["name"]] = source["text"]
                elif "text_path" in source:
                    with open(source["text_path"]) as file:
                        data[source["name"]] = file.read()
            k8s_configmaps.append(
                k8s.V1ConfigMap(
                    metadata=k8s.V1ObjectMeta(
                        name=configmap_name,
                        namespace=namespace,
                        labels=SparkAppManager.spark_app_labels(
                            app_name=app_name,
                            app_id=app_id,
                        ),
                    ),
                    data=data,
                )
            )
        return k8s_configmaps

app_status(*, namespace, pod_name=None, app_id=None, client=None)

Get app status.

Parameters:

Name Type Description Default
namespace str

Namespace.

required
pod_name str

Pod name. Defaults to None.

None
app_id str

App ID. Defaults to None.

None
client CoreV1Api

Kubernetes client. Defaults to None.

None

Returns:

Name Type Description
SparkAppStatus SparkAppStatus

App status.

Source code in spark_on_k8s/utils/app_manager.py
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
def app_status(
    self,
    *,
    namespace: str,
    pod_name: str | None = None,
    app_id: str | None = None,
    client: k8s.CoreV1Api | None = None,
) -> SparkAppStatus:
    """Get app status.

    Args:
        namespace (str): Namespace.
        pod_name (str): Pod name. Defaults to None.
        app_id (str): App ID. Defaults to None.
        client (k8s.CoreV1Api, optional): Kubernetes client. Defaults to None.

    Returns:
        SparkAppStatus: App status.

    Raises:
        ValueError: If neither pod_name nor app_id is specified.
    """

    # Closure over namespace/pod_name/app_id: resolves the driver pod either
    # directly by name or via the `spark-app-id` label, then maps it to a status.
    def _app_status(_client: k8s.CoreV1Api) -> SparkAppStatus:
        if pod_name is None and app_id is None:
            raise ValueError("Either pod_name or app_id must be specified")
        if pod_name is not None:
            _pod = _client.read_namespaced_pod(
                namespace=namespace,
                name=pod_name,
            )
        else:
            # NOTE(review): raises IndexError when no pod matches the label —
            # confirm callers expect that rather than a descriptive ValueError.
            _pod = _client.list_namespaced_pod(
                namespace=namespace,
                label_selector=f"spark-app-id={app_id}",
            ).items[0]
        return get_app_status(_pod)

    # Open (and auto-close) a managed client only when none was supplied.
    if client is None:
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            return _app_status(api)
    return _app_status(client)

create_configmap_objects(*, app_name, app_id, configmaps, namespace='default') staticmethod

Create configmaps for a Spark application to mount in the driver and executors containers as volumes

Parameters:

Name Type Description Default
app_name str

Name of the Spark application

required
app_id str

ID of the Spark application

required
configmaps list[ConfigMap]

List of configmaps to create

required
namespace str

Kubernetes namespace to use, defaults to "default"

'default'

Returns:

Type Description
list[V1ConfigMap]

The created configmaps objects for the Spark application

Source code in spark_on_k8s/utils/app_manager.py
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
@staticmethod
def create_configmap_objects(
    *,
    app_name: str,
    app_id: str,
    configmaps: list[ConfigMap],
    namespace: str = "default",
) -> list[k8s.V1ConfigMap]:
    """Create configmaps for a Spark application to mount in the driver
     and executors containers as volumes

    Args:
        app_name: Name of the Spark application
        app_id: ID of the Spark application
        configmaps: List of configmaps to create
        namespace: Kubernetes namespace to use, defaults to "default"

    Returns:
        The created configmaps objects for the Spark application

    Raises:
        ValueError: If a configmap name or mount path is duplicated.
    """
    k8s_configmaps = []
    # Track names and mount paths to reject duplicates across the whole list.
    configmap_names = set()
    configmap_mount_paths = set()
    for index, configmap in enumerate(configmaps):
        # Unnamed configmaps get a deterministic "<app_id>-<index>" name.
        configmap_name = configmap.get("name", f"{app_id}-{index}")
        if configmap_name in configmap_names:
            raise ValueError(f"Configmap name {configmap_name} is duplicated")
        configmap_mount_path = configmap["mount_path"]
        if configmap_mount_path in configmap_mount_paths:
            raise ValueError(f"Configmap mount path {configmap_mount_path} is duplicated")
        configmap_names.add(configmap_name)
        configmap_mount_paths.add(configmap_mount_path)
        data = {}
        source: ConfigMapSource
        # Each source contributes one data key: inline "text" takes precedence
        # over reading the file at "text_path".
        for source in configmap["sources"]:
            if "text" in source:
                data[source["name"]] = source["text"]
            elif "text_path" in source:
                with open(source["text_path"]) as file:
                    data[source["name"]] = file.read()
        k8s_configmaps.append(
            k8s.V1ConfigMap(
                metadata=k8s.V1ObjectMeta(
                    name=configmap_name,
                    namespace=namespace,
                    labels=SparkAppManager.spark_app_labels(
                        app_name=app_name,
                        app_id=app_id,
                    ),
                ),
                data=data,
            )
        )
    return k8s_configmaps

create_driver_container(*, image, container_name='driver', env_variables=None, pod_resources=None, args=None, image_pull_policy='IfNotPresent', env_from_secrets=None, volume_mounts=None) staticmethod

Create a container spec for the Spark driver

Parameters:

Name Type Description Default
image str

Docker image to use for the Spark driver and executors

required
container_name str

Name of the container, defaults to "driver"

'driver'
env_variables dict[str, str] | None

Dictionary of environment variables to pass to the container

None
pod_resources dict[str, dict[str, str]] | None

Dictionary of resources to request for the container

None
args list[str] | None

List of arguments to pass to the container

None
image_pull_policy Literal['Always', 'Never', 'IfNotPresent']

Image pull policy for the driver and executors, defaults to "IfNotPresent"

'IfNotPresent'
env_from_secrets list[str] | None

List of secrets to load environment variables from

None
volume_mounts list[V1VolumeMount] | None

List of volume mounts to mount in the container

None

Returns:

Type Description
V1Container

Container spec for the Spark driver

Source code in spark_on_k8s/utils/app_manager.py
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
@staticmethod
def create_driver_container(
    *,
    image: str,
    container_name: str = "driver",
    env_variables: dict[str, str] | None = None,
    pod_resources: dict[str, dict[str, str]] | None = None,
    args: list[str] | None = None,
    image_pull_policy: Literal["Always", "Never", "IfNotPresent"] = "IfNotPresent",
    env_from_secrets: list[str] | None = None,
    volume_mounts: list[k8s.V1VolumeMount] | None = None,
) -> k8s.V1Container:
    """Create a container spec for the Spark driver

    Args:
        image: Docker image to use for the Spark driver and executors
        container_name: Name of the container, defaults to "driver"
        env_variables: Dictionary of environment variables to pass to the container
        pod_resources: Dictionary of resources to request for the container
        args: List of arguments to pass to the container
        image_pull_policy: Image pull policy for the driver and executors, defaults to "IfNotPresent"
        env_from_secrets: List of secrets to load environment variables from
        volume_mounts: List of volume mounts to mount in the container

    Returns:
        Container spec for the Spark driver
    """
    # User-provided environment first, then the bind address the driver
    # needs so executors can reach it on the pod IP.
    container_env = [
        k8s.V1EnvVar(name=var_name, value=var_value)
        for var_name, var_value in (env_variables or {}).items()
    ]
    container_env.append(
        k8s.V1EnvVar(
            name="SPARK_DRIVER_BIND_ADDRESS",
            value_from=k8s.V1EnvVarSource(
                field_ref=k8s.V1ObjectFieldSelector(field_path="status.podIP")
            ),
        )
    )
    # Expose environment variables loaded from the given secrets.
    secret_sources = [
        k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_name))
        for secret_name in (env_from_secrets or [])
    ]
    # Spark driver RPC port and the Spark UI port.
    container_ports = [
        k8s.V1ContainerPort(container_port=7077, name="driver-port"),
        k8s.V1ContainerPort(container_port=4040, name="ui-port"),
    ]
    return k8s.V1Container(
        name=container_name,
        image=image,
        image_pull_policy=image_pull_policy,
        env=container_env,
        resources=k8s.V1ResourceRequirements(**(pod_resources or {})),
        args=args or [],
        ports=container_ports,
        env_from=secret_sources,
        volume_mounts=volume_mounts,
    )

create_headless_service_object(*, app_name, app_id, namespace='default', pod_owner_uid=None, extra_labels=None) staticmethod

Create a headless service for a Spark application

Parameters:

Name Type Description Default
app_name str

Name of the Spark application

required
app_id str

ID of the Spark application

required
namespace str

Kubernetes namespace to use, defaults to "default"

'default'
pod_owner_uid str | None

UID of the pod to use as owner reference for the service

None
extra_labels dict[str, str] | None

Dictionary of extra labels to add to the service

None

Returns:

Type Description
V1Service

The created headless service for the Spark application

Source code in spark_on_k8s/utils/app_manager.py
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
@staticmethod
def create_headless_service_object(
    *,
    app_name: str,
    app_id: str,
    namespace: str = "default",
    pod_owner_uid: str | None = None,
    extra_labels: dict[str, str] | None = None,
) -> k8s.V1Service:
    """Create a headless service for a Spark application

    Args:
        app_name: Name of the Spark application
        app_id: ID of the Spark application
        namespace: Kubernetes namespace to use, defaults to "default"
        pod_owner_uid: UID of the pod to use as owner reference for the service
        extra_labels: Dictionary of extra labels to add to the service
    Returns:
        The created headless service for the Spark application
    """
    labels = SparkAppManager.spark_app_labels(
        app_name=app_name,
        app_id=app_id,
        extra_labels=extra_labels,
    )
    # Tie the service's lifetime to the driver pod when its UID is known,
    # so the service is garbage-collected with the pod.
    owner_references = None
    if pod_owner_uid:
        owner_references = [
            k8s.V1OwnerReference(
                api_version="v1",
                kind="Pod",
                name=f"{app_id}-driver",
                uid=pod_owner_uid,
            )
        ]
    service_metadata = k8s.V1ObjectMeta(
        name=app_id,
        labels=labels,
        namespace=namespace,
        owner_references=owner_references,
    )
    # cluster_ip="None" makes the service headless: DNS resolves straight
    # to the driver pod IP instead of a virtual IP.
    service_spec = k8s.V1ServiceSpec(
        selector=labels,
        ports=[
            k8s.V1ServicePort(port=7077, name="driver-port"),
            k8s.V1ServicePort(port=4040, name="ui-port"),
        ],
        type="ClusterIP",
        cluster_ip="None",
    )
    return k8s.V1Service(metadata=service_metadata, spec=service_spec)

create_secret_object(*, app_name, app_id, secrets_values, namespace='default') staticmethod

Create a secret for a Spark application to store secrets values

Parameters:

Name Type Description Default
app_name str

Name of the Spark application

required
app_id str

ID of the Spark application

required
secrets_values dict[str, str]

Dictionary of secrets values

required
namespace str

Kubernetes namespace to use, defaults to "default"

'default'

Returns:

Type Description
V1Secret

The created secret for the Spark application

Source code in spark_on_k8s/utils/app_manager.py
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
@staticmethod
def create_secret_object(
    *,
    app_name: str,
    app_id: str,
    secrets_values: dict[str, str],
    namespace: str = "default",
) -> k8s.V1Secret:
    """Create a secret for a Spark application to store secrets values

    Args:
        app_name: Name of the Spark application
        app_id: ID of the Spark application
        secrets_values: Dictionary of secrets values
        namespace: Kubernetes namespace to use, defaults to "default"

    Returns:
        The created secret for the Spark application
    """
    secret_metadata = k8s.V1ObjectMeta(
        name=app_id,
        namespace=namespace,
        labels=SparkAppManager.spark_app_labels(app_name=app_name, app_id=app_id),
    )
    # string_data lets the API server handle base64 encoding of the values.
    return k8s.V1Secret(metadata=secret_metadata, string_data=secrets_values)

create_spark_pod_spec(*, app_name, app_id, image, namespace='default', service_account='spark', container_name='driver', env_variables=None, pod_resources=None, args=None, image_pull_policy='IfNotPresent', extra_labels=None, annotations=None, env_from_secrets=None, volumes=None, volume_mounts=None, node_selector=None, tolerations=None) staticmethod

Create a pod spec for a Spark application

Parameters:

Name Type Description Default
app_name str

Name of the Spark application

required
app_id str

ID of the Spark application

required
image str

Docker image to use for the Spark driver and executors

required
namespace str

Kubernetes namespace to use, defaults to "default"

'default'
service_account str

Kubernetes service account to use for the Spark driver, defaults to "spark"

'spark'
container_name str

Name of the container, defaults to "driver"

'driver'
env_variables dict[str, str] | None

Dictionary of environment variables to pass to the container

None
pod_resources dict[str, dict[str, str]] | None

Dictionary of resources to request for the container

None
args list[str] | None

List of arguments to pass to the container

None
image_pull_policy Literal['Always', 'Never', 'IfNotPresent']

Image pull policy for the driver and executors, defaults to "IfNotPresent"

'IfNotPresent'
extra_labels dict[str, str] | None

Dictionary of extra labels to add to the pod template

None
annotations dict[str, str] | None

Dictionary of annotations to add to the pod template

None
env_from_secrets list[str] | None

List of secrets to load environment variables from

None
volumes list[V1Volume] | None

List of volumes to mount in the pod

None
volume_mounts list[V1VolumeMount] | None

List of volume mounts to mount in the container

None
node_selector dict[str, str] | None

Node selector to use for the pod

None
tolerations list[V1Toleration] | None

List of tolerations to use for the pod

None

Returns:

Type Description
V1PodTemplateSpec

Pod template spec for the Spark application

Source code in spark_on_k8s/utils/app_manager.py
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
@staticmethod
def create_spark_pod_spec(
    *,
    app_name: str,
    app_id: str,
    image: str,
    namespace: str = "default",
    service_account: str = "spark",
    container_name: str = "driver",
    env_variables: dict[str, str] | None = None,
    pod_resources: dict[str, dict[str, str]] | None = None,
    args: list[str] | None = None,
    image_pull_policy: Literal["Always", "Never", "IfNotPresent"] = "IfNotPresent",
    extra_labels: dict[str, str] | None = None,
    annotations: dict[str, str] | None = None,
    env_from_secrets: list[str] | None = None,
    volumes: list[k8s.V1Volume] | None = None,
    volume_mounts: list[k8s.V1VolumeMount] | None = None,
    node_selector: dict[str, str] | None = None,
    tolerations: list[k8s.V1Toleration] | None = None,
) -> k8s.V1PodTemplateSpec:
    """Create a pod spec for a Spark application

    Args:
        app_name: Name of the Spark application
        app_id: ID of the Spark application
        image: Docker image to use for the Spark driver and executors
        namespace: Kubernetes namespace to use, defaults to "default"
        service_account: Kubernetes service account to use for the Spark driver, defaults to "spark"
        container_name: Name of the container, defaults to "driver"
        env_variables: Dictionary of environment variables to pass to the container
        pod_resources: Dictionary of resources to request for the container
        args: List of arguments to pass to the container
        image_pull_policy: Image pull policy for the driver and executors, defaults to "IfNotPresent"
        extra_labels: Dictionary of extra labels to add to the pod template
        annotations: Dictionary of annotations to add to the pod template
        env_from_secrets: List of secrets to load environment variables from
        volumes: List of volumes to mount in the pod
        volume_mounts: List of volume mounts to mount in the container
        node_selector: Node selector to use for the pod
        tolerations: List of tolerations to use for the pod

    Returns:
        Pod template spec for the Spark application
    """
    # Delegate the container definition to the dedicated builder.
    driver_container = SparkAppManager.create_driver_container(
        image=image,
        container_name=container_name,
        env_variables=env_variables,
        pod_resources=pod_resources,
        args=args,
        image_pull_policy=image_pull_policy,
        env_from_secrets=env_from_secrets,
        volume_mounts=volume_mounts,
    )
    # restart_policy="Never": a finished/failed driver must not be restarted
    # by the kubelet — the app's terminal state is read from the pod phase.
    driver_pod_spec = k8s.V1PodSpec(
        service_account_name=service_account,
        restart_policy="Never",
        containers=[driver_container],
        volumes=volumes,
        node_selector=node_selector,
        tolerations=tolerations,
    )
    return k8s.V1PodTemplateSpec(
        metadata=k8s.V1ObjectMeta(
            name=f"{app_id}-driver",
            namespace=namespace,
            labels=SparkAppManager.spark_app_labels(
                app_name=app_name,
                app_id=app_id,
                extra_labels=extra_labels,
            ),
            annotations=annotations,
        ),
        spec=driver_pod_spec,
    )

delete_app(namespace, pod_name=None, app_id=None, force=False)

Delete an app.

Parameters:

Name Type Description Default
namespace str

Namespace.

required
pod_name str

Pod name.

None
app_id str

App ID.

None
force bool

Whether to force delete the app. Defaults to False.

False
Source code in spark_on_k8s/utils/app_manager.py
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def delete_app(
    self, namespace: str, pod_name: str | None = None, app_id: str | None = None, force: bool = False
):
    """Delete an app.

    Args:
        namespace (str): Namespace.
        pod_name (str): Pod name.
        app_id (str): App ID.
        force (bool, optional): Whether to force delete the app. Defaults to False.
    """
    if pod_name is None and app_id is None:
        raise ValueError("Either pod_name or app_id must be specified")
    with self.k8s_client_manager.client() as client:
        api = k8s.CoreV1Api(client)
        if app_id:
            # we don't use `delete_collection_namespaced_pod` to know if the app exists or not
            matching_pods = api.list_namespaced_pod(
                namespace=namespace,
                label_selector=f"spark-app-id={app_id}",
            ).items
            if not matching_pods:
                raise ValueError(f"No pods found for app {app_id}")
            pod_name = matching_pods[0].metadata.name
        # grace_period_seconds=0 forces an immediate kill; "Foreground"
        # propagation deletes dependents before the pod itself disappears.
        delete_options = k8s.V1DeleteOptions(
            grace_period_seconds=0 if force else None,
            propagation_policy="Foreground",
        )
        api.delete_namespaced_pod(
            name=pod_name,
            namespace=namespace,
            body=delete_options,
        )

kill_app(namespace, pod_name=None, app_id=None, should_print=False)

Kill an app.

Parameters:

Name Type Description Default
namespace str

Namespace.

required
pod_name str

Pod name.

None
app_id str

App ID.

None
should_print bool

Whether to print logs instead of logging them.

False
Source code in spark_on_k8s/utils/app_manager.py
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
def kill_app(
    self,
    namespace: str,
    pod_name: str | None = None,
    app_id: str | None = None,
    should_print: bool = False,
):
    """Kill an app.

    Terminates a running Spark driver by exec-ing ``kill 1`` inside its first
    container, which stops the container's entrypoint process (PID 1). Does
    nothing (beyond logging) if the pod is not in the ``Running`` phase.

    Args:
        namespace (str): Namespace.
        pod_name (str): Pod name.
        app_id (str): App ID.
        should_print (bool, optional): Whether to print logs instead of logging them.
    """
    if pod_name is None and app_id is None:
        raise ValueError("Either pod_name or app_id must be specified")
    with self.k8s_client_manager.client() as client:
        api = k8s.CoreV1Api(client)
        if pod_name is None:
            # Resolve the driver pod from the app-id label; the first match
            # is used.
            pods = api.list_namespaced_pod(
                namespace=namespace,
                label_selector=f"spark-app-id={app_id}",
            ).items
            if len(pods) == 0:
                raise ValueError(f"No pods found for app {app_id}")
            pod = pods[0]
        else:
            pod = api.read_namespaced_pod(
                namespace=namespace,
                name=pod_name,
            )
        # The driver is expected to be the first (and only) container.
        container_name = pod.spec.containers[0].name
        if pod.status.phase != "Running":
            # Nothing to kill: the app already finished or hasn't started.
            self.log(
                f"App is not running, it is {get_app_status(pod).value}",
                level=logging.INFO,
                should_print=should_print,
            )
            return
        # Exec `kill 1` in the container over the Kubernetes exec API; the
        # kubelet then tears the container down as PID 1 exits.
        stream(
            api.connect_get_namespaced_pod_exec,
            pod.metadata.name,
            namespace,
            command=["/bin/sh", "-c", "kill 1"],
            container=container_name,
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
            _preload_content=False,
        )

list_apps(namespace)

List apps.

Parameters:

Name Type Description Default
namespace str

Namespace.

required

Returns:

Type Description
list[str]

list[str]: Spark apps in the namespace.

Source code in spark_on_k8s/utils/app_manager.py
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
def list_apps(self, namespace: str) -> list[str]:
    """List apps.

    Args:
        namespace (str): Namespace.

    Returns:
        list[str]: Spark apps in the namespace.
    """
    with self.k8s_client_manager.client() as client:
        api = k8s.CoreV1Api(client)
        # Only driver pods carry the app identity; one driver per app.
        driver_pods = api.list_namespaced_pod(
            namespace=namespace,
            label_selector="spark-role=driver",
        ).items
        return [driver.metadata.labels["spark-app-id"] for driver in driver_pods]

spark_app_labels(*, app_name, app_id, extra_labels=None) staticmethod

Create labels for a Spark application

Parameters:

Name Type Description Default
app_name str

Name of the Spark application

required
app_id str

ID of the Spark application

required
extra_labels dict[str, str] | None

Dictionary of extra labels to add to the labels

None

Returns:

Type Description
dict[str, str]

Dictionary of labels for the Spark application resources

Source code in spark_on_k8s/utils/app_manager.py
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
@staticmethod
def spark_app_labels(
    *,
    app_name: str,
    app_id: str,
    extra_labels: dict[str, str] | None = None,
) -> dict[str, str]:
    """Create labels for a Spark application

    Args:
        app_name: Name of the Spark application
        app_id: ID of the Spark application
        extra_labels: Dictionary of extra labels to add to the labels

    Returns:
        Dictionary of labels for the Spark application resources
    """
    return {
        "spark-app-name": app_name,
        "spark-app-id": app_id,
        "spark-role": "driver",
        **(extra_labels or {}),
    }

stream_logs(*, namespace, pod_name=None, app_id=None, startup_timeout=0, should_print=False)

Stream logs from a Spark app.

Parameters:

Name Type Description Default
namespace str

Namespace.

required
pod_name str

Pod name.

None
app_id str

App ID.

None
startup_timeout float

Timeout in seconds to wait for the app to start. Defaults to 0 (no timeout).

0
should_print bool

Whether to print logs instead of logging them.

False
Source code in spark_on_k8s/utils/app_manager.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
def stream_logs(
    self,
    *,
    namespace: str,
    pod_name: str | None = None,
    app_id: str | None = None,
    startup_timeout: float = 0,
    should_print: bool = False,
):
    """Stream logs from a Spark app.

    Args:
        namespace (str): Namespace.
        pod_name (str): Pod name.
        app_id (str): App ID.
        startup_timeout (float, optional): Timeout in seconds to wait for the app to start.
            Defaults to 0 (no timeout).
        should_print (bool, optional): Whether to print logs instead of logging them.

    Raises:
        ValueError: If neither pod_name nor app_id is given, or no pod matches app_id.
        TimeoutError: If the pod stays Pending longer than startup_timeout.
    """
    start_time = time.time()
    if pod_name is None and app_id is None:
        raise ValueError("Either pod_name or app_id must be specified")
    if pod_name is None:
        # Resolve the driver pod name from the app-id label.
        with self.k8s_client_manager.client() as client:
            api = k8s.CoreV1Api(client)
            pods = api.list_namespaced_pod(
                namespace=namespace,
                label_selector=f"spark-app-id={app_id}",
            ).items
            if len(pods) == 0:
                raise ValueError(f"No pods found for app {app_id}")
            pod_name = pods[0].metadata.name
    with self.k8s_client_manager.client() as client:
        api = k8s.CoreV1Api(client)
        # Logs are unavailable while the pod is Pending; poll until it moves
        # to another phase (or the startup timeout elapses).
        while True:
            pod = api.read_namespaced_pod(
                namespace=namespace,
                name=pod_name,
            )
            if pod.status.phase != "Pending":
                break
            if startup_timeout and start_time + startup_timeout < time.time():
                raise TimeoutError("App startup timeout")
            time.sleep(5)
        watcher = watch.Watch()
        try:
            for line in watcher.stream(
                api.read_namespaced_pod_log,
                namespace=namespace,
                name=pod_name,
            ):
                self.log(msg=line, level=logging.INFO, should_print=should_print)
        finally:
            # BUGFIX: previously the watcher was only stopped when the stream
            # ended cleanly; an exception while streaming (or in self.log)
            # leaked the watch connection. Always stop it.
            watcher.stop()

wait_for_app(*, namespace, pod_name=None, app_id=None, poll_interval=10, should_print=False, startup_timeout=0)

Wait for a Spark app to finish.

Parameters:

Name Type Description Default
namespace str

Namespace.

required
pod_name str

Pod name.

None
app_id str

App ID.

None
poll_interval float

Poll interval in seconds. Defaults to 10.

10
startup_timeout float

Timeout in seconds to wait for the app to start. Defaults to 0 (no timeout).

0
should_print bool

Whether to print logs instead of logging them.

False
Source code in spark_on_k8s/utils/app_manager.py
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
def wait_for_app(
    self,
    *,
    namespace: str,
    pod_name: str | None = None,
    app_id: str | None = None,
    poll_interval: float = 10,
    should_print: bool = False,
    startup_timeout: float = 0,
):
    """Wait for a Spark app to finish.

    Polls the app status every ``poll_interval`` seconds until it reaches a
    terminal state (Succeeded, Failed or Unknown), or returns early if the
    driver pod was deleted.

    Args:
        namespace (str): Namespace.
        pod_name (str): Pod name.
        app_id (str): App ID.
        poll_interval (float, optional): Poll interval in seconds. Defaults to 10.
        startup_timeout (float, optional): Timeout in seconds to wait for the app to start.
            Defaults to 0 (no timeout).
        should_print (bool, optional): Whether to print logs instead of logging them.

    Raises:
        TimeoutError: If the app stays Pending longer than startup_timeout.
        ApiException: For Kubernetes API failures other than a missing pod (404).
    """
    start_time = time.time()
    termination_statuses = {SparkAppStatus.Succeeded, SparkAppStatus.Failed, SparkAppStatus.Unknown}
    with self.k8s_client_manager.client() as client:
        api = k8s.CoreV1Api(client)
        while True:
            try:
                status = self.app_status(
                    namespace=namespace, pod_name=pod_name, app_id=app_id, client=api
                )
                if status in termination_statuses:
                    break
                if status == SparkAppStatus.Pending:
                    if startup_timeout and start_time + startup_timeout < time.time():
                        raise TimeoutError("App startup timeout")

            except ApiException as e:
                if e.status == 404:
                    self.log(
                        msg=f"Pod {pod_name} was deleted", level=logging.INFO, should_print=should_print
                    )
                    return
                # BUGFIX: non-404 API errors were previously swallowed, which
                # could leave `status` unbound and crash the log call below
                # with an unrelated error. Propagate them instead.
                raise
            self.log(
                msg=f"Pod {pod_name} status is {status}, sleep {poll_interval}s",
                level=logging.INFO,
                should_print=should_print,
            )
            time.sleep(poll_interval)
        self.log(
            msg=f"Pod {pod_name} finished with status {status.value}",
            level=logging.INFO,
            should_print=should_print,
        )