Source code for airflow.providers.cncf.kubernetes.utils.pod_manager
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs."""

from __future__ import annotations

import enum
import json
import math
import time
from collections.abc import Iterable
from contextlib import closing, suppress
from dataclasses import dataclass
from datetime import timedelta
from typing import TYPE_CHECKING, Callable, Generator, Protocol, cast

import pendulum
import tenacity
from deprecated import deprecated
from kubernetes import client, watch
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from pendulum import DateTime
from pendulum.parsing.exceptions import ParserError
from typing_extensions import Literal
from urllib3.exceptions import HTTPError, TimeoutError

from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.cncf.kubernetes.callbacks import ExecutionMode, KubernetesPodOperatorCallback
from airflow.providers.cncf.kubernetes.utils.xcom_sidecar import PodDefaults
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.timezone import utcnow

if TYPE_CHECKING:
    from kubernetes.client.models.core_v1_event_list import CoreV1EventList
    from kubernetes.client.models.v1_container_status import V1ContainerStatus
    from kubernetes.client.models.v1_pod import V1Pod
    from urllib3.response import HTTPResponse

EMPTY_XCOM_RESULT = "__airflow_xcom_result_empty__"
"""Sentinel for no xcom result.

:meta private:
"""
class PodLaunchFailedException(AirflowException):
    """When pod launching fails in KubernetesPodOperator."""
def should_retry_start_pod(exception: BaseException) -> bool:
    """Check if an Exception indicates a transient error and warrants retrying."""
    if isinstance(exception, ApiException):
        return str(exception.status) == "409"
    return False
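For example, ``should_retry_start_pod`` can serve as a tenacity retry predicate so that transient 409 conflicts from the API server are retried while other errors fail fast. The sketch below is illustrative only; the ``start_pod`` helper and the retry/backoff settings are assumptions, not part of this module:

    import tenacity

    @tenacity.retry(
        retry=tenacity.retry_if_exception(should_retry_start_pod),
        stop=tenacity.stop_after_attempt(3),
        wait=tenacity.wait_exponential(multiplier=1, max=10),
        reraise=True,
    )
    def start_pod(pod_manager, pod):
        # Retried only when the API returns a 409 Conflict; any other error propagates.
        return pod_manager.create_pod(pod)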
class PodPhase:
    """
    Possible pod phases.

    See https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
    """

    PENDING = "Pending"
    RUNNING = "Running"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"

    terminal_states = {FAILED, SUCCEEDED}
class PodOperatorHookProtocol(Protocol):
    """
    Protocol to define methods relied upon by KubernetesPodOperator.

    Subclasses of KubernetesPodOperator, such as GKEStartPodOperator, may use hooks that don't extend
    KubernetesHook.  We use this protocol to document the methods used by KPO and ensure that these
    methods exist on such other hooks.
    """

    @property
    def is_in_cluster(self) -> bool:
        """Expose whether the hook is configured with ``load_incluster_config`` or not."""
    def get_pod(self, name: str, namespace: str) -> V1Pod:
        """Read pod object from kubernetes API."""
    def get_namespace(self) -> str | None:
        """Return the namespace that is defined in the connection."""
    def get_xcom_sidecar_container_image(self) -> str | None:
        """Return the xcom sidecar image that is defined in the connection."""
    def get_xcom_sidecar_container_resources(self) -> str | None:
        """Return the xcom sidecar resources that are defined in the connection."""
def get_container_status(pod: V1Pod, container_name: str) -> V1ContainerStatus | None:
    """Retrieve container status."""
    container_statuses = pod.status.container_statuses if pod and pod.status else None
    if container_statuses:
        # In general the variable container_statuses can store multiple items matching different containers.
        # The following generator expression yields all items that have name equal to the container_name.
        # The function next() here calls the generator to get only the first value. If there's nothing found
        # then None is returned.
        return next((x for x in container_statuses if x.name == container_name), None)
    return None
def container_is_running(pod: V1Pod, container_name: str) -> bool:
    """
    Examine V1Pod ``pod`` to determine whether ``container_name`` is running.

    If that container is present and running, returns True.  Returns False otherwise.
    """
    container_status = get_container_status(pod, container_name)
    if not container_status:
        return False
    return container_status.state.running is not None
def container_is_completed(pod: V1Pod, container_name: str) -> bool:
    """
    Examine V1Pod ``pod`` to determine whether ``container_name`` is completed.

    If that container is present and completed, returns True.  Returns False otherwise.
    """
    container_status = get_container_status(pod, container_name)
    if not container_status:
        return False
    return container_status.state.terminated is not None
def container_is_succeeded(pod: V1Pod, container_name: str) -> bool:
    """
    Examine V1Pod ``pod`` to determine whether ``container_name`` is completed and succeeded.

    If that container is present, completed, and succeeded, returns True.  Returns False otherwise.
    """
    if not container_is_completed(pod, container_name):
        return False

    container_status = get_container_status(pod, container_name)
    if not container_status:
        return False
    return container_status.state.terminated.exit_code == 0
def container_is_terminated(pod: V1Pod, container_name: str) -> bool:
    """
    Examine V1Pod ``pod`` to determine whether ``container_name`` is terminated.

    If that container is present and terminated, returns True.  Returns False otherwise.
    """
    container_statuses = pod.status.container_statuses if pod and pod.status else None
    if not container_statuses:
        return False
    container_status = next((x for x in container_statuses if x.name == container_name), None)
    if not container_status:
        return False
    return container_status.state.terminated is not None
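As an illustration (not part of this module), the helpers above can be combined to inspect one container of a pod fetched directly with the Kubernetes client; the pod name, namespace, and the ``base`` container name below are placeholders:

    from kubernetes import client, config

    config.load_kube_config()
    remote_pod = client.CoreV1Api().read_namespaced_pod(name="example-pod", namespace="default")

    if container_is_running(remote_pod, "base"):
        print("container 'base' is running")
    elif container_is_succeeded(remote_pod, "base"):
        print("container 'base' finished with exit code 0")
    elif container_is_terminated(remote_pod, "base"):
        status = get_container_status(remote_pod, "base")
        print("container 'base' failed with exit code", status.state.terminated.exit_code)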
class PodLaunchTimeoutException(AirflowException):
    """When pod does not leave the ``Pending`` phase within specified timeout."""
class PodNotFoundException(AirflowException):
    """Expected pod does not exist in kube-api."""
class PodLogsConsumer:
    """
    Responsible for pulling pod logs from a stream with checking a container status before reading data.

    This class is a workaround for the issue https://github.com/apache/airflow/issues/23497.

    :param response: HTTP response with logs
    :param pod: Pod instance from Kubernetes client
    :param pod_manager: Pod manager instance
    :param container_name: Name of the container that we're reading logs from
    :param post_termination_timeout: (Optional) The period of time in seconds representing for how long
        logs are available after the container termination.
    :param read_pod_cache_timeout: (Optional) The container's status cache lifetime. The container status is
        cached to reduce API calls.

    :meta private:
    """

    def __init__(
        self,
        response: HTTPResponse,
        pod: V1Pod,
        pod_manager: PodManager,
        container_name: str,
        post_termination_timeout: int = 120,
        read_pod_cache_timeout: int = 120,
    ):
        self.response = response
        self.pod = pod
        self.pod_manager = pod_manager
        self.container_name = container_name
        self.post_termination_timeout = post_termination_timeout
        self.last_read_pod_at = None
        self.read_pod_cache = None
        self.read_pod_cache_timeout = read_pod_cache_timeout

    def __iter__(self) -> Generator[bytes, None, None]:
        r"""Yield log items divided by the '\n' symbol."""
        incomplete_log_item: list[bytes] = []
        if self.logs_available():
            for data_chunk in self.response.stream(amt=None, decode_content=True):
                if b"\n" in data_chunk:
                    log_items = data_chunk.split(b"\n")
                    yield from self._extract_log_items(incomplete_log_item, log_items)
                    incomplete_log_item = self._save_incomplete_log_item(log_items[-1])
                else:
                    incomplete_log_item.append(data_chunk)
                if not self.logs_available():
                    break
        if incomplete_log_item:
            yield b"".join(incomplete_log_item)

    @staticmethod
    def _extract_log_items(incomplete_log_item: list[bytes], log_items: list[bytes]):
        yield b"".join(incomplete_log_item) + log_items[0] + b"\n"
        for x in log_items[1:-1]:
            yield x + b"\n"

    @staticmethod
    def _save_incomplete_log_item(sub_chunk: bytes):
        # Keep the trailing partial line only if it is non-empty.
        return [sub_chunk] if sub_chunk else []

    def logs_available(self):
        remote_pod = self.read_pod()
        if container_is_running(pod=remote_pod, container_name=self.container_name):
            return True
        container_status = get_container_status(pod=remote_pod, container_name=self.container_name)
        state = container_status.state if container_status else None
        terminated = state.terminated if state else None
        if terminated:
            termination_time = terminated.finished_at
            if termination_time:
                return termination_time + timedelta(seconds=self.post_termination_timeout) > utcnow()
        return False

    def read_pod(self):
        _now = utcnow()
        if (
            self.read_pod_cache is None
            or self.last_read_pod_at + timedelta(seconds=self.read_pod_cache_timeout) < _now
        ):
            self.read_pod_cache = self.pod_manager.read_pod(self.pod)
            self.last_read_pod_at = _now
        return self.read_pod_cache
@dataclass
class PodLoggingStatus:
    """Return the status of the pod and last log time when exiting from `fetch_container_logs`."""

    running: bool
    last_log_time: DateTime | None
class PodManager(LoggingMixin):
    """Create, monitor, and otherwise interact with Kubernetes pods for use with the KubernetesPodOperator."""

    def __init__(
        self,
        kube_client: client.CoreV1Api,
        callbacks: type[KubernetesPodOperatorCallback] | None = None,
        progress_callback: Callable[[str], None] | None = None,
    ):
        """
        Create the launcher.

        :param kube_client: kubernetes client
        :param callbacks: optional ``KubernetesPodOperatorCallback`` class whose hooks are invoked during
            pod lifecycle events
        :param progress_callback: Callback function invoked when fetching container log.
            This parameter is deprecated, please use ``callbacks`` instead.
        """
        super().__init__()
        self._client = kube_client
        self._progress_callback = progress_callback
        self._watch = watch.Watch()
        self._callbacks = callbacks
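Outside of Airflow, KubernetesPodOperator normally builds this object for you via ``KubernetesHook``; a minimal hand-rolled construction from a local kubeconfig might look like this sketch:

    from kubernetes import client, config

    config.load_kube_config()  # or config.load_incluster_config() when running inside a cluster
    pod_manager = PodManager(kube_client=client.CoreV1Api())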
    def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
        """Run POD asynchronously."""
        sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
        json_pod = json.dumps(sanitized_pod, indent=2)

        self.log.debug("Pod Creation Request: \n%s", json_pod)
        try:
            resp = self._client.create_namespaced_pod(
                body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
            )
            self.log.debug("Pod Creation Response: %s", resp)
        except Exception as e:
            self.log.exception(
                "Exception when attempting to create Namespaced Pod: %s", str(json_pod).replace("\n", " ")
            )
            raise e
        return resp
    def delete_pod(self, pod: V1Pod) -> None:
        """Delete POD."""
        try:
            self._client.delete_namespaced_pod(
                pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
            )
        except ApiException as e:
            # If the pod is already deleted
            if str(e.status) != "404":
                raise
    def create_pod(self, pod: V1Pod) -> V1Pod:
        """Launch the pod asynchronously."""
        return self.run_pod_async(pod)
    def await_pod_start(
        self, pod: V1Pod, startup_timeout: int = 120, startup_check_interval: int = 1
    ) -> None:
        """
        Wait for the pod to reach phase other than ``Pending``.

        :param pod: pod spec that will be monitored
        :param startup_timeout: Timeout (in seconds) for startup of the pod
            (if pod is pending for too long, fails task)
        :param startup_check_interval: Interval (in seconds) between checks
        """
        curr_time = time.time()
        while True:
            remote_pod = self.read_pod(pod)
            if remote_pod.status.phase != PodPhase.PENDING:
                break
            self.log.warning("Pod not yet started: %s", pod.metadata.name)
            if time.time() - curr_time >= startup_timeout:
                msg = (
                    f"Pod took longer than {startup_timeout} seconds to start. "
                    "Check the pod events in kubernetes to determine why."
                )
                raise PodLaunchFailedException(msg)
            time.sleep(startup_check_interval)
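A hedged usage sketch, assuming a ``pod_manager`` instance and a fully built ``V1Pod`` named ``pod``: create the pod, wait for it to leave ``Pending``, and clean it up if startup times out:

    try:
        pod_manager.create_pod(pod)
        pod_manager.await_pod_start(pod, startup_timeout=300, startup_check_interval=5)
    except PodLaunchFailedException:
        # The pod never left Pending; remove it so it does not linger in the namespace.
        pod_manager.delete_pod(pod)
        raise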
    @deprecated(
        reason=(
            "Method `follow_container_logs` is deprecated. Use `fetch_container_logs` instead "
            "with option `follow=True`."
        ),
        category=AirflowProviderDeprecationWarning,
    )
    def follow_container_logs(self, pod: V1Pod, container_name: str) -> PodLoggingStatus:
        return self.fetch_container_logs(pod=pod, container_name=container_name, follow=True)
    def fetch_container_logs(
        self,
        pod: V1Pod,
        container_name: str,
        *,
        follow=False,
        since_time: DateTime | None = None,
        post_termination_timeout: int = 120,
    ) -> PodLoggingStatus:
        """
        Follow the logs of the container and stream them to airflow logging.

        Returns when the container exits.

        Between when the pod starts and logs being available, there might be a delay due to CSR not approved
        and signed yet. In such situation, ApiException is thrown. This is why we are retrying on this
        specific exception.

        :meta private:
        """

        def consume_logs(*, since_time: DateTime | None = None) -> tuple[DateTime | None, Exception | None]:
            """
            Try to follow container logs until container completes.

            For a long-running container, the log read may sometimes be interrupted; such errors are
            suppressed.

            Returns the last timestamp observed in logs.
            """
            exception = None
            last_captured_timestamp = None
            # We timeout connections after 30 minutes because otherwise they can get
            # stuck forever. The 30 is somewhat arbitrary.
            # As a consequence, a TimeoutError will be raised no more than 30 minutes
            # after starting read.
            connection_timeout = 60 * 30
            # We set a shorter read timeout because that helps reduce *connection* timeouts
            # (since the connection will be restarted periodically). And with read timeout,
            # we don't need to worry about either duplicate messages or losing messages; we
            # can safely resume from a few seconds later
            read_timeout = 60 * 5
            try:
                logs = self.read_pod_logs(
                    pod=pod,
                    container_name=container_name,
                    timestamps=True,
                    since_seconds=(
                        math.ceil((pendulum.now() - since_time).total_seconds()) if since_time else None
                    ),
                    follow=follow,
                    post_termination_timeout=post_termination_timeout,
                    _request_timeout=(connection_timeout, read_timeout),
                )
                message_to_log = None
                message_timestamp = None
                progress_callback_lines = []
                try:
                    for raw_line in logs:
                        line = raw_line.decode("utf-8", errors="backslashreplace")
                        line_timestamp, message = self.parse_log_line(line)
                        if line_timestamp:  # detect new log line
                            if message_to_log is None:  # first line in the log
                                message_to_log = message
                                message_timestamp = line_timestamp
                                progress_callback_lines.append(line)
                            else:  # previous log line is complete
                                for line in progress_callback_lines:
                                    if self._progress_callback:
                                        self._progress_callback(line)
                                    if self._callbacks:
                                        self._callbacks.progress_callback(
                                            line=line, client=self._client, mode=ExecutionMode.SYNC
                                        )
                                if message_to_log is not None:
                                    self.log.info("[%s] %s", container_name, message_to_log)
                                last_captured_timestamp = message_timestamp
                                message_to_log = message
                                message_timestamp = line_timestamp
                                progress_callback_lines = [line]
                        else:  # continuation of the previous log line
                            message_to_log = f"{message_to_log}\n{message}"
                            progress_callback_lines.append(line)
                finally:
                    # log the last line and update the last_captured_timestamp
                    for line in progress_callback_lines:
                        if self._progress_callback:
                            self._progress_callback(line)
                        if self._callbacks:
                            self._callbacks.progress_callback(
                                line=line, client=self._client, mode=ExecutionMode.SYNC
                            )
                    if message_to_log is not None:
                        self.log.info("[%s] %s", container_name, message_to_log)
                    last_captured_timestamp = message_timestamp
            except TimeoutError as e:
                # in case of timeout, increment return time by 2 seconds to avoid
                # duplicate log entries
                if val := (last_captured_timestamp or since_time):
                    return val.add(seconds=2), e
            except HTTPError as e:
                exception = e
                self.log.exception(
                    "Reading of logs interrupted for container %r; will retry.",
                    container_name,
                )
            return last_captured_timestamp or since_time, exception

        # note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
        # loop as we do here. But in a long-running process we might temporarily lose connectivity.
        # So the looping logic is there to let us resume following the logs.
        last_log_time = since_time
        while True:
            last_log_time, exc = consume_logs(since_time=last_log_time)
            if not self.container_is_running(pod, container_name=container_name):
                return PodLoggingStatus(running=False, last_log_time=last_log_time)
            if not follow:
                return PodLoggingStatus(running=True, last_log_time=last_log_time)
            else:
                # a timeout is a normal thing and we ignore it and resume following logs
                if not isinstance(exc, TimeoutError):
                    self.log.warning(
                        "Pod %s log read interrupted but container %s still running. Logs generated in the last one second might get duplicated.",
                        pod.metadata.name,
                        container_name,
                    )
                time.sleep(1)

    def _reconcile_requested_log_containers(
        self, requested: Iterable[str] | str | bool, actual: list[str], pod_name
    ) -> list[str]:
        """Return actual containers based on requested."""
        containers_to_log = []
        if actual:
            if isinstance(requested, str):
                # fetch logs only for requested container if only one container is provided
                if requested in actual:
                    containers_to_log.append(requested)
                else:
                    self.log.error(
                        "container %s whose logs were requested not found in the pod %s",
                        requested,
                        pod_name,
                    )
            elif isinstance(requested, bool):
                # if True is provided, get logs for all the containers
                if requested is True:
                    containers_to_log.extend(actual)
                else:
                    self.log.error(
                        "False is not a valid value for container_logs",
                    )
            else:
                # if a sequence of containers are provided, iterate for every container in the pod
                if isinstance(requested, Iterable):
                    for container in requested:
                        if container in actual:
                            containers_to_log.append(container)
                        else:
                            self.log.error(
                                "Container %s whose logs were requested not found in the pod %s",
                                container,
                                pod_name,
                            )
                else:
                    self.log.error(
                        "Invalid type %s specified for container names input parameter", type(requested)
                    )
        else:
            self.log.error("Could not retrieve containers for the pod: %s", pod_name)
        return containers_to_log

    def fetch_requested_container_logs(
        self, pod: V1Pod, containers: Iterable[str] | str | Literal[True], follow_logs=False
    ) -> list[PodLoggingStatus]:
        """
        Follow the logs of containers in the specified pod and publish them to airflow logging.

        Returns when all the containers exit.

        :meta private:
        """
        pod_logging_statuses = []
        all_containers = self.get_container_names(pod)
        containers_to_log = self._reconcile_requested_log_containers(
            requested=containers,
            actual=all_containers,
            pod_name=pod.metadata.name,
        )
        for c in containers_to_log:
            status = self.fetch_container_logs(pod=pod, container_name=c, follow=follow_logs)
            pod_logging_statuses.append(status)
        return pod_logging_statuses
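For illustration, and assuming a pod that has already started, streaming the logs of selected containers to the task log until they exit could look like the sketch below (passing ``containers=True`` would instead request every container except the xcom sidecar):

    statuses = pod_manager.fetch_requested_container_logs(
        pod=pod, containers=["base"], follow_logs=True
    )
    for status in statuses:
        # status.running is False once the container has exited;
        # status.last_log_time is the timestamp of the last observed log line.
        print(status.running, status.last_log_time)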
    def await_container_completion(self, pod: V1Pod, container_name: str) -> None:
        """
        Wait for the given container in the given pod to be completed.

        :param pod: pod spec that will be monitored
        :param container_name: name of the container within the pod to monitor
        """
        while True:
            remote_pod = self.read_pod(pod)
            terminated = container_is_completed(remote_pod, container_name)
            if terminated:
                break
            self.log.info("Waiting for container '%s' state to be completed", container_name)
            time.sleep(1)
    def await_pod_completion(
        self, pod: V1Pod, istio_enabled: bool = False, container_name: str = "base"
    ) -> V1Pod:
        """
        Monitor a pod and return the final state.

        :param pod: pod spec that will be monitored
        :param istio_enabled: whether istio is enabled in the namespace
        :param container_name: name of the container within the pod
        :return: the remote pod in its final state, as a ``V1Pod`` object
        """
        while True:
            remote_pod = self.read_pod(pod)
            if remote_pod.status.phase in PodPhase.terminal_states:
                break
            if istio_enabled and container_is_completed(remote_pod, container_name):
                break
            self.log.info("Pod %s has phase %s", pod.metadata.name, remote_pod.status.phase)
            time.sleep(2)
        return remote_pod
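A typical follow-up, sketched here under the assumption that the main container is named ``base``: wait for the container, then for the pod itself, and fail if the pod did not succeed:

    pod_manager.await_container_completion(pod, container_name="base")
    remote_pod = pod_manager.await_pod_completion(pod)
    if remote_pod.status.phase != PodPhase.SUCCEEDED:
        raise AirflowException(f"Pod {pod.metadata.name} ended in phase {remote_pod.status.phase}")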
    def parse_log_line(self, line: str) -> tuple[DateTime | None, str]:
        """
        Parse a Kubernetes log line into a timestamp and a message.

        :param line: k8s log line
        :return: timestamp and log message
        """
        timestamp, sep, message = line.strip().partition(" ")
        if not sep:
            return None, line
        try:
            last_log_time = cast(DateTime, pendulum.parse(timestamp))
        except ParserError:
            return None, line
        return last_log_time, message
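For example, with ``timestamps=True`` the Kubernetes API prefixes each line with an RFC 3339 timestamp, which this method splits off; a line without a parsable leading timestamp comes back as ``(None, line)``:

    ts, message = pod_manager.parse_log_line("2024-01-01T12:00:00.123456Z hello from the pod")
    # ts is a pendulum DateTime for 2024-01-01 12:00:00.123456 UTC; message == "hello from the pod"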
    def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
        """Read pod and check if the container is running."""
        remote_pod = self.read_pod(pod)
        return container_is_running(pod=remote_pod, container_name=container_name)
    def container_is_terminated(self, pod: V1Pod, container_name: str) -> bool:
        """Read pod and check if the container is terminated."""
        remote_pod = self.read_pod(pod)
        return container_is_terminated(pod=remote_pod, container_name=container_name)
    def read_pod_logs(
        self,
        pod: V1Pod,
        container_name: str,
        tail_lines: int | None = None,
        timestamps: bool = False,
        since_seconds: int | None = None,
        follow=True,
        post_termination_timeout: int = 120,
        **kwargs,
    ) -> PodLogsConsumer:
        """Read log from the POD."""
        additional_kwargs = {}
        if since_seconds:
            additional_kwargs["since_seconds"] = since_seconds
        if tail_lines:
            additional_kwargs["tail_lines"] = tail_lines
        additional_kwargs.update(**kwargs)

        try:
            logs = self._client.read_namespaced_pod_log(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                container=container_name,
                follow=follow,
                timestamps=timestamps,
                _preload_content=False,
                **additional_kwargs,
            )
        except HTTPError:
            self.log.exception("There was an error reading the kubernetes API.")
            raise

        return PodLogsConsumer(
            response=logs,
            pod=pod,
            pod_manager=self,
            container_name=container_name,
            post_termination_timeout=post_termination_timeout,
        )
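An illustrative one-off read, assuming the pod and the ``base`` container name: fetch the last lines without following, and decode them the same way ``fetch_container_logs`` does:

    consumer = pod_manager.read_pod_logs(
        pod=pod, container_name="base", tail_lines=100, timestamps=True, follow=False
    )
    for raw_line in consumer:
        print(raw_line.decode("utf-8", errors="backslashreplace").rstrip())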
    def get_container_names(self, pod: V1Pod) -> list[str]:
        """Return container names from the POD except for the airflow-xcom-sidecar container."""
        pod_info = self.read_pod(pod)
        return [
            container_spec.name
            for container_spec in pod_info.spec.containers
            if container_spec.name != PodDefaults.SIDECAR_CONTAINER_NAME
        ]
    def read_pod_events(self, pod: V1Pod) -> CoreV1EventList:
        """Read events from the POD."""
        try:
            return self._client.list_namespaced_event(
                namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
            )
        except HTTPError as e:
            raise AirflowException(f"There was an error reading the kubernetes API: {e}")
    def read_pod(self, pod: V1Pod) -> V1Pod:
        """Read POD information."""
        try:
            return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
        except HTTPError as e:
            raise AirflowException(f"There was an error reading the kubernetes API: {e}")
    def await_xcom_sidecar_container_start(
        self, pod: V1Pod, timeout: int = 900, log_interval: int = 30
    ) -> None:
        """Check if the sidecar container has reached the 'Running' state before performing do_xcom_push."""
        self.log.info("Checking if xcom sidecar container is started.")
        start_time = time.time()
        last_log_time = start_time

        while True:
            elapsed_time = time.time() - start_time
            if self.container_is_running(pod, PodDefaults.SIDECAR_CONTAINER_NAME):
                self.log.info("The xcom sidecar container has started.")
                break
            if (time.time() - last_log_time) >= log_interval:
                self.log.warning(
                    "Still waiting for the xcom sidecar container to start. Elapsed time: %d seconds.",
                    int(elapsed_time),
                )
                last_log_time = time.time()
            if elapsed_time > timeout:
                raise AirflowException(
                    f"Xcom sidecar container did not start within {timeout // 60} minutes."
                )
            time.sleep(1)
    def extract_xcom(self, pod: V1Pod) -> str:
        """Retrieve XCom value and kill xcom sidecar container."""
        try:
            result = self.extract_xcom_json(pod)
            return result
        finally:
            self.extract_xcom_kill(pod)
    def extract_xcom_json(self, pod: V1Pod) -> str:
        """Retrieve XCom value and also check if xcom json is valid."""
        with closing(
            kubernetes_stream(
                self._client.connect_get_namespaced_pod_exec,
                pod.metadata.name,
                pod.metadata.namespace,
                container=PodDefaults.SIDECAR_CONTAINER_NAME,
                command=["/bin/sh"],
                stdin=True,
                stdout=True,
                stderr=True,
                tty=False,
                _preload_content=False,
            )
        ) as resp:
            result = self._exec_pod_command(
                resp,
                f"if [ -s {PodDefaults.XCOM_MOUNT_PATH}/return.json ]; "
                f"then cat {PodDefaults.XCOM_MOUNT_PATH}/return.json; "
                f"else echo {EMPTY_XCOM_RESULT}; fi",
            )
            if result and result.rstrip() != EMPTY_XCOM_RESULT:
                # Note: result string is parsed to check if it's valid json.
                # This function still returns a string which is converted into json in the calling method.
                json.loads(result)

        if result is None:
            raise AirflowException(f"Failed to extract xcom from pod: {pod.metadata.name}")
        return result
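Putting the xcom pieces together, a hedged sketch of the flow used when ``do_xcom_push`` is enabled: wait for the sidecar, pull the JSON string (``extract_xcom`` also kills the sidecar afterwards), and decode it while treating the sentinel as "no result". The ``pod_manager`` and ``pod`` names are assumed placeholders:

    import json

    pod_manager.await_xcom_sidecar_container_start(pod)
    xcom_json = pod_manager.extract_xcom(pod)
    result = None if xcom_json.rstrip() == EMPTY_XCOM_RESULT else json.loads(xcom_json)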