Set a state of task instances.
def post_set_task_instances_state(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse: """Set a state of task instances.""" body = get_json_request_dict() try: data = set_task_instance_state_form.load(body) except ValidationError as err: raise BadRequest(detail=str(err.messages)) error_message = f"Dag ID {dag_id} not found" dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: raise NotFound(error_message) task_id = data["task_id"] task = dag.task_dict.get(task_id) if not task: error_message = f"Task ID {task_id} not found" raise NotFound(error_message) execution_date = data.get("execution_date") run_id = data.get("dag_run_id") if ( execution_date and ( session.scalars( select(TI).where( TI.task_id == task_id, TI.dag_id == dag_id, TI.execution_date == execution_date ) ).one_or_none() ) is None ): raise NotFound( detail=f"Task instance not found for task {task_id!r} on execution_date {execution_date}" ) if run_id and not session.get( TI, {"task_id": task_id, "dag_id": dag_id, "run_id": run_id, "map_index": -1} ): error_message = f"Task instance not found for task {task_id!r} on DAG run with ID {run_id!r}" raise NotFound(detail=error_message) tis = dag.set_task_instance_state( task_id=task_id, run_id=run_id, execution_date=execution_date, state=data["new_state"], upstream=data["include_upstream"], downstream=data["include_downstream"], future=data["include_future"], past=data["include_past"], commit=not data["dry_run"], session=session, ) return task_instance_reference_collection_schema.dump(TaskInstanceReferenceCollection(task_instances=tis))
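A hedged usage sketch: calling the stable REST API endpoint that this handler backs. The endpoint path is assumed from the published OpenAPI spec, and the base URL, credentials, DAG id, task id and run id are placeholders; the JSON fields mirror the form loaded by the handler above.

    import requests

    resp = requests.post(
        "http://localhost:8080/api/v1/dags/example_dag/updateTaskInstancesState",  # assumed path
        auth=("admin", "admin"),  # placeholder credentials
        json={
            "task_id": "my_task",                               # task to update
            "dag_run_id": "manual__2024-01-01T00:00:00+00:00",  # placeholder run id
            "new_state": "success",
            "include_upstream": False,
            "include_downstream": True,
            "include_future": False,
            "include_past": False,
            "dry_run": True,  # with dry_run=True the change is not committed
        },
    )
    print(resp.status_code, resp.json())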
Set the note for a Mapped Task instance.
def set_mapped_task_instance_note( *, dag_id: str, dag_run_id: str, task_id: str, map_index: int ) -> APIResponse: """Set the note for a Mapped Task instance.""" return set_task_instance_note(dag_id=dag_id, dag_run_id=dag_run_id, task_id=task_id, map_index=map_index)
Update the state of a task instance.
def patch_task_instance( *, dag_id: str, dag_run_id: str, task_id: str, map_index: int = -1, session: Session = NEW_SESSION ) -> APIResponse: """Update the state of a task instance.""" body = get_json_request_dict() try: data = set_single_task_instance_state_form.load(body) except ValidationError as err: raise BadRequest(detail=str(err.messages)) dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: raise NotFound("DAG not found", detail=f"DAG {dag_id!r} not found") if not dag.has_task(task_id): raise NotFound("Task not found", detail=f"Task {task_id!r} not found in DAG {dag_id!r}") ti: TI | None = session.get( TI, {"task_id": task_id, "dag_id": dag_id, "run_id": dag_run_id, "map_index": map_index} ) if not ti: error_message = f"Task instance not found for task {task_id!r} on DAG run with ID {dag_run_id!r}" raise NotFound(detail=error_message) if not data["dry_run"]: ti = dag.set_task_instance_state( task_id=task_id, run_id=dag_run_id, map_indexes=[map_index], state=data["new_state"], commit=True, session=session, ) return task_instance_reference_schema.dump(ti)
Update the state of a mapped task instance.
def patch_mapped_task_instance( *, dag_id: str, dag_run_id: str, task_id: str, map_index: int, session: Session = NEW_SESSION ) -> APIResponse: """Update the state of a mapped task instance.""" return patch_task_instance( dag_id=dag_id, dag_run_id=dag_run_id, task_id=task_id, map_index=map_index, session=session )
Set the note for a Task instance. This supports both Mapped and non-Mapped Task instances.
def set_task_instance_note( *, dag_id: str, dag_run_id: str, task_id: str, map_index: int = -1, session: Session = NEW_SESSION ) -> APIResponse: """Set the note for a Task instance. This supports both Mapped and non-Mapped Task instances.""" try: post_body = set_task_instance_note_form_schema.load(get_json_request_dict()) new_note = post_body["note"] except ValidationError as err: raise BadRequest(detail=str(err)) query = ( select(TI) .where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id) .join(TI.dag_run) .outerjoin( SlaMiss, and_( SlaMiss.dag_id == TI.dag_id, SlaMiss.execution_date == DR.execution_date, SlaMiss.task_id == TI.task_id, ), ) .add_columns(SlaMiss) .options(joinedload(TI.rendered_task_instance_fields)) ) if map_index == -1: query = query.where(or_(TI.map_index == -1, TI.map_index is None)) else: query = query.where(TI.map_index == map_index) try: result = session.execute(query).one_or_none() except MultipleResultsFound: raise NotFound( "Task instance not found", detail="Task instance is mapped, add the map_index value to the URL" ) if result is None: error_message = f"Task Instance not found for dag_id={dag_id}, run_id={dag_run_id}, task_id={task_id}" raise NotFound(error_message) ti, sla_miss = result current_user_id = get_auth_manager().get_user_id() if ti.task_instance_note is None: ti.note = (new_note, current_user_id) else: ti.task_instance_note.content = new_note ti.task_instance_note.user_id = current_user_id session.commit() return task_instance_schema.dump((ti, sla_miss))
Delete variable.
def delete_variable(*, variable_key: str) -> Response: """Delete variable.""" if Variable.delete(variable_key) == 0: raise NotFound("Variable not found") return Response(status=HTTPStatus.NO_CONTENT)
Get a variable by key.
def get_variable(*, variable_key: str, session: Session = NEW_SESSION) -> Response: """Get a variable by key.""" var = session.scalar(select(Variable).where(Variable.key == variable_key).limit(1)) if not var: raise NotFound("Variable not found", detail="Variable does not exist") return variable_schema.dump(var)
Get all variable values.
def get_variables( *, limit: int | None, order_by: str = "id", offset: int | None = None, session: Session = NEW_SESSION, ) -> Response: """Get all variable values.""" total_entries = session.execute(select(func.count(Variable.id))).scalar() to_replace = {"value": "val"} allowed_sort_attrs = ["value", "key", "id"] query = select(Variable) query = apply_sorting(query, order_by, to_replace, allowed_sort_attrs) variables = session.scalars(query.offset(offset).limit(limit)).all() return variable_collection_schema.dump( { "variables": variables, "total_entries": total_entries, } )
Update a variable by key.
def patch_variable( *, variable_key: str, update_mask: UpdateMask = None, session: Session = NEW_SESSION, ) -> Response: """Update a variable by key.""" try: data = variable_schema.load(get_json_request_dict()) except ValidationError as err: raise BadRequest("Invalid Variable schema", detail=str(err.messages)) if data["key"] != variable_key: raise BadRequest("Invalid post body", detail="key from request body doesn't match uri parameter") non_update_fields = ["key"] variable = session.scalar(select(Variable).filter_by(key=variable_key).limit(1)) if not variable: raise NotFound("Variable not found", detail="Variable does not exist") if update_mask: data = extract_update_mask_data(update_mask, non_update_fields, data) for key, val in data.items(): setattr(variable, key, val) session.add(variable) return variable_schema.dump(variable)
Create a variable.
def post_variables() -> Response: """Create a variable.""" try: data = variable_schema.load(get_json_request_dict()) except ValidationError as err: raise BadRequest("Invalid Variable schema", detail=str(err.messages)) Variable.set(data["key"], data["val"], description=data.get("description", None)) return variable_schema.dump(data)
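A minimal sketch of creating a variable through the REST endpoint this handler serves. URL and credentials are placeholders; the payload fields follow the variable schema used above (the external "value" field maps to "val" internally, and "description" is optional).

    import requests

    resp = requests.post(
        "http://localhost:8080/api/v1/variables",  # assumed path
        auth=("admin", "admin"),                   # placeholder credentials
        json={"key": "my_var", "value": "42", "description": "example variable"},
    )
    print(resp.status_code, resp.json())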
Get version information.
def get_version() -> APIResponse: """Get version information.""" airflow_version = airflow.__version__ git_version = get_airflow_git_version() version_info = VersionInfo(version=airflow_version, git_version=git_version) return version_info_schema.dump(version_info)
Get all XCom values.
def get_xcom_entries( *, dag_id: str, dag_run_id: str, task_id: str, map_index: int | None = None, xcom_key: str | None = None, limit: int | None, offset: int | None = None, session: Session = NEW_SESSION, ) -> APIResponse: """Get all XCom values.""" query = select(XCom) if dag_id == "~": readable_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"], user=g.user) query = query.where(XCom.dag_id.in_(readable_dag_ids)) query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id)) else: query = query.where(XCom.dag_id == dag_id) query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id)) if task_id != "~": query = query.where(XCom.task_id == task_id) if dag_run_id != "~": query = query.where(DR.run_id == dag_run_id) if map_index is not None: query = query.where(XCom.map_index == map_index) if xcom_key is not None: query = query.where(XCom.key == xcom_key) # Match idx_xcom_task_instance + idx_xcom_key for performance. query = query.order_by(XCom.dag_id, XCom.task_id, XCom.run_id, XCom.map_index, XCom.key) total_entries = get_query_count(query, session=session) query = session.scalars(query.offset(offset).limit(limit)) return xcom_collection_schema.dump(XComCollection(xcom_entries=query, total_entries=total_entries))
Get an XCom entry.
def get_xcom_entry( *, dag_id: str, task_id: str, dag_run_id: str, xcom_key: str, map_index: int = -1, deserialize: bool = False, session: Session = NEW_SESSION, ) -> APIResponse: """Get an XCom entry.""" if deserialize: if not conf.getboolean("api", "enable_xcom_deserialize_support", fallback=False): raise BadRequest(detail="XCom deserialization is disabled in configuration.") query = select(XCom, XCom.value) else: query = select(XCom) query = query.where( XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.key == xcom_key, XCom.map_index == map_index ) query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id)) query = query.where(DR.run_id == dag_run_id) if deserialize: item = session.execute(query).one_or_none() else: item = session.scalars(query).one_or_none() if item is None: raise NotFound("XCom entry not found") if deserialize: xcom, value = item stub = copy.copy(xcom) stub.value = value stub.value = XCom.deserialize_value(stub) item = stub return xcom_schema.dump(item)
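Sketch of fetching a single XCom value through the corresponding REST route (path assumed from the stable API spec; ids and credentials are placeholders). Passing deserialize=true only works when [api] enable_xcom_deserialize_support is enabled, as checked above.

    import requests

    resp = requests.get(
        "http://localhost:8080/api/v1/dags/example_dag/dagRuns/"
        "manual__2024-01-01T00:00:00+00:00/taskInstances/my_task/xcomEntries/return_value",
        params={"deserialize": "false", "map_index": -1},
        auth=("admin", "admin"),
    )
    print(resp.status_code, resp.json())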
Set process title. This is used by airflow.cli.commands.internal_api_command to track the status of the worker.
def post_worker_init(_): """ Set process title. This is used by airflow.cli.commands.internal_api_command to track the status of the worker. """ old_title = setproctitle.getproctitle() setproctitle.setproctitle(settings.GUNICORN_WORKER_READY_PREFIX + old_title)
Allow methods to be executed in database isolation mode. If [core]database_access_isolation is true, such methods are not executed locally; instead, an RPC call is made to the Database API (aka Internal API). This decouples some components from direct Airflow database access. Each decorated method must be present in the METHODS list in airflow.api_internal.endpoints.rpc_api_endpoint. Only static methods can be decorated. This decorator must be applied before "provide_session". See [AIP-44](https://cwiki.apache.org/confluence/display/AIRFLOW/AIP-44+Airflow+Internal+API) for more information.
def internal_api_call(func: Callable[PS, RT]) -> Callable[PS, RT]: """ Allow methods to be executed in database isolation mode. If [core]database_access_isolation is true then such method are not executed locally, but instead RPC call is made to Database API (aka Internal API). This makes some components decouple from direct Airflow database access. Each decorated method must be present in METHODS list in airflow.api_internal.endpoints.rpc_api_endpoint. Only static methods can be decorated. This decorator must be before "provide_session". See [AIP-44](https://cwiki.apache.org/confluence/display/AIRFLOW/AIP-44+Airflow+Internal+API) for more information . """ headers = { "Content-Type": "application/json", } from requests.exceptions import ConnectionError @tenacity.retry( stop=tenacity.stop_after_attempt(10), wait=tenacity.wait_exponential(min=1), retry=tenacity.retry_if_exception_type((NewConnectionError, ConnectionError)), before_sleep=tenacity.before_log(logger, logging.WARNING), ) def make_jsonrpc_request(method_name: str, params_json: str) -> bytes: data = {"jsonrpc": "2.0", "method": method_name, "params": params_json} internal_api_endpoint = InternalApiConfig.get_internal_api_endpoint() response = requests.post(url=internal_api_endpoint, data=json.dumps(data), headers=headers) if response.status_code != 200: raise AirflowException( f"Got {response.status_code}:{response.reason} when sending " f"the internal api request: {response.text}" ) return response.content @wraps(func) def wrapper(*args, **kwargs): use_internal_api = InternalApiConfig.get_use_internal_api() if not use_internal_api: return func(*args, **kwargs) from airflow.serialization.serialized_objects import BaseSerialization # avoid circular import bound = inspect.signature(func).bind(*args, **kwargs) arguments_dict = dict(bound.arguments) if "session" in arguments_dict: del arguments_dict["session"] if "cls" in arguments_dict: # used by @classmethod del arguments_dict["cls"] args_dict = BaseSerialization.serialize(arguments_dict, use_pydantic_models=True) method_name = f"{func.__module__}.{func.__qualname__}" result = make_jsonrpc_request(method_name, args_dict) if result is None or result == b"": return None return BaseSerialization.deserialize(json.loads(result), use_pydantic_models=True) return wrapper
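A hypothetical illustration (the class and method names are made up, not part of Airflow): a static method decorated so that, when [core]database_access_isolation is enabled, the call is serialized and routed through the Internal API instead of touching the database directly. Per the docstring above, @internal_api_call sits above @provide_session, and the method would additionally have to be registered in the METHODS list of airflow.api_internal.endpoints.rpc_api_endpoint.

    from airflow.api_internal.internal_api_call import internal_api_call
    from airflow.utils.session import NEW_SESSION, provide_session
    from sqlalchemy import func, select


    class DagRunStats:
        @staticmethod
        @internal_api_call
        @provide_session
        def count_runs(dag_id: str, session=NEW_SESSION) -> int:
            # Local import to avoid circular imports; runs locally in normal mode,
            # as an RPC call when database isolation is enabled.
            from airflow.models.dagrun import DagRun

            return session.scalar(select(func.count()).where(DagRun.dag_id == dag_id))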
Handle Internal API /internal_api/v1/rpcapi endpoint.
def internal_airflow_api(body: dict[str, Any]) -> APIResponse: """Handle Internal API /internal_api/v1/rpcapi endpoint.""" log.debug("Got request") json_rpc = body.get("jsonrpc") if json_rpc != "2.0": return log_and_build_error_response(message="Expected jsonrpc 2.0 request.", status=400) methods_map = _initialize_map() method_name = body.get("method") if method_name not in methods_map: return log_and_build_error_response(message=f"Unrecognized method: {method_name}.", status=400) handler = methods_map[method_name] params = {} try: if body.get("params"): params_json = body.get("params") params = BaseSerialization.deserialize(params_json, use_pydantic_models=True) except Exception: return log_and_build_error_response(message="Error deserializing parameters.", status=400) log.debug("Calling method %s\nparams: %s", method_name, params) try: # Session must be created there as it may be needed by serializer for lazy-loaded fields. with create_session() as session: output = handler(**params, session=session) output_json = BaseSerialization.serialize(output, use_pydantic_models=True) response = json.dumps(output_json) if output_json is not None else None return Response(response=response, headers={"Content-Type": "application/json"}) except Exception: return log_and_build_error_response(message=f"Error executing method '{method_name}'.", status=500)
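The request body shape this handler expects, read directly from the code above: a JSON-RPC 2.0 envelope whose "method" is a registered "module.qualname" and whose "params" holds BaseSerialization-serialized keyword arguments. The method name below is purely illustrative.

    body = {
        "jsonrpc": "2.0",
        "method": "airflow.models.dagrun.DagRun.fetch_task_instances",  # hypothetical entry in METHODS
        "params": {},  # output of BaseSerialization.serialize(kwargs, use_pydantic_models=True)
    }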
Return the map associating a method to a FAB action.
def get_fab_action_from_method_map(): """Return the map associating a method to a FAB action.""" return _MAP_METHOD_NAME_TO_FAB_ACTION_NAME
Return the map associating a FAB action to a method.
def get_method_from_fab_action_map(): """Return the map associating a FAB action to a method.""" return { **{v: k for k, v in _MAP_METHOD_NAME_TO_FAB_ACTION_NAME.items()}, }
Create a lazy loader for command.
def lazy_load_command(import_path: str) -> Callable: """Create a lazy loader for command.""" _, _, name = import_path.rpartition(".") def command(*args, **kwargs): func = import_string(import_path) return func(*args, **kwargs) command.__name__ = name return command
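Usage sketch, assuming lazy_load_command as defined above is in scope: the command module is imported only when the command is actually executed, which keeps CLI start-up fast. The import path below is illustrative.

    version_cmd = lazy_load_command("airflow.cli.commands.version_command.version")
    print(version_cmd.__name__)  # "version" -- nothing has been imported yet
    # version_cmd(args)          # the real import happens on first invocation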
Define a positive int type for an argument.
def positive_int(*, allow_zero): """Define a positive int type for an argument.""" def _check(value): try: value = int(value) if allow_zero and value == 0: return value if value > 0: return value except ValueError: pass raise argparse.ArgumentTypeError(f"invalid positive int value: '{value}'") return _check
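Usage sketch: wiring the positive_int factory defined above into argparse.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--limit", type=positive_int(allow_zero=False), default=1)
    parser.add_argument("--offset", type=positive_int(allow_zero=True), default=0)

    ns = parser.parse_args(["--limit", "5", "--offset", "0"])
    print(ns.limit, ns.offset)  # 5 0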
Parse a comma-separated list and return a list of strings (whitespace stripped).
def string_list_type(val): """Parse comma-separated list and returns list of string (strips whitespace).""" return [x.strip() for x in val.split(",")]
Strip whitespace and lower-case the argument.
def string_lower_type(val): """Lower arg.""" if not val: return return val.strip().lower()
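Quick illustration of the two small argparse type helpers above.

    print(string_list_type("dag_id, task_id ,run_id"))  # ['dag_id', 'task_id', 'run_id']
    print(string_lower_type("  INFO "))                 # 'info'
    print(string_lower_type(""))                        # None (falsy input returns None)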
Create and return the command-line argument parser.
def get_parser(dag_parser: bool = False) -> argparse.ArgumentParser: """Create and returns command line argument parser.""" parser = DefaultHelpParser(prog="airflow", formatter_class=AirflowHelpFormatter) subparsers = parser.add_subparsers(dest="subcommand", metavar="GROUP_OR_COMMAND") subparsers.required = True command_dict = DAG_CLI_DICT if dag_parser else ALL_COMMANDS_DICT for _, sub in sorted(command_dict.items()): _add_command(subparsers, sub) return parser
Sort subcommand optional args, keep positional args.
def _sort_args(args: Iterable[Arg]) -> Iterable[Arg]: """Sort subcommand optional args, keep positional args.""" def get_long_option(arg: Arg): """Get long option from Arg.flags.""" return arg.flags[0] if len(arg.flags) == 1 else arg.flags[1] positional, optional = partition(lambda x: x.flags[0].startswith("-"), args) yield from positional yield from sorted(optional, key=lambda x: get_long_option(x).lower())
Check whether a file IO is stdout. The intended use case for this helper is to check whether an argument parsed with argparse.FileType points to stdout (by setting the path to ``-``). This is why there is no equivalent for stderr; argparse does not allow using it. .. warning:: *fileio* must be open for this check to be successful.
def is_stdout(fileio: IOBase) -> bool: """Check whether a file IO is stdout. The intended use case for this helper is to check whether an argument parsed with argparse.FileType points to stdout (by setting the path to ``-``). This is why there is no equivalent for stderr; argparse does not allow using it. .. warning:: *fileio* must be open for this check to be successful. """ return fileio.fileno() == sys.stdout.fileno()
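Usage sketch: with argparse.FileType, passing "-" as the path yields sys.stdout, and is_stdout (defined above) detects that case.

    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument("--file", type=argparse.FileType("w"), default="-")
    ns = parser.parse_args(["--file", "-"])

    target = "stdout" if is_stdout(ns.file) else ns.file.name
    print(f"writing to {target}", file=sys.stderr)
    ns.file.write("hello\n")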
Start Flower, the Celery monitoring tool.
def flower(args): """Start Flower, Celery monitoring tool.""" # This needs to be imported locally to not trigger Providers Manager initialization from airflow.providers.celery.executors.celery_executor import app as celery_app options = [ "flower", conf.get("celery", "BROKER_URL"), f"--address={args.hostname}", f"--port={args.port}", ] if args.broker_api: options.append(f"--broker-api={args.broker_api}") if args.url_prefix: options.append(f"--url-prefix={args.url_prefix}") if args.basic_auth: options.append(f"--basic-auth={args.basic_auth}") if args.flower_conf: options.append(f"--conf={args.flower_conf}") run_command_with_daemon_option( args=args, process_name="flower", callback=lambda: celery_app.start(options) )
Start serve_logs sub-process.
def _serve_logs(skip_serve_logs: bool = False): """Start serve_logs sub-process.""" sub_proc = None if skip_serve_logs is False: sub_proc = Process(target=serve_logs) sub_proc.start() try: yield finally: if sub_proc: sub_proc.terminate()
Reconfigure the logger. * remove any previously configured handlers * logs of severity ERROR and above go to stderr * logs of severity lower than ERROR go to stdout.
def logger_setup_handler(logger, **kwargs): """ Reconfigure the logger. * remove any previously configured handlers * logs of severity error, and above goes to stderr, * logs of severity lower than error goes to stdout. """ if conf.getboolean("logging", "celery_stdout_stderr_separation", fallback=False): celery_formatter = logging.Formatter(DEFAULT_TASK_LOG_FMT) class NoErrorOrAboveFilter(logging.Filter): """Allow only logs with level *lower* than ERROR to be reported.""" def filter(self, record): return record.levelno < logging.ERROR below_error_handler = logging.StreamHandler(sys.stdout) below_error_handler.addFilter(NoErrorOrAboveFilter()) below_error_handler.setFormatter(celery_formatter) from_error_handler = logging.StreamHandler(sys.stderr) from_error_handler.setLevel(logging.ERROR) from_error_handler.setFormatter(celery_formatter) logger.handlers[:] = [below_error_handler, from_error_handler]
Start Airflow Celery worker.
def worker(args): """Start Airflow Celery worker.""" # This needs to be imported locally to not trigger Providers Manager initialization from airflow.providers.celery.executors.celery_executor import app as celery_app # Disable connection pool so that celery worker does not hold an unnecessary db connection settings.reconfigure_orm(disable_connection_pool=True) if not settings.validate_session(): raise SystemExit("Worker exiting, database connection precheck failed.") autoscale = args.autoscale skip_serve_logs = args.skip_serve_logs if autoscale is None and conf.has_option("celery", "worker_autoscale"): autoscale = conf.get("celery", "worker_autoscale") if hasattr(celery_app.backend, "ResultSession"): # Pre-create the database tables now, otherwise SQLA via Celery has a # race condition where one of the subprocesses can die with "Table # already exists" error, because SQLA checks for which tables exist, # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT # EXISTS try: session = celery_app.backend.ResultSession() session.close() except sqlalchemy.exc.IntegrityError: # At least on postgres, trying to create a table that already exist # gives a unique constraint violation or the # "pg_type_typname_nsp_index" table. If this happens we can ignore # it, we raced to create the tables and lost. pass # backwards-compatible: https://github.com/apache/airflow/pull/21506#pullrequestreview-879893763 celery_log_level = conf.get("logging", "CELERY_LOGGING_LEVEL") if not celery_log_level: celery_log_level = conf.get("logging", "LOGGING_LEVEL") # Setup Celery worker options = [ "worker", "-O", "fair", "--queues", args.queues, "--concurrency", args.concurrency, "--hostname", args.celery_hostname, "--loglevel", celery_log_level, ] if autoscale: options.extend(["--autoscale", autoscale]) if args.without_mingle: options.append("--without-mingle") if args.without_gossip: options.append("--without-gossip") if conf.has_option("celery", "pool"): pool = conf.get("celery", "pool") options.extend(["--pool", pool]) # Celery pools of type eventlet and gevent use greenlets, which # requires monkey patching the app: # https://eventlet.net/doc/patching.html#monkey-patch # Otherwise task instances hang on the workers and are never # executed. maybe_patch_concurrency(["-P", pool]) worker_pid_file_path, stdout, stderr, log_file = setup_locations( process=WORKER_PROCESS_NAME, stdout=args.stdout, stderr=args.stderr, log=args.log_file, pid=args.pid, ) def run_celery_worker(): with _serve_logs(skip_serve_logs): celery_app.worker_main(options) if args.umask: umask = args.umask else: umask = conf.get("celery", "worker_umask", fallback=settings.DAEMON_UMASK) run_command_with_daemon_option( args=args, process_name=WORKER_PROCESS_NAME, callback=run_celery_worker, should_setup_logging=True, umask=umask, pid_file=worker_pid_file_path, )
Send SIGTERM to Celery worker.
def stop_worker(args): """Send SIGTERM to Celery worker.""" # Read PID from file if args.pid: pid_file_path = args.pid else: pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME) pid = read_pid_from_pidfile(pid_file_path) # Send SIGTERM if pid: worker_process = psutil.Process(pid) worker_process.terminate() # Remove pid file remove_existing_pidfile(pid_file_path)
Display cheat-sheet.
def cheat_sheet(args): """Display cheat-sheet.""" display_commands_index()
Display list of all commands.
def display_commands_index(): """Display list of all commands.""" def display_recursive( prefix: list[str], commands: Iterable[GroupCommand | ActionCommand], help_msg: str | None = None, ): actions: list[ActionCommand] = [] groups: list[GroupCommand] = [] for command in commands: if isinstance(command, GroupCommand): groups.append(command) else: actions.append(command) console = AirflowConsole() if actions: table = SimpleTable(title=help_msg or "Miscellaneous commands") table.add_column(width=46) table.add_column() for action_command in sorted(actions, key=lambda d: d.name): table.add_row(" ".join([*prefix, action_command.name]), action_command.help) console.print(table) if groups: for group_command in sorted(groups, key=lambda d: d.name): group_prefix = [*prefix, group_command.name] display_recursive(group_prefix, group_command.subcommands, group_command.help) display_recursive(["airflow"], airflow_commands)
Show current application configuration.
def show_config(args): """Show current application configuration.""" with StringIO() as output: conf.write( output, section=args.section, include_examples=args.include_examples or args.defaults, include_descriptions=args.include_descriptions or args.defaults, include_sources=args.include_sources and not args.defaults, include_env_vars=args.include_env_vars or args.defaults, include_providers=not args.exclude_providers, comment_out_everything=args.comment_out_everything or args.defaults, only_defaults=args.defaults, ) code = output.getvalue() if should_use_colors(args): code = pygments.highlight(code=code, formatter=get_terminal_formatter(), lexer=IniLexer()) print(code)
Get one value from configuration.
def get_value(args): """Get one value from configuration.""" # while this will make get_value quite a bit slower we must initialize configuration # for providers because we do not know what sections and options will be available after # providers are initialized. Theoretically Providers might add new sections and options # but also override defaults for existing options, so without loading all providers we # cannot be sure what is the final value of the option. if not conf.has_option(args.section, args.option): raise SystemExit(f"The option [{args.section}/{args.option}] is not found in config.") value = conf.get(args.section, args.option) print(value)
Get a connection.
def connections_get(args): """Get a connection.""" try: conn = BaseHook.get_connection(args.conn_id) except AirflowNotFoundException: raise SystemExit("Connection not found.") AirflowConsole().print_as( data=[conn], output=args.output, mapper=_connection_mapper, )
List all connections at the command line.
def connections_list(args): """List all connections at the command line.""" with create_session() as session: query = select(Connection) if args.conn_id: query = query.where(Connection.conn_id == args.conn_id) query = session.scalars(query) conns = query.all() AirflowConsole().print_as( data=conns, output=args.output, mapper=_connection_mapper, )
Check if a URI is valid by verifying that a scheme (conn_type) is provided.
def _valid_uri(uri: str) -> bool: """Check if a URI is valid, by checking if scheme (conn_type) provided.""" return urlsplit(uri).scheme != ""
Return the list of available connection types.
def _get_connection_types() -> list[str]: """Return connection types available.""" _connection_types = [] providers_manager = ProvidersManager() for connection_type, provider_info in providers_manager.hooks.items(): if provider_info: _connection_types.append(connection_type) return _connection_types
Export all connections to a file.
def connections_export(args): """Export all connections to a file.""" file_formats = [".yaml", ".json", ".env"] if args.format: warnings.warn( "Option `--format` is deprecated. Use `--file-format` instead.", DeprecationWarning, stacklevel=3 ) if args.format and args.file_format: raise SystemExit("Option `--format` is deprecated. Use `--file-format` instead.") default_format = ".json" provided_file_format = None if args.format or args.file_format: provided_file_format = f".{(args.format or args.file_format).lower()}" with args.file as f: if is_stdout(f): filetype = provided_file_format or default_format elif provided_file_format: filetype = provided_file_format else: filetype = Path(args.file.name).suffix.lower() if filetype not in file_formats: raise SystemExit( f"Unsupported file format. The file must have the extension {', '.join(file_formats)}." ) if args.serialization_format and filetype != ".env": raise SystemExit("Option `--serialization-format` may only be used with file type `env`.") with create_session() as session: connections = session.scalars(select(Connection).order_by(Connection.conn_id)).all() msg = _format_connections( conns=connections, file_format=filetype, serialization_format=args.serialization_format or "uri", ) f.write(msg) print_export_output("Connections", connections, f)
Add new connection.
def connections_add(args): """Add new connection.""" has_uri = bool(args.conn_uri) has_json = bool(args.conn_json) has_type = bool(args.conn_type) # Validate connection-id try: helpers.validate_key(args.conn_id, max_length=200) except Exception as e: raise SystemExit(f"Could not create connection. {e}") if not has_type and not (has_json or has_uri): raise SystemExit("Must supply either conn-uri or conn-json if not supplying conn-type") if has_json and has_uri: raise SystemExit("Cannot supply both conn-uri and conn-json") if has_type and args.conn_type not in _get_connection_types(): warnings.warn( f"The type provided to --conn-type is invalid: {args.conn_type}", UserWarning, stacklevel=4 ) warnings.warn( f"Supported --conn-types are:{_get_connection_types()}." "Hence overriding the conn-type with generic", UserWarning, stacklevel=4, ) args.conn_type = "generic" if has_uri or has_json: invalid_args = [] if has_uri and not _valid_uri(args.conn_uri): raise SystemExit(f"The URI provided to --conn-uri is invalid: {args.conn_uri}") for arg in alternative_conn_specs: if getattr(args, arg) is not None: invalid_args.append(arg) if has_json and args.conn_extra: invalid_args.append("--conn-extra") if invalid_args: raise SystemExit( "The following args are not compatible with " f"the --conn-{'uri' if has_uri else 'json'} flag: {invalid_args!r}" ) if args.conn_uri: new_conn = Connection(conn_id=args.conn_id, description=args.conn_description, uri=args.conn_uri) if args.conn_extra is not None: new_conn.set_extra(args.conn_extra) elif args.conn_json: new_conn = Connection.from_json(conn_id=args.conn_id, value=args.conn_json) if not new_conn.conn_type: raise SystemExit("conn-json is invalid; must supply conn-type") else: new_conn = Connection( conn_id=args.conn_id, conn_type=args.conn_type, description=args.conn_description, host=args.conn_host, login=args.conn_login, password=args.conn_password, schema=args.conn_schema, port=args.conn_port, ) if args.conn_extra is not None: new_conn.set_extra(args.conn_extra) with create_session() as session: if not session.scalar(select(Connection).where(Connection.conn_id == new_conn.conn_id).limit(1)): session.add(new_conn) msg = "Successfully added `conn_id`={conn_id} : {uri}" msg = msg.format( conn_id=new_conn.conn_id, uri=args.conn_uri or urlunsplit( ( new_conn.conn_type, f"{new_conn.login or ''}:{'******' if new_conn.password else ''}" f"@{new_conn.host or ''}:{new_conn.port or ''}", new_conn.schema or "", "", "", ) ), ) print(msg) else: msg = f"A connection with `conn_id`={new_conn.conn_id} already exists." raise SystemExit(msg)
Delete connection from DB.
def connections_delete(args): """Delete connection from DB.""" with create_session() as session: try: to_delete = session.scalars(select(Connection).where(Connection.conn_id == args.conn_id)).one() except exc.NoResultFound: raise SystemExit(f"Did not find a connection with `conn_id`={args.conn_id}") except exc.MultipleResultsFound: raise SystemExit(f"Found more than one connection with `conn_id`={args.conn_id}") else: session.delete(to_delete) print(f"Successfully deleted connection with `conn_id`={to_delete.conn_id}")
Import connections from a file.
def connections_import(args): """Import connections from a file.""" if os.path.exists(args.file): _import_helper(args.file, args.overwrite) else: raise SystemExit("Missing connections file.")
Load connections from a file and save them to the DB. :param overwrite: Whether to skip or overwrite on collision.
def _import_helper(file_path: str, overwrite: bool) -> None: """Load connections from a file and save them to the DB. :param overwrite: Whether to skip or overwrite on collision. """ connections_dict = load_connections_dict(file_path) with create_session() as session: for conn_id, conn in connections_dict.items(): try: helpers.validate_key(conn_id, max_length=200) except Exception as e: print(f"Could not import connection. {e}") continue existing_conn_id = session.scalar(select(Connection.id).where(Connection.conn_id == conn_id)) if existing_conn_id is not None: if not overwrite: print(f"Could not import connection {conn_id}: connection already exists.") continue # The conn_ids match, but the PK of the new entry must also be the same as the old conn.id = existing_conn_id session.merge(conn) session.commit() print(f"Imported connection {conn_id}")
Test an Airflow connection.
def connections_test(args) -> None: """Test an Airflow connection.""" console = AirflowConsole() if conf.get("core", "test_connection", fallback="Disabled").lower().strip() != "enabled": console.print( "[bold yellow]\nTesting connections is disabled in Airflow configuration. " "Contact your deployment admin to enable it.\n" ) raise SystemExit(1) print(f"Retrieving connection: {args.conn_id!r}") try: conn = BaseHook.get_connection(args.conn_id) except AirflowNotFoundException: console.print("[bold yellow]\nConnection not found.\n") raise SystemExit(1) print("\nTesting...") status, message = conn.test_connection() if status is True: console.print("[bold green]\nConnection success!\n") else: console.print(f"[bold][red]\nConnection failed![/bold]\n{message}\n")
Run the command in a daemon process if daemon mode is enabled, or within this process otherwise. :param args: the set of arguments passed to the original CLI command :param process_name: process name used in naming log and PID files for the daemon :param callback: the actual command to run with or without daemon context :param should_setup_logging: if true, a log file handler for the daemon process will be created :param umask: file access creation mask ("umask") to set for the process on daemon start :param pid_file: if specified, this file path is used to store the daemon process PID. If not specified, a file path is generated with the default pattern.
def run_command_with_daemon_option( *, args: Namespace, process_name: str, callback: Callable, should_setup_logging: bool = False, umask: str = settings.DAEMON_UMASK, pid_file: str | None = None, ): """Run the command in a daemon process if daemon mode enabled or within this process if not. :param args: the set of arguments passed to the original CLI command :param process_name: process name used in naming log and PID files for the daemon :param callback: the actual command to run with or without daemon context :param should_setup_logging: if true, then a log file handler for the daemon process will be created :param umask: file access creation mask ("umask") to set for the process on daemon start :param pid_file: if specified, this file path us used to store daemon process PID. If not specified, a file path is generated with the default pattern. """ if args.daemon: pid = pid_file or args.pid if pid_file is not None or args.pid is not None else None pid, stdout, stderr, log_file = setup_locations( process=process_name, pid=pid, stdout=args.stdout, stderr=args.stderr, log=args.log_file ) # Check if the process is already running; if not but a pidfile exists, clean it up check_if_pidfile_process_is_running(pid_file=pid, process_name=process_name) if should_setup_logging: files_preserve = [setup_logging(log_file)] else: files_preserve = None with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle: stdout_handle.truncate(0) stderr_handle.truncate(0) ctx = daemon.DaemonContext( pidfile=TimeoutPIDLockFile(pid, -1), files_preserve=files_preserve, stdout=stdout_handle, stderr=stderr_handle, umask=int(umask, 8), ) with ctx: # in daemon context stats client needs to be reinitialized. from airflow.stats import Stats Stats.instance = None callback() else: signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigint_handler) signal.signal(signal.SIGQUIT, sigquit_handler) callback()
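A minimal usage sketch, mirroring how the CLI commands above invoke this helper. With daemon=False the callback runs in the current process after signal handlers are installed; the process name and the Namespace attributes below are placeholders matching what the helper reads from args.

    from argparse import Namespace

    args = Namespace(daemon=False, pid=None, stdout=None, stderr=None, log_file=None)
    run_command_with_daemon_option(
        args=args,
        process_name="my-component",               # hypothetical process name
        callback=lambda: print("doing the work"),  # the command to run
    )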
Create a backfill job or dry run for a DAG, or for a list of DAGs matched by a regex.
def dag_backfill(args, dag: list[DAG] | DAG | None = None) -> None: """Create backfill job or dry run for a DAG or list of DAGs using regex.""" logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT) signal.signal(signal.SIGTERM, sigint_handler) if args.ignore_first_depends_on_past: warnings.warn( "--ignore-first-depends-on-past is deprecated as the value is always set to True", category=RemovedInAirflow3Warning, stacklevel=4, ) args.ignore_first_depends_on_past = True if not args.treat_dag_id_as_regex and args.treat_dag_as_regex: warnings.warn( "--treat-dag-as-regex is deprecated, use --treat-dag-id-as-regex instead", category=RemovedInAirflow3Warning, stacklevel=4, ) args.treat_dag_id_as_regex = args.treat_dag_as_regex if not args.start_date and not args.end_date: raise AirflowException("Provide a start_date and/or end_date") if not dag: dags = get_dags(args.subdir, dag_id=args.dag_id, use_regex=args.treat_dag_id_as_regex) elif isinstance(dag, list): dags = dag else: dags = [dag] del dag dags.sort(key=lambda d: d.dag_id) _run_dag_backfill(dags, args) if len(dags) > 1: log.info("All of the backfills are done.")
Create a dag run for the specified dag.
def dag_trigger(args) -> None: """Create a dag run for the specified dag.""" api_client = get_current_api_client() try: message = api_client.trigger_dag( dag_id=args.dag_id, run_id=args.run_id, conf=args.conf, execution_date=args.exec_date, replace_microseconds=args.replace_microseconds, ) AirflowConsole().print_as( data=[message] if message is not None else [], output=args.output, ) except OSError as err: raise AirflowException(err)
Delete all DB records related to the specified dag.
def dag_delete(args) -> None: """Delete all DB records related to the specified dag.""" api_client = get_current_api_client() if ( args.yes or input("This will drop all existing records related to the specified DAG. Proceed? (y/n)").upper() == "Y" ): try: message = api_client.delete_dag(dag_id=args.dag_id) print(message) except OSError as err: raise AirflowException(err) else: print("Cancelled")
Pause a DAG.
def dag_pause(args) -> None: """Pauses a DAG.""" set_is_paused(True, args)
Unpause a DAG.
def dag_unpause(args) -> None: """Unpauses a DAG.""" set_is_paused(False, args)
Set is_paused for DAG by a given dag_id.
def set_is_paused(is_paused: bool, args) -> None: """Set is_paused for DAG by a given dag_id.""" should_apply = True dags = [ dag for dag in get_dags(args.subdir, dag_id=args.dag_id, use_regex=args.treat_dag_id_as_regex) if is_paused != dag.get_is_paused() ] if not dags: print(f"No {'un' if is_paused else ''}paused DAGs were found") return if not args.yes and args.treat_dag_id_as_regex: dags_ids = [dag.dag_id for dag in dags] question = ( f"You are about to {'un' if not is_paused else ''}pause {len(dags_ids)} DAGs:\n" f"{','.join(dags_ids)}" f"\n\nAre you sure? [y/n]" ) should_apply = ask_yesno(question) if should_apply: dags_models = [DagModel.get_dagmodel(dag.dag_id) for dag in dags] for dag_model in dags_models: if dag_model is not None: dag_model.set_is_paused(is_paused=is_paused) AirflowConsole().print_as( data=[ {"dag_id": dag.dag_id, "is_paused": dag.get_is_paused()} for dag in dags_models if dag is not None ], output=args.output, ) else: print("Operation cancelled by user")
Display DAG dependencies, save to file or show as imgcat image.
def dag_dependencies_show(args) -> None: """Display DAG dependencies, save to file or show as imgcat image.""" dot = render_dag_dependencies(SerializedDagModel.get_dag_dependencies()) filename = args.save imgcat = args.imgcat if filename and imgcat: raise SystemExit( "Option --save and --imgcat are mutually exclusive. " "Please remove one option to execute the command.", ) elif filename: _save_dot_to_file(dot, filename) elif imgcat: _display_dot_via_imgcat(dot) else: print(dot.source)
Display a DAG or save its graphical representation to a file.
def dag_show(args) -> None: """Display DAG or saves its graphic representation to the file.""" dag = get_dag(args.subdir, args.dag_id) dot = render_dag(dag) filename = args.save imgcat = args.imgcat if filename and imgcat: raise SystemExit( "Option --save and --imgcat are mutually exclusive. " "Please remove one option to execute the command.", ) elif filename: _save_dot_to_file(dot, filename) elif imgcat: _display_dot_via_imgcat(dot) else: print(dot.source)
Return a dagbag dag details dict.
def _get_dagbag_dag_details(dag: DAG) -> dict: """Return a dagbag dag details dict.""" return { "dag_id": dag.dag_id, "dag_display_name": dag.dag_display_name, "root_dag_id": dag.parent_dag.dag_id if dag.parent_dag else None, "is_paused": dag.get_is_paused(), "is_active": dag.get_is_active(), "is_subdag": dag.is_subdag, "last_parsed_time": None, "last_pickled": None, "last_expired": None, "scheduler_lock": None, "pickle_id": dag.pickle_id, "default_view": dag.default_view, "fileloc": dag.fileloc, "file_token": None, "owners": dag.owner, "description": dag.description, "schedule_interval": dag.schedule_interval, "timetable_description": dag.timetable.description, "tags": dag.tags, "max_active_tasks": dag.max_active_tasks, "max_active_runs": dag.max_active_runs, "max_consecutive_failed_dag_runs": dag.max_consecutive_failed_dag_runs, "has_task_concurrency_limits": any( t.max_active_tis_per_dag is not None or t.max_active_tis_per_dagrun is not None for t in dag.tasks ), "has_import_errors": False, "next_dagrun": None, "next_dagrun_data_interval_start": None, "next_dagrun_data_interval_end": None, "next_dagrun_create_after": None, }
Return the state (and conf if exists) of a DagRun at the command line. >>> airflow dags state tutorial 2015-01-01T00:00:00.000000 running >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000 failed, {"name": "bob", "age": "42"}
def dag_state(args, session: Session = NEW_SESSION) -> None: """ Return the state (and conf if exists) of a DagRun at the command line. >>> airflow dags state tutorial 2015-01-01T00:00:00.000000 running >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000 failed, {"name": "bob", "age": "42"} """ dag = DagModel.get_dagmodel(args.dag_id, session=session) if not dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table") dr = session.scalar(select(DagRun).filter_by(dag_id=args.dag_id, execution_date=args.execution_date)) out = dr.state if dr else None conf_out = "" if out and dr.conf: conf_out = ", " + json.dumps(dr.conf) print(str(out) + conf_out)
Return the next execution datetime of a DAG at the command line. >>> airflow dags next-execution tutorial 2018-08-31 10:38:00
def dag_next_execution(args) -> None: """ Return the next execution datetime of a DAG at the command line. >>> airflow dags next-execution tutorial 2018-08-31 10:38:00 """ dag = get_dag(args.subdir, args.dag_id) with create_session() as session: last_parsed_dag: DagModel = session.scalars( select(DagModel).where(DagModel.dag_id == dag.dag_id) ).one() if last_parsed_dag.get_is_paused(): print("[INFO] Please be reminded this DAG is PAUSED now.", file=sys.stderr) def print_execution_interval(interval: DataInterval | None): if interval is None: print( "[WARN] No following schedule can be found. " "This DAG may have schedule interval '@once' or `None`.", file=sys.stderr, ) print(None) return print(interval.start.isoformat()) next_interval = dag.get_next_data_interval(last_parsed_dag) print_execution_interval(next_interval) for _ in range(1, args.num_executions): next_info = dag.next_dagrun_info(next_interval, restricted=False) next_interval = None if next_info is None else next_info.data_interval print_execution_interval(next_interval)
Display dags with or without stats at the command line.
def dag_list_dags(args, session=NEW_SESSION) -> None: """Display dags with or without stats at the command line.""" cols = args.columns if args.columns else [] invalid_cols = [c for c in cols if c not in dag_schema.fields] valid_cols = [c for c in cols if c in dag_schema.fields] if invalid_cols: from rich import print as rich_print rich_print( f"[red][bold]Error:[/bold] Ignoring the following invalid columns: {invalid_cols}. " f"List of valid columns: {list(dag_schema.fields.keys())}", file=sys.stderr, ) dagbag = DagBag(process_subdir(args.subdir)) if dagbag.import_errors: from rich import print as rich_print rich_print( "[red][bold]Error:[/bold] Failed to load all files. " "For details, run `airflow dags list-import-errors`", file=sys.stderr, ) def get_dag_detail(dag: DAG) -> dict: dag_model = DagModel.get_dagmodel(dag.dag_id, session=session) if dag_model: dag_detail = dag_schema.dump(dag_model) else: dag_detail = _get_dagbag_dag_details(dag) return {col: dag_detail[col] for col in valid_cols} AirflowConsole().print_as( data=sorted(dagbag.dags.values(), key=operator.attrgetter("dag_id")), output=args.output, mapper=get_dag_detail, )
Get DAG details given a DAG id.
def dag_details(args, session=NEW_SESSION): """Get DAG details given a DAG id.""" dag = DagModel.get_dagmodel(args.dag_id, session=session) if not dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table") dag_detail = dag_schema.dump(dag) if args.output in ["table", "plain"]: data = [{"property_name": key, "property_value": value} for key, value in dag_detail.items()] else: data = [dag_detail] AirflowConsole().print_as( data=data, output=args.output, )
Display dags with import errors on the command line.
def dag_list_import_errors(args) -> None: """Display dags with import errors on the command line.""" dagbag = DagBag(process_subdir(args.subdir)) data = [] for filename, errors in dagbag.import_errors.items(): data.append({"filepath": filename, "error": errors}) AirflowConsole().print_as( data=data, output=args.output, ) if data: sys.exit(1)
Display dagbag stats at the command line.
def dag_report(args) -> None: """Display dagbag stats at the command line.""" dagbag = DagBag(process_subdir(args.subdir)) AirflowConsole().print_as( data=dagbag.dagbag_stats, output=args.output, mapper=lambda x: { "file": x.file, "duration": x.duration, "dag_num": x.dag_num, "task_num": x.task_num, "dags": sorted(ast.literal_eval(x.dags)), }, )
List the latest n jobs.
def dag_list_jobs(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None: """List latest n jobs.""" queries = [] if dag: args.dag_id = dag.dag_id if args.dag_id: dag = DagModel.get_dagmodel(args.dag_id, session=session) if not dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table") queries.append(Job.dag_id == args.dag_id) if args.state: queries.append(Job.state == args.state) fields = ["dag_id", "state", "job_type", "start_date", "end_date"] all_jobs_iter = session.scalars( select(Job).where(*queries).order_by(Job.start_date.desc()).limit(args.limit) ) all_jobs = [{f: str(job.__getattribute__(f)) for f in fields} for job in all_jobs_iter] AirflowConsole().print_as( data=all_jobs, output=args.output, )
List dag runs for a given DAG.
def dag_list_dag_runs(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None: """List dag runs for a given DAG.""" if dag: args.dag_id = dag.dag_id else: dag = DagModel.get_dagmodel(args.dag_id, session=session) if not dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table") state = args.state.lower() if args.state else None dag_runs = DagRun.find( dag_id=args.dag_id, state=state, no_backfills=args.no_backfill, execution_start_date=args.start_date, execution_end_date=args.end_date, session=session, ) dag_runs.sort(key=lambda x: x.execution_date, reverse=True) AirflowConsole().print_as( data=dag_runs, output=args.output, mapper=lambda dr: { "dag_id": dr.dag_id, "run_id": dr.run_id, "state": dr.state, "execution_date": dr.execution_date.isoformat(), "start_date": dr.start_date.isoformat() if dr.start_date else "", "end_date": dr.end_date.isoformat() if dr.end_date else "", }, )
Execute a single DagRun for a given DAG and execution date.
def dag_test(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None: """Execute one single DagRun for a given DAG and execution date.""" run_conf = None if args.conf: try: run_conf = json.loads(args.conf) except ValueError as e: raise SystemExit(f"Configuration {args.conf!r} is not valid JSON. Error: {e}") execution_date = args.execution_date or timezone.utcnow() with _airflow_parsing_context_manager(dag_id=args.dag_id): dag = dag or get_dag(subdir=args.subdir, dag_id=args.dag_id) dr: DagRun = dag.test(execution_date=execution_date, run_conf=run_conf, session=session) show_dagrun = args.show_dagrun imgcat = args.imgcat_dagrun filename = args.save_dagrun if show_dagrun or imgcat or filename: tis = session.scalars( select(TaskInstance).where( TaskInstance.dag_id == args.dag_id, TaskInstance.execution_date == execution_date, ) ).all() dot_graph = render_dag(dag, tis=tis) print() if filename: _save_dot_to_file(dot_graph, filename) if imgcat: _display_dot_via_imgcat(dot_graph) if show_dagrun: print(dot_graph.source) if dr and dr.state == DagRunState.FAILED: raise SystemExit("DagRun failed")
Reserialize DAGs: clear the serialized DAG table and, unless --clear-only is given, re-sync DAGs from the DagBag.
def dag_reserialize(args, session: Session = NEW_SESSION) -> None: """Serialize a DAG instance.""" session.execute(delete(SerializedDagModel).execution_options(synchronize_session=False)) if not args.clear_only: dagbag = DagBag(process_subdir(args.subdir)) dagbag.sync_to_db(session=session)
Create a DagProcessorJobRunner instance.
def _create_dag_processor_job_runner(args: Any) -> DagProcessorJobRunner: """Create DagFileProcessorProcess instance.""" processor_timeout_seconds: int = conf.getint("core", "dag_file_processor_timeout") processor_timeout = timedelta(seconds=processor_timeout_seconds) return DagProcessorJobRunner( job=Job(), processor=DagFileProcessorManager( processor_timeout=processor_timeout, dag_directory=args.subdir, max_runs=args.num_runs, dag_ids=[], pickle_dags=args.do_pickle, ), )
Start Airflow Dag Processor Job.
def dag_processor(args): """Start Airflow Dag Processor Job.""" if not conf.getboolean("scheduler", "standalone_dag_processor"): raise SystemExit("The option [scheduler/standalone_dag_processor] must be True.") sql_conn: str = conf.get("database", "sql_alchemy_conn").lower() if sql_conn.startswith("sqlite"): raise SystemExit("Standalone DagProcessor is not supported when using sqlite.") job_runner = _create_dag_processor_job_runner(args) reload_configuration_for_dag_processing() run_command_with_daemon_option( args=args, process_name="dag-processor", callback=lambda: run_job(job=job_runner.job, execute_callable=job_runner._execute), should_setup_logging=True, )
Initialize the metadata database.
def initdb(args): """Initialize the metadata database.""" warnings.warn( "`db init` is deprecated. Use `db migrate` instead to migrate the db and/or " "airflow connections create-default-connections to create the default connections", DeprecationWarning, stacklevel=2, ) print(f"DB: {settings.engine.url!r}") db.initdb() print("Initialization done")
Reset the metadata database.
def resetdb(args): """Reset the metadata database.""" print(f"DB: {settings.engine.url!r}") if not (args.yes or input("This will drop existing tables if they exist. Proceed? (y/n)").upper() == "Y"): raise SystemExit("Cancelled") db.resetdb(skip_init=args.skip_init)
Upgrade the metadata database (deprecated; use `db migrate` instead).
def upgradedb(args): """Upgrades the metadata database.""" warnings.warn("`db upgrade` is deprecated. Use `db migrate` instead.", DeprecationWarning, stacklevel=2) migratedb(args)
Recursively search for the revision of the given version. This searches REVISION_HEADS_MAP for the revision of the given version, recursively searching for the previous version if the given version is not found.
def get_version_revision(version: str, recursion_limit=10) -> str | None: """ Recursively search for the revision of the given version. This searches REVISION_HEADS_MAP for the revision of the given version, recursively searching for the previous version if the given version is not found. """ if version in _REVISION_HEADS_MAP: return _REVISION_HEADS_MAP[version] try: major, minor, patch = map(int, version.split(".")) except ValueError: return None new_version = f"{major}.{minor}.{patch - 1}" recursion_limit -= 1 if recursion_limit <= 0: # Prevent infinite recursion as I can't imagine 10 successive versions without migration return None return get_version_revision(new_version, recursion_limit)
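Illustration of the fallback behaviour, assuming the function above is defined next to a (made-up) _REVISION_HEADS_MAP like the one below.

    _REVISION_HEADS_MAP = {"2.7.0": "abc123", "2.7.2": "def456"}  # hypothetical revisions

    print(get_version_revision("2.7.2"))   # "def456" -- direct hit
    print(get_version_revision("2.7.3"))   # "def456" -- falls back to 2.7.2
    print(get_version_revision("2.7.20"))  # None -- recursion limit reached before a known version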
Migrate the metadata database.
def migratedb(args): """Migrates the metadata database.""" print(f"DB: {settings.engine.url!r}") if args.to_revision and args.to_version: raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.") if args.from_version and args.from_revision: raise SystemExit("Cannot supply both `--from-revision` and `--from-version`") if (args.from_revision or args.from_version) and not args.show_sql_only: raise SystemExit( "Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`" ) to_revision = None from_revision = None if args.from_revision: from_revision = args.from_revision elif args.from_version: try: parsed_version = parse_version(args.from_version) except InvalidVersion: raise SystemExit(f"Invalid version {args.from_version!r} supplied as `--from-version`.") if parsed_version < parse_version("2.0.0"): raise SystemExit("--from-version must be greater or equal to than 2.0.0") from_revision = get_version_revision(args.from_version) if not from_revision: raise SystemExit(f"Unknown version {args.from_version!r} supplied as `--from-version`.") if args.to_version: try: parse_version(args.to_version) except InvalidVersion: raise SystemExit(f"Invalid version {args.to_version!r} supplied as `--to-version`.") to_revision = get_version_revision(args.to_version) if not to_revision: raise SystemExit(f"Unknown version {args.to_version!r} supplied as `--to-version`.") elif args.to_revision: to_revision = args.to_revision if not args.show_sql_only: print(f"Performing upgrade to the metadata database {settings.engine.url!r}") else: print("Generating sql for upgrade -- upgrade commands will *not* be submitted.") db.upgradedb( to_revision=to_revision, from_revision=from_revision, show_sql_only=args.show_sql_only, reserialize_dags=args.reserialize_dags, ) if not args.show_sql_only: print("Database migrating done!")
Downgrade the metadata database.
def downgrade(args): """Downgrades the metadata database.""" if args.to_revision and args.to_version: raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.") if args.from_version and args.from_revision: raise SystemExit("`--from-revision` may not be combined with `--from-version`") if (args.from_revision or args.from_version) and not args.show_sql_only: raise SystemExit( "Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`" ) if not (args.to_version or args.to_revision): raise SystemExit("Must provide either --to-revision or --to-version.") from_revision = None if args.from_revision: from_revision = args.from_revision elif args.from_version: from_revision = get_version_revision(args.from_version) if not from_revision: raise SystemExit(f"Unknown version {args.from_version!r} supplied as `--from-version`.") if args.to_version: to_revision = get_version_revision(args.to_version) if not to_revision: raise SystemExit(f"Downgrading to version {args.to_version} is not supported.") elif args.to_revision: to_revision = args.to_revision if not args.show_sql_only: print(f"Performing downgrade with database {settings.engine.url!r}") else: print("Generating sql for downgrade -- downgrade commands will *not* be submitted.") if args.show_sql_only or ( args.yes or input( "\nWarning: About to reverse schema migrations for the airflow metastore. " "Please ensure you have backed up your database before any upgrade or " "downgrade operation. Proceed? (y/n)\n" ).upper() == "Y" ): db.downgrade(to_revision=to_revision, from_revision=from_revision, show_sql_only=args.show_sql_only) if not args.show_sql_only: print("Downgrade complete") else: raise SystemExit("Cancelled")
Wait for all airflow migrations to complete. Used for launching airflow in k8s.
def check_migrations(args): """Wait for all airflow migrations to complete. Used for launching airflow in k8s.""" db.check_migrations(timeout=args.migration_wait_timeout)
Run a shell that provides access to the metadata database.
def shell(args): """Run a shell that allows to access metadata database.""" url = settings.engine.url print(f"DB: {url!r}") if url.get_backend_name() == "mysql": with NamedTemporaryFile(suffix="my.cnf") as f: content = textwrap.dedent( f""" [client] host = {url.host} user = {url.username} password = {url.password or ""} port = {url.port or "3306"} database = {url.database} """ ).strip() f.write(content.encode()) f.flush() execute_interactive(["mysql", f"--defaults-extra-file={f.name}"]) elif url.get_backend_name() == "sqlite": execute_interactive(["sqlite3", url.database]) elif url.get_backend_name() == "postgresql": env = os.environ.copy() env["PGHOST"] = url.host or "" env["PGPORT"] = str(url.port or "5432") env["PGUSER"] = url.username or "" # PostgreSQL does not allow the use of PGPASSFILE if the current user is root. env["PGPASSWORD"] = url.password or "" env["PGDATABASE"] = url.database execute_interactive(["psql"], env=env) else: raise AirflowException(f"Unknown driver: {url.drivername}")
Run a check command that checks if db is available.
def check(args): """Run a check command that checks if db is available.""" if InternalApiConfig.get_use_internal_api(): return retries: int = args.retry retry_delay: int = args.retry_delay def _warn_remaining_retries(retrystate: RetryCallState): remain = retries - retrystate.attempt_number log.warning("%d retries remain. Will retry in %d seconds", remain, retry_delay) for attempt in Retrying( stop=stop_after_attempt(1 + retries), wait=wait_fixed(retry_delay), reraise=True, before_sleep=_warn_remaining_retries, ): with attempt: db.check()
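The retry loop above is plain tenacity. A self-contained example of the same pattern with a deliberately failing check, so the before_sleep callback and the final re-raise are visible:

import logging

from tenacity import RetryCallState, Retrying, stop_after_attempt, wait_fixed

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)

retries, retry_delay = 3, 2  # illustrative stand-ins for --retry / --retry-delay

def _warn_remaining_retries(retry_state: RetryCallState):
    remain = retries - retry_state.attempt_number
    log.warning("%d retries remain. Will retry in %d seconds", remain, retry_delay)

def flaky_check():
    raise ConnectionError("database not reachable yet")  # stand-in for db.check()

for attempt in Retrying(
    stop=stop_after_attempt(1 + retries),  # the first try plus the configured retries
    wait=wait_fixed(retry_delay),
    reraise=True,  # surface the original exception after the last attempt
    before_sleep=_warn_remaining_retries,
):
    with attempt:
        flaky_check()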
Purges old records in metadata database.
def cleanup_tables(args): """Purges old records in metadata database.""" run_cleanup( table_names=args.tables, dry_run=args.dry_run, clean_before_timestamp=args.clean_before_timestamp, verbose=args.verbose, confirm=not args.yes, skip_archive=args.skip_archive, )
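The arguments map directly onto run_cleanup. A hedged sketch of a dry run that keeps the last 90 days, assuming an installed and configured Airflow; the import path is taken to be airflow.utils.db_cleanup and the table names are illustrative:

from datetime import datetime, timedelta, timezone

from airflow.utils.db_cleanup import run_cleanup

run_cleanup(
    table_names=["log", "task_instance"],  # illustrative subset of the supported tables
    dry_run=True,  # report what would be purged, delete nothing
    clean_before_timestamp=datetime.now(timezone.utc) - timedelta(days=90),
    verbose=True,
    confirm=False,  # skip the interactive prompt, like --yes
    skip_archive=False,  # archive rows before deleting them
)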
Export archived records from metadata database.
def export_archived(args): """Export archived records from metadata database.""" export_archived_records( export_format=args.export_format, output_path=args.output_path, table_names=args.tables, drop_archives=args.drop_archives, needs_confirm=not args.yes, )
Drop archived tables from metadata database.
def drop_archived(args): """Drop archived tables from metadata database.""" drop_archived_tables( table_names=args.tables, needs_confirm=not args.yes, )
Upload text file to File.io service and return link.
def _upload_text_to_fileio(content): """Upload text file to File.io service and return link.""" resp = httpx.post("https://file.io", content=content) if resp.status_code not in [200, 201]: print(resp.json()) raise FileIoException("Failed to send report to file.io service.") try: return resp.json()["link"] except ValueError as e: log.debug(e) raise FileIoException("Failed to send report to file.io service.")
Show information related to Airflow, the system and other components.
def show_info(args):
    """Show information related to Airflow, the system and other components."""
    # Enforce anonymization when file_io upload is turned on.
    anonymizer = PiiAnonymizer() if args.anonymize or args.file_io else NullAnonymizer()
    info = AirflowInfo(anonymizer)
    if args.file_io:
        _send_report_to_fileio(info.render_text(args.output))
    else:
        info.show(args.output)
Start Airflow Internal API.
def internal_api(args):
    """Start Airflow Internal API."""
    print(settings.HEADER)

    access_logfile = args.access_logfile or "-"
    error_logfile = args.error_logfile or "-"
    access_logformat = args.access_logformat
    num_workers = args.workers
    worker_timeout = args.worker_timeout

    if args.debug:
        log.info("Starting the Internal API server on port %s and host %s.", args.port, args.hostname)
        app = create_app(testing=conf.getboolean("core", "unit_test_mode"))
        app.run(
            debug=True,  # nosec
            use_reloader=not app.config["TESTING"],
            port=args.port,
            host=args.hostname,
        )
    else:
        log.info(
            textwrap.dedent(
                f"""\
                Running the Gunicorn Server with:
                Workers: {num_workers} {args.workerclass}
                Host: {args.hostname}:{args.port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                Access Logformat: {access_logformat}
                ================================================================="""
            )
        )

        pid_file, _, _, _ = setup_locations("internal-api", pid=args.pid)

        run_args = [
            sys.executable,
            "-m",
            "gunicorn",
            "--workers",
            str(num_workers),
            "--worker-class",
            str(args.workerclass),
            "--timeout",
            str(worker_timeout),
            "--bind",
            args.hostname + ":" + str(args.port),
            "--name",
            "airflow-internal-api",
            "--pid",
            pid_file,
            "--access-logfile",
            str(access_logfile),
            "--error-logfile",
            str(error_logfile),
            "--config",
            "python:airflow.api_internal.gunicorn_config",
        ]

        if args.access_logformat and args.access_logformat.strip():
            run_args += ["--access-logformat", str(args.access_logformat)]

        if args.daemon:
            run_args += ["--daemon"]

        run_args += ["airflow.cli.commands.internal_api_command:cached_app()"]

        # To prevent different workers from creating the web app and all writing to the database
        # at the same time, we use the --preload option. With --preload, the app is loaded before
        # the workers are forked, and each worker then has its own copy of the app.
        run_args += ["--preload"]

        def kill_proc(signum: int, gunicorn_master_proc: psutil.Process | subprocess.Popen):
            log.info("Received signal: %s. Closing gunicorn.", signum)
            gunicorn_master_proc.terminate()
            with suppress(psutil.TimeoutExpired, subprocess.TimeoutExpired):
                gunicorn_master_proc.wait(timeout=30)
            if isinstance(gunicorn_master_proc, subprocess.Popen):
                still_running = gunicorn_master_proc.poll() is None
            else:
                still_running = gunicorn_master_proc.is_running()
            if still_running:
                gunicorn_master_proc.kill()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_proc: psutil.Process | subprocess.Popen):
            # Register signal handlers
            signal.signal(signal.SIGINT, lambda signum, _: kill_proc(signum, gunicorn_master_proc))
            signal.signal(signal.SIGTERM, lambda signum, _: kill_proc(signum, gunicorn_master_proc))

            # This runs forever until a SIG{INT, TERM, KILL, ...} signal is sent
            GunicornMonitor(
                gunicorn_master_pid=gunicorn_master_proc.pid,
                num_workers_expected=num_workers,
                master_timeout=120,
                worker_refresh_interval=30,
                worker_refresh_batch_size=1,
                reload_on_plugin_change=False,
            ).start()

        def start_and_monitor_gunicorn(args):
            if args.daemon:
                subprocess.Popen(run_args, close_fds=True)

                # Read the pid of the gunicorn master, as it will be different from
                # that of the process spawned above.
                gunicorn_master_proc_pid = None
                while not gunicorn_master_proc_pid:
                    sleep(0.1)
                    gunicorn_master_proc_pid = read_pid_from_pidfile(pid_file)

                # Run Gunicorn monitor
                gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
                monitor_gunicorn(gunicorn_master_proc)
            else:
                with subprocess.Popen(run_args, close_fds=True) as gunicorn_master_proc:
                    monitor_gunicorn(gunicorn_master_proc)

        if args.daemon:
            # Ensure that possible errors are reported before daemonization
            os.environ["SKIP_DAGS_PARSING"] = "True"
            create_app(None)
            os.environ.pop("SKIP_DAGS_PARSING")

        pid_file_path = Path(pid_file)
        monitor_pid_file = str(pid_file_path.with_name(f"{pid_file_path.stem}-monitor{pid_file_path.suffix}"))
        run_command_with_daemon_option(
            args=args,
            process_name="internal-api",
            callback=lambda: start_and_monitor_gunicorn(args),
            should_setup_logging=True,
            pid_file=monitor_pid_file,
        )
Create a new instance of Airflow Internal API app.
def create_app(config=None, testing=False): """Create a new instance of Airflow Internal API app.""" flask_app = Flask(__name__) flask_app.config["APP_NAME"] = "Airflow Internal API" flask_app.config["TESTING"] = testing flask_app.config["SQLALCHEMY_DATABASE_URI"] = conf.get("database", "SQL_ALCHEMY_CONN") url = make_url(flask_app.config["SQLALCHEMY_DATABASE_URI"]) if url.drivername == "sqlite" and url.database and not url.database.startswith("/"): raise AirflowConfigException( f'Cannot use relative path: `{conf.get("database", "SQL_ALCHEMY_CONN")}` to connect to sqlite. ' "Please use absolute path such as `sqlite:////tmp/airflow.db`." ) flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False flask_app.config["SESSION_COOKIE_HTTPONLY"] = True flask_app.config["SESSION_COOKIE_SAMESITE"] = "Lax" if config: flask_app.config.from_mapping(config) if "SQLALCHEMY_ENGINE_OPTIONS" not in flask_app.config: flask_app.config["SQLALCHEMY_ENGINE_OPTIONS"] = settings.prepare_engine_args() InternalApiConfig.force_database_direct_access() csrf = CSRFProtect() csrf.init_app(flask_app) db = SQLA() db.session = settings.Session db.init_app(flask_app) init_dagbag(flask_app) cache_config = {"CACHE_TYPE": "flask_caching.backends.filesystem", "CACHE_DIR": gettempdir()} Cache(app=flask_app, config=cache_config) configure_logging() configure_manifest_files(flask_app) import_all_models() with flask_app.app_context(): init_error_handlers(flask_app) init_api_internal(flask_app, standalone_api=True) init_jinja_globals(flask_app) init_xframe_protection(flask_app) return flask_app
Return cached instance of Airflow Internal API app.
def cached_app(config=None, testing=False): """Return cached instance of Airflow Internal API app.""" global app if not app: app = create_app(config=config, testing=testing) return app
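A quick illustration of the caching contract, assuming a configured Airflow environment (create_app reads the SQL_ALCHEMY_CONN setting):

first = cached_app(testing=True)
second = cached_app(testing=True)
assert first is second  # the Flask app is built once; later calls reuse it and ignore their arguments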
Check if job(s) are still alive.
def check(args, session: Session = NEW_SESSION) -> None: """Check if job(s) are still alive.""" if args.allow_multiple and args.limit <= 1: raise SystemExit("To use option --allow-multiple, you must set the limit to a value greater than 1.") if args.hostname and args.local: raise SystemExit("You can't use --hostname and --local at the same time") query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc()) if args.job_type: query = query.where(Job.job_type == args.job_type) if args.hostname: query = query.where(Job.hostname == args.hostname) if args.local: query = query.where(Job.hostname == get_hostname()) if args.limit > 0: query = query.limit(args.limit) alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()] count_alive_jobs = len(alive_jobs) if count_alive_jobs == 0: raise SystemExit("No alive jobs found.") if count_alive_jobs > 1 and not args.allow_multiple: raise SystemExit(f"Found {count_alive_jobs} alive jobs. Expected only one.") if count_alive_jobs == 1: print("Found one alive job.") else: print(f"Found {count_alive_jobs} alive jobs.")
Start a kerberos ticket renewer.
def kerberos(args): """Start a kerberos ticket renewer.""" print(settings.HEADER) mode = KerberosMode.STANDARD if args.one_time: mode = KerberosMode.ONE_TIME run_command_with_daemon_option( args=args, process_name="kerberos", callback=lambda: krb.run(principal=args.principal, keytab=args.keytab, mode=mode), )
Generate yaml files for each task in the DAG. Used for testing output of KubernetesExecutor.
def generate_pod_yaml(args): """Generate yaml files for each task in the DAG. Used for testing output of KubernetesExecutor.""" execution_date = args.execution_date dag = get_dag(subdir=args.subdir, dag_id=args.dag_id) yaml_output_path = args.output_path dr = DagRun(dag.dag_id, execution_date=execution_date) kube_config = KubeConfig() for task in dag.tasks: ti = TaskInstance(task, None) ti.dag_run = dr pod = PodGenerator.construct_pod( dag_id=args.dag_id, task_id=ti.task_id, pod_id=create_pod_id(args.dag_id, ti.task_id), try_number=ti.try_number, kube_image=kube_config.kube_image, date=ti.execution_date, args=ti.command_as_list(), pod_override_object=PodGenerator.from_obj(ti.executor_config), scheduler_job_id="worker-config", namespace=kube_config.executor_namespace, base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file), with_mutation_hook=True, ) api_client = ApiClient() date_string = pod_generator.datetime_to_label_safe_datestring(execution_date) yaml_file_name = f"{args.dag_id}_{ti.task_id}_{date_string}.yml" os.makedirs(os.path.dirname(yaml_output_path + "/airflow_yaml_output/"), exist_ok=True) with open(yaml_output_path + "/airflow_yaml_output/" + yaml_file_name, "w") as output: sanitized_pod = api_client.sanitize_for_serialization(pod) output.write(yaml.dump(sanitized_pod)) print(f"YAML output can be found at {yaml_output_path}/airflow_yaml_output/")
Clean up k8s pods in evicted/failed/succeeded/pending states.
def cleanup_pods(args): """Clean up k8s pods in evicted/failed/succeeded/pending states.""" namespace = args.namespace min_pending_minutes = args.min_pending_minutes # protect newly created pods from deletion if min_pending_minutes < 5: min_pending_minutes = 5 # https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ # All Containers in the Pod have terminated in success, and will not be restarted. pod_succeeded = "succeeded" # The Pod has been accepted by the Kubernetes cluster, # but one or more of the containers has not been set up and made ready to run. pod_pending = "pending" # All Containers in the Pod have terminated, and at least one Container has terminated in failure. # That is, the Container either exited with non-zero status or was terminated by the system. pod_failed = "failed" # https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/ pod_reason_evicted = "evicted" # If pod is failed and restartPolicy is: # * Always: Restart Container; Pod phase stays Running. # * OnFailure: Restart Container; Pod phase stays Running. # * Never: Pod phase becomes Failed. pod_restart_policy_never = "never" print("Loading Kubernetes configuration") kube_client = get_kube_client() print(f"Listing pods in namespace {namespace}") airflow_pod_labels = [ "dag_id", "task_id", "try_number", "airflow_version", ] list_kwargs = {"namespace": namespace, "limit": 500, "label_selector": ",".join(airflow_pod_labels)} while True: pod_list = kube_client.list_namespaced_pod(**list_kwargs) for pod in pod_list.items: pod_name = pod.metadata.name print(f"Inspecting pod {pod_name}") pod_phase = pod.status.phase.lower() pod_reason = pod.status.reason.lower() if pod.status.reason else "" pod_restart_policy = pod.spec.restart_policy.lower() current_time = datetime.now(pod.metadata.creation_timestamp.tzinfo) if ( pod_phase == pod_succeeded or (pod_phase == pod_failed and pod_restart_policy == pod_restart_policy_never) or (pod_reason == pod_reason_evicted) or ( pod_phase == pod_pending and current_time - pod.metadata.creation_timestamp > timedelta(minutes=min_pending_minutes) ) ): print( f'Deleting pod "{pod_name}" phase "{pod_phase}" and reason "{pod_reason}", ' f'restart policy "{pod_restart_policy}"' ) try: _delete_pod(pod.metadata.name, namespace) except ApiException as e: print(f"Can't remove POD: {e}", file=sys.stderr) else: print(f"No action taken on pod {pod_name}") continue_token = pod_list.metadata._continue if not continue_token: break list_kwargs["_continue"] = continue_token
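The pending-pod age check depends on timezone-aware arithmetic: the current time is taken in the same tzinfo as the pod's creation timestamp, so the subtraction is well defined. A standalone illustration:

from datetime import datetime, timedelta, timezone

created = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)  # stand-in for pod.metadata.creation_timestamp
age = datetime.now(created.tzinfo) - created  # both operands are aware, so subtraction is valid
print(age > timedelta(minutes=5))  # True once the pod is older than the threshold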
Delete a namespaced pod. Helper Function for cleanup_pods.
def _delete_pod(name, namespace): """ Delete a namespaced pod. Helper Function for cleanup_pods. """ kube_client = get_kube_client() delete_options = client.V1DeleteOptions() print(f'Deleting POD "{name}" from "{namespace}" namespace') api_response = kube_client.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options) print(api_response)
Check the command value and raise an error if it is a removed command.
def check_legacy_command(action, value):
    """Check the command value and raise an error if it is a removed command."""
    new_command = COMMAND_MAP.get(value)
    if new_command is not None:
        msg = f"`airflow {value}` command has been removed; please use `airflow {new_command}`"
        raise ArgumentError(action, msg)
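A hedged sketch of how such a check can be hooked into argparse; the LegacyCommandAction class and the single COMMAND_MAP entry below are illustrative stand-ins, not Airflow's actual wiring, and check_legacy_command from above is assumed to be in scope:

import argparse

COMMAND_MAP = {"worker": "celery worker"}  # illustrative mapping of removed -> replacement commands

class LegacyCommandAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        check_legacy_command(self, values)  # raises ArgumentError for removed commands
        setattr(namespace, self.dest, values)

parser = argparse.ArgumentParser(prog="airflow")
parser.add_argument("subcommand", action=LegacyCommandAction)
# parser.parse_args(["worker"])  # argparse reports the removal message and exits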
Dump plugins information.
def dump_plugins(args):
    """Dump plugins information."""
    plugins_info: list[dict[str, str]] = get_plugin_info()
    if not plugins_manager.plugins:
        print("No plugins loaded")
        return

    # Remove columns that are empty for every plugin
    if args.output == "table":
        # Indexing plugins_info[0] is safe because at least one plugin is loaded at this point
        for col in list(plugins_info[0]):
            if all(not bool(p[col]) for p in plugins_info):
                for plugin in plugins_info:
                    del plugin[col]

    AirflowConsole().print_as(plugins_info, output=args.output)
Display info of all the pools.
def pool_list(args): """Display info of all the pools.""" api_client = get_current_api_client() pools = api_client.get_pools() _show_pools(pools=pools, output=args.output)
Display pool info by a given name.
def pool_get(args): """Display pool info by a given name.""" api_client = get_current_api_client() try: pools = [api_client.get_pool(name=args.pool)] _show_pools(pools=pools, output=args.output) except PoolNotFound: raise SystemExit(f"Pool {args.pool} does not exist")
Create new pool with a given name and slots.
def pool_set(args): """Create new pool with a given name and slots.""" api_client = get_current_api_client() api_client.create_pool( name=args.pool, slots=args.slots, description=args.description, include_deferred=args.include_deferred ) print(f"Pool {args.pool} created")
Delete pool by a given name.
def pool_delete(args): """Delete pool by a given name.""" api_client = get_current_api_client() try: api_client.delete_pool(name=args.pool) print(f"Pool {args.pool} deleted") except PoolNotFound: raise SystemExit(f"Pool {args.pool} does not exist")
Import pools from the file.
def pool_import(args): """Import pools from the file.""" if not os.path.exists(args.file): raise SystemExit(f"Missing pools file {args.file}") pools, failed = pool_import_helper(args.file) if failed: raise SystemExit(f"Failed to update pool(s): {', '.join(failed)}") print(f"Uploaded {len(pools)} pool(s)")
Export all the pools to the file.
def pool_export(args): """Export all the pools to the file.""" pools = pool_export_helper(args.file) print(f"Exported {len(pools)} pools to {args.file}")
Help import pools from the json file.
def pool_import_helper(filepath): """Help import pools from the json file.""" api_client = get_current_api_client() with open(filepath) as poolfile: data = poolfile.read() try: pools_json = json.loads(data) except JSONDecodeError as e: raise SystemExit(f"Invalid json file: {e}") pools = [] failed = [] for k, v in pools_json.items(): if isinstance(v, dict) and "slots" in v and "description" in v: pools.append( api_client.create_pool( name=k, slots=v["slots"], description=v["description"], include_deferred=v.get("include_deferred", False), ) ) else: failed.append(k) return pools, failed
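The expected file is a JSON object keyed by pool name, where each entry must carry slots and description and may optionally carry include_deferred. A minimal example of producing such a file (pool names are illustrative):

import json

example_pools = {
    "backfill": {"slots": 4, "description": "Backfill runs", "include_deferred": False},
    "reporting": {"slots": 16, "description": "Reporting DAGs"},  # include_deferred defaults to False
}
with open("pools.json", "w") as poolfile:
    json.dump(example_pools, poolfile, indent=4, sort_keys=True)
# Entries missing "slots" or "description" are reported back in the `failed` list.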
Help export all the pools to the json file.
def pool_export_helper(filepath): """Help export all the pools to the json file.""" api_client = get_current_api_client() pool_dict = {} pools = api_client.get_pools() for pool in pools: pool_dict[pool[0]] = {"slots": pool[1], "description": pool[2], "include_deferred": pool[3]} with open(filepath, "w") as poolfile: poolfile.write(json.dumps(pool_dict, sort_keys=True, indent=4)) return pools
Get a provider info.
def provider_get(args): """Get a provider info.""" providers = ProvidersManager().providers if args.provider_name in providers: provider_version = providers[args.provider_name].version provider_info = providers[args.provider_name].data if args.full: provider_info["description"] = _remove_rst_syntax(provider_info["description"]) AirflowConsole().print_as( data=[provider_info], output=args.output, ) else: AirflowConsole().print_as( data=[{"Provider": args.provider_name, "Version": provider_version}], output=args.output ) else: raise SystemExit(f"No such provider installed: {args.provider_name}")
List all providers at the command line.
def providers_list(args): """List all providers at the command line.""" AirflowConsole().print_as( data=list(ProvidersManager().providers.values()), output=args.output, mapper=lambda x: { "package_name": x.data["package-name"], "description": _remove_rst_syntax(x.data["description"]), "version": x.version, }, )