Dataset schema (one record = the following fields, in order):

  body                    string, 26 to 98.2k characters
  body_hash               int64
  docstring               string, 1 to 16.8k characters
  path                    string, 5 to 230 characters
  name                    string, 1 to 96 characters
  repository_name         string, 7 to 89 characters
  lang                    string, 1 distinct value ("python")
  body_without_docstring  string, 20 to 98.2k characters

body:

    def timestamp_local(value):
        'Filter to convert given timestamp to local date/time.'
        try:
            return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT)
        except (ValueError, TypeError):
            return value

body_hash: 371,278,893,903,197,440
docstring: Filter to convert given timestamp to local date/time.
path: homeassistant/helpers/template.py
name: timestamp_local
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def timestamp_utc(value):
        'Filter to convert given timestamp to UTC date/time.'
        try:
            return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
        except (ValueError, TypeError):
            return value

body_hash: -5,678,883,950,891,153,000
docstring: Filter to convert given timestamp to UTC date/time.
path: homeassistant/helpers/template.py
name: timestamp_utc
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

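Illustrative example (not a dataset row): the two filters above share the dataset's recurring "forgiving filter" pattern, where invalid input is passed through unchanged instead of raising. This sketch substitutes standard-library calls for Home Assistant's dt_util, and the DATE_STR_FORMAT value is an assumption:

    from datetime import datetime, timezone

    DATE_STR_FORMAT = '%Y-%m-%d %H:%M:%S'  # assumed format, for illustration only

    def timestamp_utc_sketch(value):
        try:
            return datetime.fromtimestamp(value, tz=timezone.utc).strftime(DATE_STR_FORMAT)
        except (ValueError, TypeError):
            return value  # bad input is returned unmodified

    print(timestamp_utc_sketch(0))           # 1970-01-01 00:00:00
    print(timestamp_utc_sketch('not a ts'))  # not a ts
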
body:

    def forgiving_as_timestamp(value):
        'Try to convert value to timestamp.'
        try:
            return dt_util.as_timestamp(value)
        except (ValueError, TypeError):
            return None

body_hash: 3,174,292,803,847,005,700
docstring: Try to convert value to timestamp.
path: homeassistant/helpers/template.py
name: forgiving_as_timestamp
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def strptime(string, fmt):
        'Parse a time string to datetime.'
        try:
            return datetime.strptime(string, fmt)
        except (ValueError, AttributeError, TypeError):
            return string

body_hash: -3,118,304,228,443,340,300
docstring: Parse a time string to datetime.
path: homeassistant/helpers/template.py
name: strptime
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

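Usage sketch (not a dataset row), assuming the strptime helper above is defined in the current module:

    from datetime import datetime  # the helper above depends on this import

    print(repr(strptime('2021-03-01', '%Y-%m-%d')))  # datetime.datetime(2021, 3, 1, 0, 0)
    print(repr(strptime('garbage', '%Y-%m-%d')))     # 'garbage' -- parse failures return the input
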
body:

    def fail_when_undefined(value):
        'Filter to force a failure when the value is undefined.'
        if isinstance(value, jinja2.Undefined):
            value()
        return value

body_hash: 8,939,017,925,044,279,000
docstring: Filter to force a failure when the value is undefined.
path: homeassistant/helpers/template.py
name: fail_when_undefined
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

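Why calling `value()` forces a failure: invoking a jinja2.Undefined instance raises UndefinedError. A self-contained illustration (not a dataset row):

    import jinja2

    undef = jinja2.Undefined(name='missing')
    try:
        undef()  # the call fail_when_undefined makes to abort the render
    except jinja2.UndefinedError as err:
        print('render fails:', err)  # render fails: 'missing' is undefined
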
body:

    def forgiving_float(value):
        'Try to convert value to a float.'
        try:
            return float(value)
        except (ValueError, TypeError):
            return value

body_hash: 7,585,152,078,184,862,000
docstring: Try to convert value to a float.
path: homeassistant/helpers/template.py
name: forgiving_float
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def regex_match(value, find='', ignorecase=False):
        'Match value using regex.'
        if not isinstance(value, str):
            value = str(value)
        flags = re.I if ignorecase else 0
        return bool(re.match(find, value, flags))

body_hash: 6,072,483,120,086,773,000
docstring: Match value using regex.
path: homeassistant/helpers/template.py
name: regex_match
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def regex_replace(value='', find='', replace='', ignorecase=False):
        'Replace using regex.'
        if not isinstance(value, str):
            value = str(value)
        flags = re.I if ignorecase else 0
        regex = re.compile(find, flags)
        return regex.sub(replace, value)

body_hash: -7,940,056,905,785,757,000
docstring: Replace using regex.
path: homeassistant/helpers/template.py
name: regex_replace
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def regex_search(value, find='', ignorecase=False):
        'Search using regex.'
        if not isinstance(value, str):
            value = str(value)
        flags = re.I if ignorecase else 0
        return bool(re.search(find, value, flags))

body_hash: -7,090,118,636,444,232,000
docstring: Search using regex.
path: homeassistant/helpers/template.py
name: regex_search
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def regex_findall_index(value, find='', index=0, ignorecase=False):
        'Find all matches using regex and then pick specific match index.'
        if not isinstance(value, str):
            value = str(value)
        flags = re.I if ignorecase else 0
        return re.findall(find, value, flags)[index]

body_hash: 4,697,311,776,560,366,000
docstring: Find all matches using regex and then pick specific match index.
path: homeassistant/helpers/template.py
name: regex_findall_index
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

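The four regex filters above coerce non-string input to str and then defer to the standard re module. The equivalent bare re calls (not a dataset row):

    import re

    print(bool(re.match('home', 'homeassistant')))     # True   (regex_match)
    print(bool(re.search('assist', 'homeassistant')))  # True   (regex_search)
    print(re.sub('World', 'HA', 'Hello World'))        # Hello HA  (regex_replace)
    print(re.findall(r'\d+', 'a1 b22 c333')[1])        # 22     (regex_findall_index, index=1)
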
body:

    def bitwise_and(first_value, second_value):
        'Perform a bitwise and operation.'
        return first_value & second_value

body_hash: 997,094,502,929,750,500
docstring: Perform a bitwise and operation.
path: homeassistant/helpers/template.py
name: bitwise_and
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def bitwise_or(first_value, second_value):
        'Perform a bitwise or operation.'
        return first_value | second_value

body_hash: 7,219,139,196,977,614,000
docstring: Perform a bitwise or operation.
path: homeassistant/helpers/template.py
name: bitwise_or
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def base64_encode(value):
        'Perform base64 encode.'
        return base64.b64encode(value.encode('utf-8')).decode('utf-8')

body_hash: 6,402,450,293,629,345,000
docstring: Perform base64 encode.
path: homeassistant/helpers/template.py
name: base64_encode
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def base64_decode(value):
        'Perform base64 decode.'
        return base64.b64decode(value).decode('utf-8')

body_hash: -5,057,114,674,241,039,000
docstring: Perform base64 decode.
path: homeassistant/helpers/template.py
name: base64_decode
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

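Round-trip illustration for the two base64 filters above (not a dataset row):

    import base64

    encoded = base64.b64encode('homeassistant'.encode('utf-8')).decode('utf-8')
    print(encoded)                                    # aG9tZWFzc2lzdGFudA==
    print(base64.b64decode(encoded).decode('utf-8'))  # homeassistant
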
body:

    def ordinal(value):
        'Perform ordinal conversion.'
        suffixes = ['th', 'st', 'nd', 'rd'] + ['th'] * 6
        return str(value) + (
            suffixes[int(str(value)[-1]) % 10]
            if int(str(value)[-2:]) % 100 not in range(11, 14)
            else 'th'
        )

body_hash: 523,003,202,717,955,200
docstring: Perform ordinal conversion.
path: homeassistant/helpers/template.py
name: ordinal
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

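Expected behaviour of the ordinal helper above, including the 11/12/13 special case (not a dataset row; assumes the function is in scope):

    for n in (1, 2, 3, 4, 11, 12, 13, 21, 102, 113):
        print(n, '->', ordinal(n))
    # 1 -> 1st, 2 -> 2nd, 3 -> 3rd, 4 -> 4th, 11 -> 11th, 12 -> 12th,
    # 13 -> 13th, 21 -> 21st, 102 -> 102nd, 113 -> 113th
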
body:

    def from_json(value):
        'Convert a JSON string to an object.'
        return json.loads(value)

body_hash: -2,927,587,324,358,168,600
docstring: Convert a JSON string to an object.
path: homeassistant/helpers/template.py
name: from_json
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def to_json(value):
        'Convert an object to a JSON string.'
        return json.dumps(value)

body_hash: -248,069,255,064,082,430
docstring: Convert an object to a JSON string.
path: homeassistant/helpers/template.py
name: to_json
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @contextfilter
    def random_every_time(context, values):
        """Choose a random value.

        Unlike Jinja's random filter,
        this is context-dependent to avoid caching the chosen value.
        """
        return random.choice(values)

body_hash: -3,117,783,825,970,350,000
docstring: Choose a random value. Unlike Jinja's random filter, this is context-dependent to avoid caching the chosen value.
path: homeassistant/helpers/template.py
name: random_every_time
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

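Why the filter is context-dependent: Jinja's optimizer may evaluate a filter applied to a constant argument once at compile time; a context-dependent filter opts out of that, so every render re-runs random.choice. A self-contained sketch (not a dataset row; jinja2.pass_context is the Jinja 3 spelling of the contextfilter decorator used above):

    import random
    import jinja2

    env = jinja2.Environment()

    @jinja2.pass_context
    def random_every_time(context, values):
        return random.choice(values)

    env.filters['random_every_time'] = random_every_time
    tmpl = env.from_string('{{ [1, 2, 3] | random_every_time }}')
    print(tmpl.render(), tmpl.render())  # re-sampled on each render (may repeat by chance)
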
body:

    def relative_time(value):
        """
        Take a datetime and return its "age" as a string.

        The age can be in second, minute, hour, day, month or year. Only the
        biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
        be returned.
        If the date is in the future, the input is returned unchanged.

        If the input is not a datetime object, it is returned unmodified.
        """
        if not isinstance(value, datetime):
            return value
        if not value.tzinfo:
            value = dt_util.as_local(value)
        if dt_util.now() < value:
            return value
        return dt_util.get_age(value)

body_hash: -4,409,047,958,477,338,600
docstring: Take a datetime and return its "age" as a string. The age can be in second, minute, hour, day, month or year. Only the biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will be returned. If the date is in the future, the input is returned unchanged. If the input is not a datetime object, it is returned unmodified.
path: homeassistant/helpers/template.py
name: relative_time
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def urlencode(value):
        'Urlencode dictionary and return as UTF-8 string.'
        return urllib_urlencode(value).encode('utf-8')

body_hash: -5,544,043,678,155,911,000
docstring: Urlencode dictionary and return as UTF-8 string.
path: homeassistant/helpers/template.py
name: urlencode
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _render_with_context(template_str: str, template: jinja2.Template, **kwargs: Any) -> str:
        'Store template being rendered in a ContextVar to aid error handling.'
        template_cv.set(template_str)
        return template.render(**kwargs)

body_hash: -7,469,460,773,156,476,000
docstring: Store template being rendered in a ContextVar to aid error handling.
path: homeassistant/helpers/template.py
name: _render_with_context
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
        'Create a new tuple class.'
        return super().__new__(cls, tuple(value))

body_hash: 4,402,851,517,117,680,000
docstring: Create a new tuple class.
path: homeassistant/helpers/template.py
name: __new__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, value: tuple, *, render_result: str | None = None):
        'Initialize a new tuple class.'
        self.render_result = render_result

body_hash: -3,731,740,333,827,607,600
docstring: Initialize a new tuple class.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __str__(self) -> str:
        'Return string representation.'
        if self.render_result is None:
            return super().__str__()
        return self.render_result

body_hash: 4,386,185,566,864,812,500
docstring: Return string representation.
path: homeassistant/helpers/template.py
name: __str__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

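Taken together, the three TupleWrapper methods above make a tuple that remembers the exact rendered string it was parsed from. A self-contained sketch of the idea (not a dataset row; simplified signatures for illustration):

    class TupleWrapper(tuple):
        'A tuple that remembers the rendered string it came from (sketch).'

        def __new__(cls, value, *, render_result=None):
            return super().__new__(cls, tuple(value))

        def __init__(self, value, *, render_result=None):
            self.render_result = render_result

        def __str__(self):
            return super().__str__() if self.render_result is None else self.render_result

    t = TupleWrapper((1, 2), render_result='(1, 2)')
    print(t == (1, 2), str(t))  # True (1, 2)
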
body:

    def __init__(self, template):
        'Initialise.'
        self.template = template
        self.filter_lifecycle = _true
        self.filter = _true
        self._result: str | None = None
        self.is_static = False
        self.exception: TemplateError | None = None
        self.all_states = False
        self.all_states_lifecycle = False
        self.domains = set()
        self.domains_lifecycle = set()
        self.entities = set()
        self.rate_limit: timedelta | None = None
        self.has_time = False

body_hash: -4,230,071,943,378,648,000
docstring: Initialise.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __repr__(self) -> str:
        'Representation of RenderInfo.'
        return (
            f'<RenderInfo {self.template}'
            f' all_states={self.all_states}'
            f' all_states_lifecycle={self.all_states_lifecycle}'
            f' domains={self.domains}'
            f' domains_lifecycle={self.domains_lifecycle}'
            f' entities={self.entities}'
            f' rate_limit={self.rate_limit}'
            f' has_time={self.has_time}>'
        )

body_hash: -2,179,960,865,395,067,100
docstring: Representation of RenderInfo.
path: homeassistant/helpers/template.py
name: __repr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _filter_domains_and_entities(self, entity_id: str) -> bool:
        'Template should re-render if the entity state changes when we match specific domains or entities.'
        return split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities

body_hash: 2,927,773,560,140,866,600
docstring: Template should re-render if the entity state changes when we match specific domains or entities.
path: homeassistant/helpers/template.py
name: _filter_domains_and_entities
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _filter_entities(self, entity_id: str) -> bool:
        'Template should re-render if the entity state changes when we match specific entities.'
        return entity_id in self.entities

body_hash: 5,553,730,494,122,894,000
docstring: Template should re-render if the entity state changes when we match specific entities.
path: homeassistant/helpers/template.py
name: _filter_entities
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _filter_lifecycle_domains(self, entity_id: str) -> bool:
        'Template should re-render if the entity is added or removed with domains watched.'
        return split_entity_id(entity_id)[0] in self.domains_lifecycle

body_hash: -277,101,688,541,714,530
docstring: Template should re-render if the entity is added or removed with domains watched.
path: homeassistant/helpers/template.py
name: _filter_lifecycle_domains
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def result(self) -> str:
        'Results of the template computation.'
        if self.exception is not None:
            raise self.exception
        return cast(str, self._result)

body_hash: -1,629,169,692,258,794,500
docstring: Results of the template computation.
path: homeassistant/helpers/template.py
name: result
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, template, hass=None):
        'Instantiate a template.'
        if not isinstance(template, str):
            raise TypeError('Expected template to be a string')
        self.template: str = template.strip()
        self._compiled_code = None
        self._compiled: jinja2.Template | None = None
        self.hass = hass
        self.is_static = not is_template_string(template)
        self._exc_info = None
        self._limited = None
        self._strict = None

body_hash: 5,451,396,353,592,941,000
docstring: Instantiate a template.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def ensure_valid(self) -> None:
        'Return if template is valid.'
        if self.is_static or self._compiled_code is not None:
            return
        try:
            self._compiled_code = self._env.compile(self.template)
        except jinja2.TemplateError as err:
            raise TemplateError(err) from err

body_hash: -8,108,559,155,988,028,000
docstring: Return if template is valid.
path: homeassistant/helpers/template.py
name: ensure_valid
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def render(self, variables: TemplateVarsType = None, parse_result: bool = True, limited: bool = False, **kwargs: Any) -> Any:
        """Render given template.

        If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
        """
        if self.is_static:
            if not parse_result or self.hass.config.legacy_templates:
                return self.template
            return self._parse_result(self.template)
        return run_callback_threadsafe(
            self.hass.loop,
            partial(self.async_render, variables, parse_result, limited, **kwargs),
        ).result()

body_hash: 6,037,249,093,941,974,000
docstring: Render given template. If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
path: homeassistant/helpers/template.py
name: render
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @callback
    def async_render(self, variables: TemplateVarsType = None, parse_result: bool = True, limited: bool = False, strict: bool = False, **kwargs: Any) -> Any:
        """Render given template.

        This method must be run in the event loop.

        If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
        """
        if self.is_static:
            if not parse_result or self.hass.config.legacy_templates:
                return self.template
            return self._parse_result(self.template)
        compiled = self._compiled or self._ensure_compiled(limited, strict)
        if variables is not None:
            kwargs.update(variables)
        try:
            render_result = _render_with_context(self.template, compiled, **kwargs)
        except Exception as err:
            raise TemplateError(err) from err
        render_result = render_result.strip()
        if self.hass.config.legacy_templates or not parse_result:
            return render_result
        return self._parse_result(render_result)

body_hash: -1,166,353,257,559,217,200
docstring: Render given template. This method must be run in the event loop. If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
path: homeassistant/helpers/template.py
name: async_render
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _parse_result(self, render_result: str) -> Any:
        'Parse the result.'
        try:
            result = literal_eval(render_result)
            if type(result) in RESULT_WRAPPERS:
                result = RESULT_WRAPPERS[type(result)](result, render_result=render_result)
            if (
                not isinstance(result, (str, complex))
                and (
                    not isinstance(result, (int, float))
                    or isinstance(result, bool)
                    or _IS_NUMERIC.match(render_result) is not None
                )
            ):
                return result
        except (ValueError, TypeError, SyntaxError, MemoryError):
            pass
        return render_result

body_hash: -21,961,819,275,808,824
docstring: Parse the result.
path: homeassistant/helpers/template.py
name: _parse_result
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

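The core of _parse_result is ast.literal_eval with a fallback to the raw string; the extra isinstance checks then guard against unwanted coercions. A stripped-down sketch of that first step (not a dataset row):

    from ast import literal_eval

    for rendered in ('42', '[1, 2]', 'True', 'hello'):
        try:
            print(repr(literal_eval(rendered)))  # 42, [1, 2], True
        except (ValueError, SyntaxError):
            print(repr(rendered))                # 'hello' falls back to the raw string
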
body:

    async def async_render_will_timeout(self, timeout: float, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any) -> bool:
        """Check to see if rendering a template will timeout during render.

        This is intended to check for expensive templates
        that will make the system unstable. The template
        is rendered in the executor to ensure it does not
        tie up the event loop.

        This function is not a security control and is only
        intended to be used as a safety check when testing
        templates.

        This method must be run in the event loop.
        """
        if self.is_static:
            return False
        compiled = self._compiled or self._ensure_compiled(strict=strict)
        if variables is not None:
            kwargs.update(variables)
        self._exc_info = None
        finish_event = asyncio.Event()

        def _render_template() -> None:
            try:
                _render_with_context(self.template, compiled, **kwargs)
            except TimeoutError:
                pass
            except Exception:
                self._exc_info = sys.exc_info()
            finally:
                run_callback_threadsafe(self.hass.loop, finish_event.set)

        try:
            template_render_thread = ThreadWithException(target=_render_template)
            template_render_thread.start()
            await asyncio.wait_for(finish_event.wait(), timeout=timeout)
            if self._exc_info:
                raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
        except asyncio.TimeoutError:
            template_render_thread.raise_exc(TimeoutError)
            return True
        finally:
            template_render_thread.join()
        return False

body_hash: -7,899,870,130,467,297,000
docstring: Check to see if rendering a template will timeout during render. This is intended to check for expensive templates that will make the system unstable. The template is rendered in the executor to ensure it does not tie up the event loop. This function is not a security control and is only intended to be used as a safety check when testing templates. This method must be run in the event loop.
path: homeassistant/helpers/template.py
name: async_render_will_timeout
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @callback
    def async_render_to_info(self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any) -> RenderInfo:
        'Render the template and collect an entity filter.'
        assert self.hass and _RENDER_INFO not in self.hass.data
        render_info = RenderInfo(self)
        if self.is_static:
            render_info._result = self.template.strip()
            render_info._freeze_static()
            return render_info
        self.hass.data[_RENDER_INFO] = render_info
        try:
            render_info._result = self.async_render(variables, strict=strict, **kwargs)
        except TemplateError as ex:
            render_info.exception = ex
        finally:
            del self.hass.data[_RENDER_INFO]
        render_info._freeze()
        return render_info

body_hash: -4,240,149,354,314,797,000
docstring: Render the template and collect an entity filter.
path: homeassistant/helpers/template.py
name: async_render_to_info
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def render_with_possible_json_value(self, value, error_value=_SENTINEL):
        """Render template with value exposed.

        If valid JSON will expose value_json too.
        """
        if self.is_static:
            return self.template
        return run_callback_threadsafe(
            self.hass.loop,
            self.async_render_with_possible_json_value,
            value,
            error_value,
        ).result()

body_hash: 6,334,468,558,806,341,000
docstring: Render template with value exposed. If valid JSON will expose value_json too.
path: homeassistant/helpers/template.py
name: render_with_possible_json_value
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @callback
    def async_render_with_possible_json_value(self, value, error_value=_SENTINEL, variables=None):
        """Render template with value exposed.

        If valid JSON will expose value_json too.

        This method must be run in the event loop.
        """
        if self.is_static:
            return self.template
        if self._compiled is None:
            self._ensure_compiled()
        variables = dict(variables or {})
        variables['value'] = value
        with suppress(ValueError, TypeError):
            variables['value_json'] = json.loads(value)
        try:
            return _render_with_context(self.template, self._compiled, **variables).strip()
        except jinja2.TemplateError as ex:
            if error_value is _SENTINEL:
                _LOGGER.error('Error parsing value: %s (value: %s, template: %s)', ex, value, self.template)
            return value if error_value is _SENTINEL else error_value

body_hash: -4,429,962,319,649,829,000
docstring: Render template with value exposed. If valid JSON will expose value_json too. This method must be run in the event loop.
path: homeassistant/helpers/template.py
name: async_render_with_possible_json_value
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def _ensure_compiled(self, limited: bool = False, strict: bool = False) -> jinja2.Template:
        'Bind a template to a specific hass instance.'
        self.ensure_valid()
        assert self.hass is not None, 'hass variable not set on template'
        assert self._limited is None or self._limited == limited, "can't change between limited and non limited template"
        assert self._strict is None or self._strict == strict, "can't change between strict and non strict template"
        assert not (strict and limited), "can't combine strict and limited template"
        self._limited = limited
        self._strict = strict
        env = self._env
        self._compiled = cast(jinja2.Template, jinja2.Template.from_code(env, self._compiled_code, env.globals, None))
        return self._compiled

body_hash: 5,344,520,617,307,532,000
docstring: Bind a template to a specific hass instance.
path: homeassistant/helpers/template.py
name: _ensure_compiled
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __eq__(self, other):
        'Compare template with another.'
        return (
            self.__class__ == other.__class__
            and self.template == other.template
            and self.hass == other.hass
        )

body_hash: 6,628,471,924,393,962,000
docstring: Compare template with another.
path: homeassistant/helpers/template.py
name: __eq__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __hash__(self) -> int:
        'Hash code for template.'
        return hash(self.template)

body_hash: 3,837,882,927,254,413,000
docstring: Hash code for template.
path: homeassistant/helpers/template.py
name: __hash__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __repr__(self) -> str:
        'Representation of Template.'
        return 'Template("' + self.template + '")'

body_hash: 8,466,370,594,914,990,000
docstring: Representation of Template.
path: homeassistant/helpers/template.py
name: __repr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, hass: HomeAssistant) -> None:
        'Initialize all states.'
        self._hass = hass

body_hash: -2,188,343,182,347,853,300
docstring: Initialize all states.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __getattr__(self, name):
        'Return the domain state.'
        if '.' in name:
            return _get_state_if_valid(self._hass, name)
        if name in _RESERVED_NAMES:
            return None
        if not valid_entity_id(f'{name}.entity'):
            raise TemplateError(f"Invalid domain name '{name}'")
        return DomainStates(self._hass, name)

body_hash: -2,045,282,052,237,543,400
docstring: Return the domain state.
path: homeassistant/helpers/template.py
name: __getattr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __iter__(self):
        'Return all states.'
        self._collect_all()
        return _state_generator(self._hass, None)

body_hash: -334,294,168,833,212,740
docstring: Return all states.
path: homeassistant/helpers/template.py
name: __iter__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __len__(self) -> int:
        'Return number of states.'
        self._collect_all_lifecycle()
        return self._hass.states.async_entity_ids_count()

body_hash: 8,146,078,219,081,897,000
docstring: Return number of states.
path: homeassistant/helpers/template.py
name: __len__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __call__(self, entity_id):
        'Return the states.'
        state = _get_state(self._hass, entity_id)
        return STATE_UNKNOWN if state is None else state.state

body_hash: -1,906,464,392,731,029,200
docstring: Return the states.
path: homeassistant/helpers/template.py
name: __call__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __repr__(self) -> str:
        'Representation of All States.'
        return '<template AllStates>'

body_hash: -3,864,778,551,672,276,000
docstring: Representation of All States.
path: homeassistant/helpers/template.py
name: __repr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, hass: HomeAssistant, domain: str) -> None:
        'Initialize the domain states.'
        self._hass = hass
        self._domain = domain

body_hash: 5,411,538,624,410,494,000
docstring: Initialize the domain states.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __getattr__(self, name):
        'Return the states.'
        return _get_state_if_valid(self._hass, f'{self._domain}.{name}')

body_hash: 2,082,216,866,680,315,000
docstring: Return the states.
path: homeassistant/helpers/template.py
name: __getattr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __iter__(self):
        'Return the iteration over all the states.'
        self._collect_domain()
        return _state_generator(self._hass, self._domain)

body_hash: 6,838,333,310,154,040,000
docstring: Return the iteration over all the states.
path: homeassistant/helpers/template.py
name: __iter__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __len__(self) -> int:
        'Return number of states.'
        self._collect_domain_lifecycle()
        return self._hass.states.async_entity_ids_count(self._domain)

body_hash: -736,870,181,358,220,700
docstring: Return number of states.
path: homeassistant/helpers/template.py
name: __len__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __repr__(self) -> str:
        'Representation of Domain States.'
        return f"<template DomainStates('{self._domain}')>"

body_hash: 4,759,772,124,110,742,000
docstring: Representation of Domain States.
path: homeassistant/helpers/template.py
name: __repr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
        'Initialize template state.'
        self._hass = hass
        self._state = state
        self._collect = collect

body_hash: 2,555,834,302,958,987,000
docstring: Initialize template state.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __getitem__(self, item):
        'Return a property as an attribute for jinja.'
        if item in _COLLECTABLE_STATE_ATTRIBUTES:
            if self._collect and _RENDER_INFO in self._hass.data:
                self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
            return getattr(self._state, item)
        if item == 'entity_id':
            return self._state.entity_id
        if item == 'state_with_unit':
            return self.state_with_unit
        raise KeyError

body_hash: 5,675,116,919,093,509,000
docstring: Return a property as an attribute for jinja.
path: homeassistant/helpers/template.py
name: __getitem__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def entity_id(self):
        """Wrap State.entity_id.

        Intentionally does not collect state
        """
        return self._state.entity_id

body_hash: -1,689,102,585,110,755,000
docstring: Wrap State.entity_id. Intentionally does not collect state
path: homeassistant/helpers/template.py
name: entity_id
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def state(self):
        'Wrap State.state.'
        self._collect_state()
        return self._state.state

body_hash: -3,503,656,440,696,153,600
docstring: Wrap State.state.
path: homeassistant/helpers/template.py
name: state
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def attributes(self):
        'Wrap State.attributes.'
        self._collect_state()
        return self._state.attributes

body_hash: -4,926,409,838,260,996,000
docstring: Wrap State.attributes.
path: homeassistant/helpers/template.py
name: attributes
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def last_changed(self):
        'Wrap State.last_changed.'
        self._collect_state()
        return self._state.last_changed

body_hash: 4,785,937,332,385,225,000
docstring: Wrap State.last_changed.
path: homeassistant/helpers/template.py
name: last_changed
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def last_updated(self):
        'Wrap State.last_updated.'
        self._collect_state()
        return self._state.last_updated

body_hash: -6,958,189,816,510,916,000
docstring: Wrap State.last_updated.
path: homeassistant/helpers/template.py
name: last_updated
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def context(self):
        'Wrap State.context.'
        self._collect_state()
        return self._state.context

body_hash: -4,253,427,782,051,221,500
docstring: Wrap State.context.
path: homeassistant/helpers/template.py
name: context
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def domain(self):
        'Wrap State.domain.'
        self._collect_state()
        return self._state.domain

body_hash: -224,251,247,949,758,750
docstring: Wrap State.domain.
path: homeassistant/helpers/template.py
name: domain
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def object_id(self):
        'Wrap State.object_id.'
        self._collect_state()
        return self._state.object_id

body_hash: -7,908,503,264,930,364,000
docstring: Wrap State.object_id.
path: homeassistant/helpers/template.py
name: object_id
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def name(self):
        'Wrap State.name.'
        self._collect_state()
        return self._state.name

body_hash: 6,087,567,154,593,101,000
docstring: Wrap State.name.
path: homeassistant/helpers/template.py
name: name
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @property
    def state_with_unit(self) -> str:
        'Return the state concatenated with the unit if available.'
        self._collect_state()
        unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        return f'{self._state.state} {unit}' if unit else self._state.state

body_hash: -5,291,269,640,585,265,000
docstring: Return the state concatenated with the unit if available.
path: homeassistant/helpers/template.py
name: state_with_unit
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __eq__(self, other: Any) -> bool:
        'Ensure we collect on equality check.'
        self._collect_state()
        return self._state.__eq__(other)

body_hash: -5,653,343,425,434,481,000
docstring: Ensure we collect on equality check.
path: homeassistant/helpers/template.py
name: __eq__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __repr__(self) -> str:
        'Representation of Template State.'
        return f'<template TemplateState({self._state.__repr__()})>'

body_hash: -2,292,285,407,276,171,800
docstring: Representation of Template State.
path: homeassistant/helpers/template.py
name: __repr__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __str__(self):
        'Log undefined __str__.'
        self._log_message()
        return super().__str__()

body_hash: 5,647,287,756,732,597,000
docstring: Log undefined __str__.
path: homeassistant/helpers/template.py
name: __str__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __iter__(self):
        'Log undefined __iter__.'
        self._log_message()
        return super().__iter__()

body_hash: 8,365,122,814,951,870,000
docstring: Log undefined __iter__.
path: homeassistant/helpers/template.py
name: __iter__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __bool__(self):
        'Log undefined __bool__.'
        self._log_message()
        return super().__bool__()

body_hash: -5,810,084,362,042,255,000
docstring: Log undefined __bool__.
path: homeassistant/helpers/template.py
name: __bool__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def __init__(self, hass, limited=False, strict=False):
        'Initialise template environment.'
        if not strict:
            undefined = LoggingUndefined
        else:
            undefined = jinja2.StrictUndefined
        super().__init__(undefined=undefined)
        self.hass = hass
        self.template_cache = weakref.WeakValueDictionary()
        self.filters['round'] = forgiving_round
        self.filters['multiply'] = multiply
        self.filters['log'] = logarithm
        self.filters['sin'] = sine
        self.filters['cos'] = cosine
        self.filters['tan'] = tangent
        self.filters['asin'] = arc_sine
        self.filters['acos'] = arc_cosine
        self.filters['atan'] = arc_tangent
        self.filters['atan2'] = arc_tangent2
        self.filters['sqrt'] = square_root
        self.filters['as_timestamp'] = forgiving_as_timestamp
        self.filters['as_local'] = dt_util.as_local
        self.filters['timestamp_custom'] = timestamp_custom
        self.filters['timestamp_local'] = timestamp_local
        self.filters['timestamp_utc'] = timestamp_utc
        self.filters['to_json'] = to_json
        self.filters['from_json'] = from_json
        self.filters['is_defined'] = fail_when_undefined
        self.filters['max'] = max
        self.filters['min'] = min
        self.filters['random'] = random_every_time
        self.filters['base64_encode'] = base64_encode
        self.filters['base64_decode'] = base64_decode
        self.filters['ordinal'] = ordinal
        self.filters['regex_match'] = regex_match
        self.filters['regex_replace'] = regex_replace
        self.filters['regex_search'] = regex_search
        self.filters['regex_findall_index'] = regex_findall_index
        self.filters['bitwise_and'] = bitwise_and
        self.filters['bitwise_or'] = bitwise_or
        self.filters['ord'] = ord
        self.globals['log'] = logarithm
        self.globals['sin'] = sine
        self.globals['cos'] = cosine
        self.globals['tan'] = tangent
        self.globals['sqrt'] = square_root
        self.globals['pi'] = math.pi
        self.globals['tau'] = math.pi * 2
        self.globals['e'] = math.e
        self.globals['asin'] = arc_sine
        self.globals['acos'] = arc_cosine
        self.globals['atan'] = arc_tangent
        self.globals['atan2'] = arc_tangent2
        self.globals['float'] = forgiving_float
        self.globals['as_local'] = dt_util.as_local
        self.globals['as_timestamp'] = forgiving_as_timestamp
        self.globals['relative_time'] = relative_time
        self.globals['timedelta'] = timedelta
        self.globals['strptime'] = strptime
        self.globals['urlencode'] = urlencode
        self.globals['max'] = max
        self.globals['min'] = min
        if hass is None:
            return

        def hassfunction(func):
            'Wrap a function that depends on hass.'

            @wraps(func)
            def wrapper(*args, **kwargs):
                return func(hass, *args[1:], **kwargs)

            return contextfunction(wrapper)

        self.globals['device_entities'] = hassfunction(device_entities)
        self.filters['device_entities'] = contextfilter(self.globals['device_entities'])
        if limited:

            def unsupported(name):
                def warn_unsupported(*args, **kwargs):
                    raise TemplateError(f"Use of '{name}' is not supported in limited templates")

                return warn_unsupported

            hass_globals = ['closest', 'distance', 'expand', 'is_state', 'is_state_attr', 'state_attr', 'states', 'utcnow', 'now']
            hass_filters = ['closest', 'expand']
            for glob in hass_globals:
                self.globals[glob] = unsupported(glob)
            for filt in hass_filters:
                self.filters[filt] = unsupported(filt)
            return
        self.globals['expand'] = hassfunction(expand)
        self.filters['expand'] = contextfilter(self.globals['expand'])
        self.globals['closest'] = hassfunction(closest)
        self.filters['closest'] = contextfilter(hassfunction(closest_filter))
        self.globals['distance'] = hassfunction(distance)
        self.globals['is_state'] = hassfunction(is_state)
        self.globals['is_state_attr'] = hassfunction(is_state_attr)
        self.globals['state_attr'] = hassfunction(state_attr)
        self.globals['states'] = AllStates(hass)
        self.globals['utcnow'] = hassfunction(utcnow)
        self.globals['now'] = hassfunction(now)

body_hash: -6,775,535,442,289,782,000
docstring: Initialise template environment.
path: homeassistant/helpers/template.py
name: __init__
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def is_safe_callable(self, obj):
        'Test if callback is safe.'
        return isinstance(obj, AllStates) or super().is_safe_callable(obj)

body_hash: -5,997,129,528,551,347,000
docstring: Test if callback is safe.
path: homeassistant/helpers/template.py
name: is_safe_callable
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def is_safe_attribute(self, obj, attr, value):
        'Test if attribute is safe.'
        if isinstance(obj, (AllStates, DomainStates, TemplateState)):
            return attr[0] != '_'
        if isinstance(obj, Namespace):
            return True
        return super().is_safe_attribute(obj, attr, value)

body_hash: -749,355,223,477,753,700
docstring: Test if attribute is safe.
path: homeassistant/helpers/template.py
name: is_safe_attribute
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
        'Compile the template.'
        if name is not None or filename is not None or raw is not False or defer_init is not False:
            return super().compile(source, name, filename, raw, defer_init)
        cached = self.template_cache.get(source)
        if cached is None:
            cached = self.template_cache[source] = super().compile(source)
        return cached

body_hash: -5,019,116,213,764,939,000
docstring: Compile the template.
path: homeassistant/helpers/template.py
name: compile
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

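The cache above is a weakref.WeakValueDictionary, so compiled templates are shared while in use and evicted automatically once no caller holds them. A minimal self-contained sketch of the pattern (not a dataset row; CachingCompiler and _Compiled are hypothetical stand-ins):

    import weakref

    class _Compiled:
        def __init__(self, source):
            self.source = source

    class CachingCompiler:
        # Entries evaporate as soon as no caller holds the compiled object.
        def __init__(self):
            self._cache = weakref.WeakValueDictionary()

        def compile(self, source):
            cached = self._cache.get(source)
            if cached is None:
                cached = self._cache[source] = _Compiled(source)  # stand-in for super().compile
            return cached

    c = CachingCompiler()
    a = c.compile('{{ x }}')
    b = c.compile('{{ x }}')
    print(a is b)  # True -- the second call is served from the cache
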
body:

    def hassfunction(func):
        'Wrap a function that depends on hass.'

        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(hass, *args[1:], **kwargs)

        return contextfunction(wrapper)

body_hash: 6,289,396,270,161,430,000
docstring: Wrap a function that depends on hass.
path: homeassistant/helpers/template.py
name: hassfunction
repository_name: apapadopoulou/core
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def asdict(data_class) -> Dict:
        'Type coerce items for easy serialization'
        data = asdict_(data_class)
        out = {}
        for k, v in data.items():
            if isinstance(v, tuple):
                out[k] = list(v)
            elif isinstance(v, np.ndarray):
                out[k] = v.tolist()
            else:
                out[k] = v
        return out

body_hash: -4,754,381,639,125,044,000
docstring: Type coerce items for easy serialization
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: asdict
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

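Usage sketch for the asdict helper above (not a dataset row), assuming its imports are in scope (asdict_ is dataclasses.asdict and np is numpy); the Params dataclass is hypothetical:

    from dataclasses import dataclass
    from typing import Tuple
    import numpy as np

    @dataclass
    class Params:
        alpha: float
        bounds: Tuple[float, float]
        draws: np.ndarray

    p = Params(alpha=0.5, bounds=(0.0, 1.0), draws=np.array([1.0, 2.0]))
    print(asdict(p))
    # {'alpha': 0.5, 'bounds': [0.0, 1.0], 'draws': [1.0, 2.0]}
    # tuples become lists and ndarrays become lists, so the dict is JSON/YAML safe
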
body:

    @classmethod
    def from_path(cls, specification_path: Union[str, Path]) -> Specification:
        'Builds the specification from a file path.'
        spec_dict = cls._load(specification_path)
        return cls.from_dict(spec_dict)

body_hash: 7,862,137,455,821,725,000
docstring: Builds the specification from a file path.
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: from_path
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @classmethod
    def from_dict(cls, spec_dict: Dict) -> Specification:
        'Builds the specification from a dictionary.'
        args = cls.parse_spec_dict(spec_dict)
        return cls(*args)

body_hash: -3,883,012,938,789,640,000
docstring: Builds the specification from a dictionary.
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: from_dict
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @classmethod
    @abc.abstractmethod
    def parse_spec_dict(cls, specification: Dict) -> Tuple:
        'Parses a dict representation of the specification into init args.'
        raise NotImplementedError

body_hash: 9,132,255,730,173,572,000
docstring: Parses a dict representation of the specification into init args.
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: parse_spec_dict
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    @abc.abstractmethod
    def to_dict(self) -> Dict:
        'Coerce the specification to a dict.'
        raise NotImplementedError

body_hash: -3,401,670,680,929,234,000
docstring: Coerce the specification to a dict.
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: to_dict
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

body:

    def dump(self, path: Union[str, Path]) -> None:
        'Writes this specification to a file.'
        data = self.to_dict()
        self._dump(data, path)

body_hash: -5,803,425,927,007,623,000
docstring: Writes this specification to a file.
path: src/covid_model_seiir_pipeline/lib/utilities.py
name: dump
repository_name: ihmeuw/covid-model-seiir-pipeline
lang: python
body_without_docstring: same as body, with the docstring removed

def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): '\n Distance weighted sampling.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n # Arguments:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should\n be l2 normalized.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n a_indices: indices of anchors.\n anchors: sampled anchor embeddings.\n positives: sampled positive embeddings.\n negatives: sampled negative embeddings.\n ' if (not isinstance(neg_multiplier, int)): raise ValueError('`neg_multiplier` must be an integer.') n = tf.size(labels) if (not isinstance(embeddings, tf.Tensor)): embeddings = tf.convert_to_tensor(embeddings) d = embeddings.shape[1].value distances = pairwise_distance(embeddings, squared=False) distances = tf.maximum(distances, high_var_threshold) log_weights = (((2 - d) * tf.log((distances + 1e-16))) - ((0.5 * (d - 3)) * tf.log(((1 + 1e-16) - (0.25 * (distances ** 2)))))) weights = tf.exp((log_weights - tf.reduce_max(log_weights))) lshape = tf.shape(labels) assert (lshape.shape == 1) labels = tf.reshape(labels, [lshape[0], 1]) adjacency = tf.equal(labels, tf.transpose(labels)) adjacency_not = tf.logical_not(adjacency) mask = tf.cast(adjacency_not, tf.float32) adjacency_ex = (tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))) m = tf.reduce_sum(adjacency_ex, axis=1) if (tf.reduce_min(m) == 0): m = tf.diag(tf.cast(tf.equal(m, 0), tf.int32)) adjacency_ex += m k = (tf.maximum(tf.reduce_max(m), 1) * neg_multiplier) pos_weights = tf.cast(adjacency_ex, tf.float32) weights = ((weights * mask) * tf.cast((distances < nonzero_loss_threshold), tf.float32)) weights = (weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)) a_indices = tf.reshape(tf.range(n), ((- 1), 1)) a_indices = tf.tile(a_indices, [1, k]) a_indices = tf.reshape(a_indices, ((- 1),)) def neg_sampling(i): s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims((weights[i] + 1e-16), axis=0)), k, output_dtype=tf.int32), axis=0) return s n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32) n_indices = tf.reshape(n_indices, ((- 1),)) def pos_sampling(i): s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims((pos_weights[i] + 1e-16), axis=0)), k, output_dtype=tf.int32), axis=0) return s p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32) p_indices = tf.reshape(p_indices, ((- 1),)) anchors = tf.gather(embeddings, a_indices, name='gather_anchors') positives = tf.gather(embeddings, p_indices, name='gather_pos') negatives = tf.gather(embeddings, n_indices, name='gather_neg') return (a_indices, anchors, positives, negatives)
6,369,874,639,233,533,000
Distance weighted sampling. # References - [sampling matters in deep embedding learning] (https://arxiv.org/abs/1706.07567) # Arguments: labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels. embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized. high_var_threshold: float. cutoff for high gradient variance. nonzero_loss_threshold: float. cutoff for non-zero loss zone. neg_multiplier: int, default=1. the multiplier used to enlarge the number of negative and positive samples. Returns: a_indices: indices of anchors. anchors: sampled anchor embeddings. positives: sampled positive embeddings. negatives: sampled negative embeddings.
loss.py
dist_weighted_sampling
miroozyx/Magin-Based-loss
python
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): '\n Distance weighted sampling.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n # Arguments:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should\n be l2 normalized.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n a_indices: indices of anchors.\n anchors: sampled anchor embeddings.\n positives: sampled positive embeddings.\n negatives: sampled negative embeddings.\n ' if (not isinstance(neg_multiplier, int)): raise ValueError('`neg_multiplier` must be an integer.') n = tf.size(labels) if (not isinstance(embeddings, tf.Tensor)): embeddings = tf.convert_to_tensor(embeddings) d = embeddings.shape[1].value distances = pairwise_distance(embeddings, squared=False) distances = tf.maximum(distances, high_var_threshold) log_weights = (((2 - d) * tf.log((distances + 1e-16))) - ((0.5 * (d - 3)) * tf.log(((1 + 1e-16) - (0.25 * (distances ** 2)))))) weights = tf.exp((log_weights - tf.reduce_max(log_weights))) lshape = tf.shape(labels) assert (lshape.shape == 1) labels = tf.reshape(labels, [lshape[0], 1]) adjacency = tf.equal(labels, tf.transpose(labels)) adjacency_not = tf.logical_not(adjacency) mask = tf.cast(adjacency_not, tf.float32) adjacency_ex = (tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))) m = tf.reduce_sum(adjacency_ex, axis=1) if (tf.reduce_min(m) == 0): m = tf.diag(tf.cast(tf.equal(m, 0), tf.int32)) adjacency_ex += m k = (tf.maximum(tf.reduce_max(m), 1) * neg_multiplier) pos_weights = tf.cast(adjacency_ex, tf.float32) weights = ((weights * mask) * tf.cast((distances < nonzero_loss_threshold), tf.float32)) weights = (weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)) a_indices = tf.reshape(tf.range(n), ((- 1), 1)) a_indices = tf.tile(a_indices, [1, k]) a_indices = tf.reshape(a_indices, ((- 1),)) def neg_sampling(i): s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims((weights[i] + 1e-16), axis=0)), k, output_dtype=tf.int32), axis=0) return s n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32) n_indices = tf.reshape(n_indices, ((- 1),)) def pos_sampling(i): s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims((pos_weights[i] + 1e-16), axis=0)), k, output_dtype=tf.int32), axis=0) return s p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32) p_indices = tf.reshape(p_indices, ((- 1),)) anchors = tf.gather(embeddings, a_indices, name='gather_anchors') positives = tf.gather(embeddings, p_indices, name='gather_pos') negatives = tf.gather(embeddings, n_indices, name='gather_neg') return (a_indices, anchors, positives, negatives)
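A minimal usage sketch for the record above, assuming TensorFlow 1.x graph mode and that the sketch lives in the same module as the record's function (the repository's own pairwise_distance helper is not shown, so an equivalent one is defined here using the standard squared-norm expansion):

import tensorflow as tf

def pairwise_distance(embeddings, squared=False):
    # ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2, clipped at zero for numerical stability
    dots = tf.matmul(embeddings, embeddings, transpose_b=True)
    sq_norms = tf.linalg.diag_part(dots)
    d2 = tf.expand_dims(sq_norms, 0) - 2.0 * dots + tf.expand_dims(sq_norms, 1)
    d2 = tf.maximum(d2, 0.0)
    return d2 if squared else tf.sqrt(d2 + 1e-16)

labels = tf.placeholder(tf.int32, [None])
features = tf.placeholder(tf.float32, [None, 128])       # static last dim: the sampler reads embeddings.shape[1].value
embeddings = tf.nn.l2_normalize(features, axis=1)        # the sampler expects l2-normalized inputs
a_idx, anchors, positives, negatives = dist_weighted_sampling(labels, embeddings)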
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): '\n Computes the margin base loss.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.\n margin: Float, margin term in the loss function.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n margin_based_Loss: tf.float32 scalar\n ' (a_indices, anchors, positives, negatives) = dist_weighted_sampling(labels, embeddings, high_var_threshold=high_var_threshold, nonzero_loss_threshold=nonzero_loss_threshold, neg_multiplier=neg_multiplier) if isinstance(beta_in, (float, int)): beta = beta_in beta_reg_loss = 0.0 elif isinstance(beta_in, tf.Tensor): assert (tf.shape(beta_in).shape == 1) k = (tf.size(a_indices) / tf.size(labels)) k = tf.cast(k, tf.int32) beta = tf.reshape(beta_in, ((- 1), 1)) beta = tf.tile(beta, [1, k]) beta = tf.reshape(beta, ((- 1),)) beta_reg_loss = (tf.reduce_sum(beta) * nu) else: raise ValueError('`beta_in` must be one of [float, int, tf.Tensor].') d_ap = tf.sqrt((tf.reduce_sum(tf.square((positives - anchors)), axis=1) + 1e-16)) d_an = tf.sqrt((tf.reduce_sum(tf.square((negatives - anchors)), axis=1) + 1e-16)) pos_loss = tf.maximum(((margin + d_ap) - beta), 0) neg_loss = tf.maximum(((margin + beta) - d_an), 0) pair_cnt = tf.cast(tf.size(a_indices), tf.float32) loss = (((tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss)) + beta_reg_loss) / pair_cnt) return loss
-2,451,100,185,096,866,300
Computes the margin-based loss. # References - [sampling matters in deep embedding learning] (https://arxiv.org/abs/1706.07567) Args: labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels. embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized. beta_in: float, int, or 1-D float `Tensor` with shape [batch_size] of class boundary parameters. margin: float, margin term in the loss function. nu: float. Regularization parameter for beta. high_var_threshold: float. cutoff for high gradient variance. nonzero_loss_threshold: float. cutoff for non-zero loss zone. neg_multiplier: int, default=1. the multiplier used to enlarge the number of negative and positive samples. Returns: margin_based_loss: tf.float32 scalar
loss.py
margin_based_loss
miroozyx/Magin-Based-loss
python
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): '\n Computes the margin base loss.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.\n margin: Float, margin term in the loss function.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n margin_based_Loss: tf.float32 scalar\n ' (a_indices, anchors, positives, negatives) = dist_weighted_sampling(labels, embeddings, high_var_threshold=high_var_threshold, nonzero_loss_threshold=nonzero_loss_threshold, neg_multiplier=neg_multiplier) if isinstance(beta_in, (float, int)): beta = beta_in beta_reg_loss = 0.0 elif isinstance(beta_in, tf.Tensor): assert (tf.shape(beta_in).shape == 1) k = (tf.size(a_indices) / tf.size(labels)) k = tf.cast(k, tf.int32) beta = tf.reshape(beta_in, ((- 1), 1)) beta = tf.tile(beta, [1, k]) beta = tf.reshape(beta, ((- 1),)) beta_reg_loss = (tf.reduce_sum(beta) * nu) else: raise ValueError('`beta_in` must be one of [float, int, tf.Tensor].') d_ap = tf.sqrt((tf.reduce_sum(tf.square((positives - anchors)), axis=1) + 1e-16)) d_an = tf.sqrt((tf.reduce_sum(tf.square((negatives - anchors)), axis=1) + 1e-16)) pos_loss = tf.maximum(((margin + d_ap) - beta), 0) neg_loss = tf.maximum(((margin + beta) - d_an), 0) pair_cnt = tf.cast(tf.size(a_indices), tf.float32) loss = (((tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss)) + beta_reg_loss) / pair_cnt) return loss
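One way to wire in a trainable per-class boundary, hedged as a sketch: beta_in must be one value per batch element, so a [num_classes] variable is gathered by label before the call. num_classes and the feature placeholder below stand in for whatever the training script defines, and margin_based_loss is assumed to be in scope from loss.py:

import tensorflow as tf

num_classes = 100  # assumption: dataset-specific
beta_var = tf.get_variable('beta', shape=[num_classes],
                           initializer=tf.constant_initializer(1.2))
labels = tf.placeholder(tf.int32, [None])
net_output = tf.placeholder(tf.float32, [None, 128])   # stand-in for the model's features
embeddings = tf.nn.l2_normalize(net_output, axis=1)
beta_batch = tf.gather(beta_var, labels)               # one boundary value per sample
loss = margin_based_loss(labels, embeddings, beta_in=beta_batch, margin=0.2, nu=1e-4)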
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): 'distance weighted sampling + triplet loss\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n margin: Float, margin term in the loss function.\n squared: Boolean, whether or not to square the triplet distances.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n triplet_loss: tf.float32 scalar\n\n ' (a_indices, anchors, positives, negatives) = dist_weighted_sampling(labels, embeddings, high_var_threshold=high_var_threshold, nonzero_loss_threshold=nonzero_loss_threshold, neg_multiplier=neg_multiplier) d_ap = tf.reduce_sum(tf.square((positives - anchors)), axis=1) d_an = tf.reduce_sum(tf.square((negatives - anchors)), axis=1) if (not squared): d_ap = K.sqrt((d_ap + 1e-16)) d_an = K.sqrt((d_an + 1e-16)) loss = tf.maximum(((d_ap - d_an) + margin), 0) loss = tf.reduce_mean(loss) return loss
4,112,237,657,098,011,600
Distance weighted sampling + triplet loss Args: labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels. embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized. margin: float, margin term in the loss function. squared: Boolean, whether or not to square the triplet distances. high_var_threshold: float. cutoff for high gradient variance. nonzero_loss_threshold: float. cutoff for non-zero loss zone. neg_multiplier: int, default=1. the multiplier used to enlarge the number of negative and positive samples. Returns: triplet_loss: tf.float32 scalar
loss.py
distance_weighted_triplet_loss
miroozyx/Magin-Based-loss
python
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1): 'distance weighted sampling + triplet loss\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n margin: Float, margin term in the loss function.\n squared: Boolean, whether or not to square the triplet distances.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n triplet_loss: tf.float32 scalar\n\n ' (a_indices, anchors, positives, negatives) = dist_weighted_sampling(labels, embeddings, high_var_threshold=high_var_threshold, nonzero_loss_threshold=nonzero_loss_threshold, neg_multiplier=neg_multiplier) d_ap = tf.reduce_sum(tf.square((positives - anchors)), axis=1) d_an = tf.reduce_sum(tf.square((negatives - anchors)), axis=1) if (not squared): d_ap = K.sqrt((d_ap + 1e-16)) d_an = K.sqrt((d_an + 1e-16)) loss = tf.maximum(((d_ap - d_an) + margin), 0) loss = tf.reduce_mean(loss) return loss
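Training against this loss is the usual TF 1.x pattern; a sketch, assuming K in the record is the Keras backend (from tensorflow.keras import backend as K) and the loss functions from loss.py are in scope:

import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 64])
labels = tf.placeholder(tf.int32, [None])
w = tf.get_variable('proj', [64, 32])                        # toy trainable projection
embeddings = tf.nn.l2_normalize(tf.matmul(features, w), axis=1)
loss = distance_weighted_triplet_loss(labels, embeddings, margin=0.5)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)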
@property def example(self): 'Get and cache an example batch of `inputs, labels` for plotting.' result = getattr(self, '_example', None) if (result is None): result = next(iter(self.train)) self._example = result return result
-4,770,954,288,945,048,000
Get and cache an example batch of `inputs, labels` for plotting.
src/data_cleaning/window_generator.py
example
EFR-AI/AIBSIF
python
@property def example(self): result = getattr(self, '_example', None) if (result is None): result = next(iter(self.train)) self._example = result return result
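The property above is a memoization idiom: getattr with a default probes for a cached batch, so the dataset is only iterated on the first access. A standalone illustration, with any iterable of (inputs, labels) batches standing in for self.train:

class WindowGenerator:
    def __init__(self, train):
        self.train = train  # any iterable of (inputs, labels) batches

    @property
    def example(self):
        result = getattr(self, '_example', None)
        if result is None:
            result = next(iter(self.train))  # pull one batch on first access
            self._example = result           # ...and cache it on the instance
        return result

w = WindowGenerator(train=[('inputs-0', 'labels-0'), ('inputs-1', 'labels-1')])
assert w.example is w.example  # the second access returns the cached batch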
def calculate_score_for_each_mood(self): '\n Score each image with the Google NIMA model\n paper: https://arxiv.org/abs/1709.05424\n pytorch model: https://github.com/truskovskiyk/nima.pytorch.git\n\n Compute the mean image score for each post\n Posts without images are filled with the overall mean\n :return:\n ' self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json' with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r: self.image_score_dict = json.load(r) self.image_score_df = pd.DataFrame(self.image_score_dict) mean_score = self.image_score_df[(self.image_score_df['score'] != (- 1))].mean()[0] self.image_score_df.loc[((self.image_score_df.score == (- 1)), 'score')] = mean_score tid_list = self.mood_data_df['tid'].values for tid in tid_list: scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score if (len(scores) > 0): self.mood_data_df.loc[((self.mood_data_df.tid == tid), 'score')] = round(scores.mean(), 2) self.mood_data_df.fillna(mean_score, inplace=True) print('score shape:', self.mood_data_df.shape) self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
3,560,696,306,482,841,000
Score each image with the Google NIMA model paper: https://arxiv.org/abs/1709.05424 pytorch model: https://github.com/truskovskiyk/nima.pytorch.git Compute the mean image score for each post Posts without images are filled with the overall mean :return:
src/analysis/TrainMood.py
calculate_score_for_each_mood
343695222/QQZoneMood
python
def calculate_score_for_each_mood(self): '\n Score each image with the Google NIMA model\n paper: https://arxiv.org/abs/1709.05424\n pytorch model: https://github.com/truskovskiyk/nima.pytorch.git\n\n Compute the mean image score for each post\n Posts without images are filled with the overall mean\n :return:\n ' self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json' with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r: self.image_score_dict = json.load(r) self.image_score_df = pd.DataFrame(self.image_score_dict) mean_score = self.image_score_df[(self.image_score_df['score'] != (- 1))].mean()[0] self.image_score_df.loc[((self.image_score_df.score == (- 1)), 'score')] = mean_score tid_list = self.mood_data_df['tid'].values for tid in tid_list: scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score if (len(scores) > 0): self.mood_data_df.loc[((self.mood_data_df.tid == tid), 'score')] = round(scores.mean(), 2) self.mood_data_df.fillna(mean_score, inplace=True) print('score shape:', self.mood_data_df.shape) self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
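A compact, runnable restatement of the scoring logic above with made-up data; column names mirror the record (note the record's original fillna call discarded its result, fixed above with inplace=True):

import pandas as pd

image_scores = pd.DataFrame({
    'image': ['tid1_a.jpg', 'tid1_b.jpg', 'tid2_a.jpg'],
    'score': [6.1, 5.3, -1],                              # -1 marks images NIMA failed on
})
moods = pd.DataFrame({'tid': ['tid1', 'tid2', 'tid3']})

mean_score = image_scores.loc[image_scores.score != -1, 'score'].mean()
image_scores.loc[image_scores.score == -1, 'score'] = mean_score   # backfill failures

for tid in moods['tid']:
    scores = image_scores[image_scores.image.str.contains(tid)].score
    if len(scores) > 0:
        moods.loc[moods.tid == tid, 'score'] = round(scores.mean(), 2)
moods['score'] = moods['score'].fillna(mean_score)   # posts with no images get the mean
print(moods)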
def calculate_send_time(self): '\n Compute the send time of each post\n Bucketed into the following six types:\n 0. midnight: 0-4\n 1. early morning: 4-8\n 2. morning: 8-12\n 3. afternoon: 12-16\n 4. dusk: 16-20\n 5. night: 20-24\n :return:\n ' day_begin_time = self.mood_data_df['time'].apply((lambda x: get_mktime2(x))) day_time_stamp = self.mood_data_df['time_stamp'] time_diff = (day_time_stamp - day_begin_time) time_step = ((60 * 60) * 4) time_state = time_diff.apply((lambda x: (x // time_step))) self.mood_data_df['time_state'] = time_state print('send time:', self.mood_data_df.shape)
2,151,849,557,618,110,000
Compute the send time of each post Bucketed into the following six types: 0. midnight: 0-4 1. early morning: 4-8 2. morning: 8-12 3. afternoon: 12-16 4. dusk: 16-20 5. night: 20-24 :return:
src/analysis/TrainMood.py
calculate_send_time
343695222/QQZoneMood
python
def calculate_send_time(self): '\n Compute the send time of each post\n Bucketed into the following six types:\n 0. midnight: 0-4\n 1. early morning: 4-8\n 2. morning: 8-12\n 3. afternoon: 12-16\n 4. dusk: 16-20\n 5. night: 20-24\n :return:\n ' day_begin_time = self.mood_data_df['time'].apply((lambda x: get_mktime2(x))) day_time_stamp = self.mood_data_df['time_stamp'] time_diff = (day_time_stamp - day_begin_time) time_step = ((60 * 60) * 4) time_state = time_diff.apply((lambda x: (x // time_step))) self.mood_data_df['time_state'] = time_state print('send time:', self.mood_data_df.shape)
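The bucket index is just integer division of seconds-since-midnight by four hours; a standalone check of the arithmetic (get_mktime2 in the record is assumed to return the timestamp of that day's midnight):

from datetime import datetime

def time_state(ts):
    dt = datetime.fromtimestamp(ts)
    midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
    return int((dt - midnight).total_seconds() // (60 * 60 * 4))  # buckets 0..5

assert time_state(datetime(2020, 1, 1, 3, 59).timestamp()) == 0   # midnight: 0-4
assert time_state(datetime(2020, 1, 1, 13, 0).timestamp()) == 3   # afternoon: 12-16
assert time_state(datetime(2020, 1, 1, 21, 0).timestamp()) == 5   # night: 20-24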
def export_classification_data(self): '\n Export the data to be classified\n :return:\n ' data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME) data_df = data[['content']] data_df['Y'] = '旅游与运动' data_df.fillna('空', inplace=True) columns = ['Y', 'content'] data_df = data_df.loc[:, columns] print(data_df.shape) data_df.to_csv((self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv'), sep='\t')
-7,152,024,162,251,435,000
Export the data to be classified :return:
src/analysis/TrainMood.py
export_classification_data
343695222/QQZoneMood
python
def export_classification_data(self): '\n Export the data to be classified\n :return:\n ' data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME) data_df = data[['content']] data_df['Y'] = '旅游与运动' data_df.fillna('空', inplace=True) columns = ['Y', 'content'] data_df = data_df.loc[:, columns] print(data_df.shape) data_df.to_csv((self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv'), sep='\t')
def get_nodes(self, request): '\n This method is used to build the menu tree.\n ' nodes = [] docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json') if (not os.path.exists(docsmap_file)): return nodes with io.open(docsmap_file, encoding='utf-8') as fh: docs_map = json.load(fh) for (counter, items) in enumerate(docs_map.items(), 1): bits = items[0].split('/') if (((len(bits) == 1) and (bits[0] == 'index')) or ((len(bits) == 2) and (bits[1] != 'index'))): continue node = NavigationNode(title=items[1], url=reverse_lazy('sphinx-documentation', args=(bits[0],)), id=counter) nodes.append(node) return nodes
-63,404,229,865,632,920
This method is used to build the menu tree.
cmsplugin_cascade/sphinx/cms_menus.py
get_nodes
beeduino/djangocms-cascade
python
def get_nodes(self, request): nodes = [] docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json') if (not os.path.exists(docsmap_file)): return nodes with io.open(docsmap_file, encoding='utf-8') as fh: docs_map = json.load(fh) for (counter, items) in enumerate(docs_map.items(), 1): bits = items[0].split('/') if (((len(bits) == 1) and (bits[0] == 'index')) or ((len(bits) == 2) and (bits[1] != 'index'))): continue node = NavigationNode(title=items[1], url=reverse_lazy('sphinx-documentation', args=(bits[0],)), id=counter) nodes.append(node) return nodes
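The path filter in get_nodes keeps top-level pages (except the root index) and chapter index pages, and drops everything else; a pure-Python check of that rule against a made-up docsmap:

docs_map = {
    'index': 'Home',             # root index: skipped
    'intro': 'Introduction',     # top-level page: kept
    'usage/index': 'Usage',      # chapter index: kept
    'usage/details': 'Details',  # chapter sub-page: skipped
}

def keep(key):
    bits = key.split('/')
    return not ((len(bits) == 1 and bits[0] == 'index')
                or (len(bits) == 2 and bits[1] != 'index'))

assert [k for k in docs_map if keep(k)] == ['intro', 'usage/index']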
def Crc8(crc, data): 'Update CRC8 value.' for v in data: crc = (((crc << 4) & 255) ^ CRC_TABLE[((crc >> 4) ^ (v >> 4))]) crc = (((crc << 4) & 255) ^ CRC_TABLE[((crc >> 4) ^ (v & 15))]) return (crc ^ 85)
1,831,462,198,220,214,000
Update CRC8 value.
chip/mchp/util/pack_ec.py
Crc8
DHowett/fw-ectool
python
def Crc8(crc, data): for v in data: crc = (((crc << 4) & 255) ^ CRC_TABLE[((crc >> 4) ^ (v >> 4))]) crc = (((crc << 4) & 255) ^ CRC_TABLE[((crc >> 4) ^ (v & 15))]) return (crc ^ 85)
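The record processes one byte as two lookups on its high and low nibbles and XORs the final value with 0x55; the 16-entry CRC_TABLE itself is defined elsewhere in pack_ec.py and is not reproduced here. Below is a bit-level sketch of a nibble-driven CRC-8 built the same way, assuming polynomial 0x07 purely for illustration (the EC tool's actual polynomial may differ):

def make_nibble_table(poly=0x07):
    # table[n] = CRC residue of nibble n placed in the top 4 bits
    table = []
    for n in range(16):
        crc = n << 4
        for _ in range(4):
            crc = ((crc << 1) ^ poly) & 0xFF if (crc & 0x80) else ((crc << 1) & 0xFF)
        table.append(crc)
    return table

CRC_TABLE = make_nibble_table()

def crc8(crc, data):
    for v in data:
        crc = ((crc << 4) & 0xFF) ^ CRC_TABLE[(crc >> 4) ^ (v >> 4)]   # high nibble
        crc = ((crc << 4) & 0xFF) ^ CRC_TABLE[(crc >> 4) ^ (v & 0x0F)] # low nibble
    return crc ^ 0x55  # same final XOR as the record

print(hex(crc8(0, b'123456789')))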
def GetEntryPoint(payload_file): 'Read entry point from payload EC image.' with open(payload_file, 'rb') as f: f.seek(4) s = f.read(4) return struct.unpack('<I', s)[0]
6,129,148,495,595,156,000
Read entry point from payload EC image.
chip/mchp/util/pack_ec.py
GetEntryPoint
DHowett/fw-ectool
python
def GetEntryPoint(payload_file): with open(payload_file, 'rb') as f: f.seek(4) s = f.read(4) return struct.unpack('<I', s)[0]
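A self-contained round trip showing the read above: the entry point is the little-endian 32-bit word at byte offset 4. The surrounding header layout is not specified by the record, so the zero padding here is only a stand-in:

import struct
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'\x00' * 4)                     # bytes 0-3: not the entry point
    f.write(struct.pack('<I', 0xE0001000))   # bytes 4-7: entry point, little-endian
    name = f.name

with open(name, 'rb') as f:
    f.seek(4)
    entry = struct.unpack('<I', f.read(4))[0]
assert entry == 0xE0001000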
def GetPayloadFromOffset(payload_file, offset): 'Read payload and pad it to 64-byte aligned.' with open(payload_file, 'rb') as f: f.seek(offset) payload = bytearray(f.read()) rem_len = (len(payload) % 64) if rem_len: payload += (b'\x00' * (64 - rem_len)) return payload
3,025,281,954,015,257,600
Read payload and pad it to 64-byte aligned.
chip/mchp/util/pack_ec.py
GetPayloadFromOffset
DHowett/fw-ectool
python
def GetPayloadFromOffset(payload_file, offset): with open(payload_file, 'rb') as f: f.seek(offset) payload = bytearray(f.read()) rem_len = (len(payload) % 64) if rem_len: payload += (b'\x00' * (64 - rem_len)) return payload
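The padding rule above rounds the payload up to the next 64-byte boundary; a quick check of the arithmetic:

payload = bytearray(b'\xaa' * 100)
rem = len(payload) % 64
if rem:
    payload += b'\x00' * (64 - rem)   # pad to the next 64-byte boundary
assert len(payload) == 128
assert payload[100:] == b'\x00' * 28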
def GetPayload(payload_file): 'Read payload and pad it to 64-byte aligned.' return GetPayloadFromOffset(payload_file, 0)
3,972,005,411,253,790,000
Read payload and pad it to 64-byte aligned.
chip/mchp/util/pack_ec.py
GetPayload
DHowett/fw-ectool
python
def GetPayload(payload_file): return GetPayloadFromOffset(payload_file, 0)
def GetPublicKey(pem_file): 'Extract public exponent and modulus from PEM file.' result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text', '-noout'], stdout=subprocess.PIPE, encoding='utf-8') modulus_raw = [] in_modulus = False for line in result.stdout.splitlines(): if line.startswith('modulus'): in_modulus = True elif (not line.startswith(' ')): in_modulus = False elif in_modulus: modulus_raw.extend(line.strip().strip(':').split(':')) if line.startswith('publicExponent'): exp = int(line.split(' ')[1], 10) modulus_raw.reverse() modulus = bytearray((int(x, 16) for x in modulus_raw[:256])) return (struct.pack('<Q', exp), modulus)
-834,223,368,120,348,900
Extract public exponent and modulus from PEM file.
chip/mchp/util/pack_ec.py
GetPublicKey
DHowett/fw-ectool
python
def GetPublicKey(pem_file): result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text', '-noout'], stdout=subprocess.PIPE, encoding='utf-8') modulus_raw = [] in_modulus = False for line in result.stdout.splitlines(): if line.startswith('modulus'): in_modulus = True elif (not line.startswith(' ')): in_modulus = False elif in_modulus: modulus_raw.extend(line.strip().strip(':').split(':')) if line.startswith('publicExponent'): exp = int(line.split(' ')[1], 10) modulus_raw.reverse() modulus = bytearray((int(x, 16) for x in modulus_raw[:256])) return (struct.pack('<Q', exp), modulus)
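A hedged end-to-end check of the parser above: generate a throwaway 2048-bit key with the openssl CLI (assumed to be on PATH) and verify the extracted exponent and modulus sizes; GetPublicKey is the function from the record:

import os
import struct
import subprocess
import tempfile

pem = tempfile.NamedTemporaryFile(suffix='.pem', delete=False).name
subprocess.run(['openssl', 'genrsa', '-out', pem, '2048'], check=True)

exp, modulus = GetPublicKey(pem)
assert exp == struct.pack('<Q', 65537)   # openssl's default public exponent
assert len(modulus) == 256               # 2048-bit modulus as little-endian bytes
os.unlink(pem)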
def PacklfwRoImage(rorw_file, loader_file, image_size): 'Create a temp file containing the entire\n loader image followed by the first image_size bytes\n of the rorw file.\n return the filename' fo = tempfile.NamedTemporaryFile(delete=False) with open(loader_file, 'rb') as fin1: pro = fin1.read() fo.write(pro) with open(rorw_file, 'rb') as fin: ro = fin.read(image_size) fo.write(ro) fo.close() return fo.name
-6,079,619,575,248,944,000
Create a temp file containing the entire loader image followed by the first image_size bytes of the rorw file; return the filename
chip/mchp/util/pack_ec.py
PacklfwRoImage
DHowett/fw-ectool
python
def PacklfwRoImage(rorw_file, loader_file, image_size): 'Create a temp file containing the entire\n loader image followed by the first image_size bytes\n of the rorw file.\n return the filename' fo = tempfile.NamedTemporaryFile(delete=False) with open(loader_file, 'rb') as fin1: pro = fin1.read() fo.write(pro) with open(rorw_file, 'rb') as fin: ro = fin.read(image_size) fo.write(ro) fo.close() return fo.name
def create_namespaced_job(self, namespace, body, **kwargs): "\n create a Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_namespaced_job(namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1Job body: (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1Job\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: data = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data
-8,222,020,624,813,056,000
create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread.
kubernetes/client/apis/batch_v1_api.py
create_namespaced_job
MiaoRachelYu/python
python
def create_namespaced_job(self, namespace, body, **kwargs): "\n create a Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_namespaced_job(namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1Job body: (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1Job\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: data = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data
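A typical call into the generated client above, building a minimal Job manifest with the official kubernetes Python package; the image, command, and namespace are placeholders:

from kubernetes import client, config

config.load_kube_config()                  # or load_incluster_config() inside a pod
batch = client.BatchV1Api()

job = client.V1Job(
    metadata=client.V1ObjectMeta(name='pi'),
    spec=client.V1JobSpec(
        template=client.V1PodTemplateSpec(
            spec=client.V1PodSpec(
                restart_policy='Never',
                containers=[client.V1Container(
                    name='pi',
                    image='perl',          # placeholder image
                    command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(2000)'],
                )],
            ),
        ),
    ),
)
created = batch.create_namespaced_job(namespace='default', body=job)
print(created.metadata.name)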
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): "\n create a Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1Job body: (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1Job\n If the method is called asynchronously,\n returns the request thread.\n " all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_namespaced_job" % key)) params[key] = val del params['kwargs'] if (('namespace' not in params) or (params['namespace'] is None)): raise ValueError('Missing the required parameter `namespace` when calling `create_namespaced_job`') if (('body' not in params) or (params['body'] is None)): raise ValueError('Missing the required parameter `body` when calling `create_namespaced_job`') collection_formats = {} path_params = {} if ('namespace' in params): path_params['namespace'] = params['namespace'] query_params = [] if ('include_uninitialized' in params): query_params.append(('includeUninitialized', params['include_uninitialized'])) if ('pretty' in params): query_params.append(('pretty', params['pretty'])) if ('dry_run' in params): query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*']) auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
-1,810,460,728,275,074,800
create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread.
kubernetes/client/apis/batch_v1_api.py
create_namespaced_job_with_http_info
MiaoRachelYu/python
python
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): "\n create a Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1Job body: (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1Job\n If the method is called asynchronously,\n returns the request thread.\n " all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_namespaced_job" % key)) params[key] = val del params['kwargs'] if (('namespace' not in params) or (params['namespace'] is None)): raise ValueError('Missing the required parameter `namespace` when calling `create_namespaced_job`') if (('body' not in params) or (params['body'] is None)): raise ValueError('Missing the required parameter `body` when calling `create_namespaced_job`') collection_formats = {} path_params = {} if ('namespace' in params): path_params['namespace'] = params['namespace'] query_params = [] if ('include_uninitialized' in params): query_params.append(('includeUninitialized', params['include_uninitialized'])) if ('pretty' in params): query_params.append(('pretty', params['pretty'])) if ('dry_run' in params): query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*']) auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs): '\n delete collection of Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.\n :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.\n :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.\n :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.\n :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it\'s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.\n :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.\n :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.\n :return: V1Status\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: data = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data
5,780,434,193,283,119,000
delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
kubernetes/client/apis/batch_v1_api.py
delete_collection_namespaced_job
MiaoRachelYu/python
python
def delete_collection_namespaced_job(self, namespace, **kwargs): '\n delete collection of Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.\n :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.\n :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.\n :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.\n :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it\'s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.\n :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.\n :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.\n :return: V1Status\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: data = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data
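The collection delete is usually scoped with a label selector so only matching Jobs are removed; namespace and selector below are placeholders:

from kubernetes import client, config

config.load_kube_config()
batch = client.BatchV1Api()
status = batch.delete_collection_namespaced_job(
    namespace='default',
    label_selector='app=nightly-batch',   # placeholder selector
)
print(status.status)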
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): '\n delete collection of Job\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.\n :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.\n :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.\n :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.\n :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it\'s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.\n :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.\n :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.\n :return: V1Status\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method delete_collection_namespaced_job" % key)) params[key] = val del params['kwargs'] if (('namespace' not in params) or (params['namespace'] is None)): raise ValueError('Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`') collection_formats = {} path_params = {} if ('namespace' in params): path_params['namespace'] = params['namespace'] query_params = [] if ('include_uninitialized' in params): query_params.append(('includeUninitialized', params['include_uninitialized'])) if ('pretty' in params): query_params.append(('pretty', params['pretty'])) if ('_continue' in params): query_params.append(('continue', params['_continue'])) if ('field_selector' in params): query_params.append(('fieldSelector', params['field_selector'])) if ('label_selector' in params): query_params.append(('labelSelector', params['label_selector'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('resource_version' in params): query_params.append(('resourceVersion', params['resource_version'])) if ('timeout_seconds' in params): query_params.append(('timeoutSeconds', params['timeout_seconds'])) if ('watch' in params): query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*']) auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
-1,365,401,780,679,479,000
delete collection of Job
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (as few as zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously, returns the request thread.
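Read as client usage, the parameters above are ordinary keyword arguments. A minimal sketch of both calling styles, assuming a kubeconfig-backed cluster and an illustrative namespace and label selector (neither appears in the source):

from kubernetes import client, config

config.load_kube_config()  # assumes credentials in ~/.kube/config
batch_v1 = client.BatchV1Api()

# Synchronous form: deletes every Job in the namespace matching the selector.
status = batch_v1.delete_collection_namespaced_job(
    namespace='default', label_selector='app=nightly-report')
print(status)

# Asynchronous form: async_req=True returns a thread-like handle; get()
# blocks for the result. With the _with_http_info variant this is, in
# most client versions, a (data, HTTP status, headers) tuple.
thread = batch_v1.delete_collection_namespaced_job_with_http_info(
    'default', async_req=True)
result = thread.get()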
kubernetes/client/apis/batch_v1_api.py
delete_collection_namespaced_job_with_http_info
MiaoRachelYu/python
python
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method delete_collection_namespaced_job" % key)) params[key] = val del params['kwargs'] if (('namespace' not in params) or (params['namespace'] is None)): raise ValueError('Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`') collection_formats = {} path_params = {} if ('namespace' in params): path_params['namespace'] = params['namespace'] query_params = [] if ('include_uninitialized' in params): query_params.append(('includeUninitialized', params['include_uninitialized'])) if ('pretty' in params): query_params.append(('pretty', params['pretty'])) if ('_continue' in params): query_params.append(('continue', params['_continue'])) if ('field_selector' in params): query_params.append(('fieldSelector', params['field_selector'])) if ('label_selector' in params): query_params.append(('labelSelector', params['label_selector'])) if ('limit' in params): query_params.append(('limit', params['limit'])) if ('resource_version' in params): query_params.append(('resourceVersion', params['resource_version'])) if ('timeout_seconds' in params): query_params.append(('timeoutSeconds', params['timeout_seconds'])) if ('watch' in params): query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*']) auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
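The long `_continue`/`limit` descriptions above document the server's chunked-list protocol rather than anything specific to this delete call, which returns a bare V1Status. A sketch of that paging loop against the sibling list_namespaced_job method, with an assumed namespace and page size:

from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

# Page through Jobs 50 at a time; the server hands back a continue
# token in the list metadata until the listing is exhausted.
kwargs = {}
while True:
    page = batch_v1.list_namespaced_job('default', limit=50, **kwargs)
    for job in page.items:
        print(job.metadata.name)
    token = page.metadata._continue  # V1ListMeta carries the token
    if not token:
        break
    kwargs['_continue'] = token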