repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
KE-works/pykechain | pykechain/models/activity.py | Activity.parts | def parts(self, *args, **kwargs):
"""Retrieve parts belonging to this activity.
Without any arguments it retrieves the Instances related to this task only.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
See :class:`pykechain.Client.parts` for additional available parameters.
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> parts = task.parts()
To retrieve the models only.
>>> parts = task.parts(category=Category.MODEL)
"""
return self._client.parts(*args, activity=self.id, **kwargs) | python | def parts(self, *args, **kwargs):
"""Retrieve parts belonging to this activity.
Without any arguments it retrieves the Instances related to this task only.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
See :class:`pykechain.Client.parts` for additional available parameters.
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> parts = task.parts()
To retrieve the models only.
>>> parts = task.parts(category=Category.MODEL)
"""
return self._client.parts(*args, activity=self.id, **kwargs) | [
"def",
"parts",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_client",
".",
"parts",
"(",
"*",
"args",
",",
"activity",
"=",
"self",
".",
"id",
",",
"*",
"*",
"kwargs",
")"
] | Retrieve parts belonging to this activity.
Without any arguments it retrieves the Instances related to this task only.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
See :class:`pykechain.Client.parts` for additional available parameters.
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> parts = task.parts()
To retrieve the models only.
>>> parts = task.parts(category=Category.MODEL) | [
"Retrieve",
"parts",
"belonging",
"to",
"this",
"activity",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L120-L139 | train |
KE-works/pykechain | pykechain/models/activity.py | Activity.associated_parts | def associated_parts(self, *args, **kwargs):
"""Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated with this task, it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts()
"""
return (
self.parts(category=Category.MODEL, *args, **kwargs),
self.parts(category=Category.INSTANCE, *args, **kwargs)
) | python | def associated_parts(self, *args, **kwargs):
"""Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated with this task, it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts()
"""
return (
self.parts(category=Category.MODEL, *args, **kwargs),
self.parts(category=Category.INSTANCE, *args, **kwargs)
) | [
"def",
"associated_parts",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"self",
".",
"parts",
"(",
"category",
"=",
"Category",
".",
"MODEL",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"self",
".",
"parts",
"(",
"category",
"=",
"Category",
".",
"INSTANCE",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated with this task, it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts() | [
"Retrieve",
"models",
"and",
"instances",
"belonging",
"to",
"this",
"activity",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L141-L166 | train |
KE-works/pykechain | pykechain/models/activity.py | Activity.subprocess | def subprocess(self):
"""Retrieve the subprocess in which this activity is defined.
If this is a task on the top level, it raises a NotFoundError.
:return: a subprocess :class:`Activity`
:raises NotFoundError: when it is a task in the top level of a project
:raises APIError: when another error occurs
Example
-------
>>> task = project.activity('Subtask')
>>> subprocess = task.subprocess()
"""
subprocess_id = self._json_data.get('container')
if subprocess_id == self._json_data.get('root_container'):
raise NotFoundError("Cannot find subprocess for this task '{}', "
"as this task exist on top level.".format(self.name))
return self._client.activity(pk=subprocess_id, scope=self.scope_id) | python | def subprocess(self):
"""Retrieve the subprocess in which this activity is defined.
If this is a task on the top level, it raises a NotFoundError.
:return: a subprocess :class:`Activity`
:raises NotFoundError: when it is a task in the top level of a project
:raises APIError: when another error occurs
Example
-------
>>> task = project.activity('Subtask')
>>> subprocess = task.subprocess()
"""
subprocess_id = self._json_data.get('container')
if subprocess_id == self._json_data.get('root_container'):
raise NotFoundError("Cannot find subprocess for this task '{}', "
"as this task exist on top level.".format(self.name))
return self._client.activity(pk=subprocess_id, scope=self.scope_id) | [
"def",
"subprocess",
"(",
"self",
")",
":",
"subprocess_id",
"=",
"self",
".",
"_json_data",
".",
"get",
"(",
"'container'",
")",
"if",
"subprocess_id",
"==",
"self",
".",
"_json_data",
".",
"get",
"(",
"'root_container'",
")",
":",
"raise",
"NotFoundError",
"(",
"\"Cannot find subprocess for this task '{}', \"",
"\"as this task exist on top level.\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"_client",
".",
"activity",
"(",
"pk",
"=",
"subprocess_id",
",",
"scope",
"=",
"self",
".",
"scope_id",
")"
] | Retrieve the subprocess in which this activity is defined.
If this is a task on the top level, it raises a NotFoundError.
:return: a subprocess :class:`Activity`
:raises NotFoundError: when it is a task in the top level of a project
:raises APIError: when another error occurs
Example
-------
>>> task = project.activity('Subtask')
>>> subprocess = task.subprocess() | [
"Retrieve",
"the",
"subprocess",
"in",
"which",
"this",
"activity",
"is",
"defined",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L199-L218 | train |
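Since `subprocess()` raises a `NotFoundError` for tasks at the top level of a project, callers typically guard the call. A minimal sketch, assuming the exception is importable from `pykechain.exceptions` and that `project` is a scope obtained as in the docstring examples:

```python
from pykechain.exceptions import NotFoundError

task = project.activity('Subtask')
try:
    parent = task.subprocess()
    print('Parent subprocess:', parent.name)
except NotFoundError:
    # The task lives at the top level of the project, so it has no parent.
    parent = None
```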
KE-works/pykechain | pykechain/models/activity.py | Activity.siblings | def siblings(self, **kwargs):
"""Retrieve the other activities that also belong to the subprocess.
It returns a combination of Tasks (e.g. UserTasks) and Subprocesses on the level of the current task, including
itself. This also works if the activity is of type `ActivityType.SUBPROCESS`.
:param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
:type kwargs: dict or None
:return: list of :class:`Activity`
Example
-------
>>> task = project.activity('Some Task')
>>> siblings = task.siblings()
Example for siblings containing certain words in the task name
>>> task = project.activity('Some Task')
>>> siblings = task.siblings(name__contains='Another Task')
"""
container_id = self._json_data.get('container')
return self._client.activities(container=container_id, scope=self.scope_id, **kwargs) | python | def siblings(self, **kwargs):
"""Retrieve the other activities that also belong to the subprocess.
It returns a combination of Tasks (e.g. UserTasks) and Subprocesses on the level of the current task, including
itself. This also works if the activity is of type `ActivityType.SUBPROCESS`.
:param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
:type kwargs: dict or None
:return: list of :class:`Activity`
Example
-------
>>> task = project.activity('Some Task')
>>> siblings = task.siblings()
Example for siblings containing certain words in the task name
>>> task = project.activity('Some Task')
>>> siblings = task.siblings(name__contains='Another Task')
"""
container_id = self._json_data.get('container')
return self._client.activities(container=container_id, scope=self.scope_id, **kwargs) | [
"def",
"siblings",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"container_id",
"=",
"self",
".",
"_json_data",
".",
"get",
"(",
"'container'",
")",
"return",
"self",
".",
"_client",
".",
"activities",
"(",
"container",
"=",
"container_id",
",",
"scope",
"=",
"self",
".",
"scope_id",
",",
"*",
"*",
"kwargs",
")"
] | Retrieve the other activities that also belong to the subprocess.
It returns a combination of Tasks (e.g. UserTasks) and Subprocesses on the level of the current task, including
itself. This also works if the activity is of type `ActivityType.SUBPROCESS`.
:param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
:type kwargs: dict or None
:return: list of :class:`Activity`
Example
-------
>>> task = project.activity('Some Task')
>>> siblings = task.siblings()
Example for siblings containing certain words in the task name
>>> task = project.activity('Some Task')
>>> siblings = task.siblings(name__contains='Another Task') | [
"Retrieve",
"the",
"other",
"activities",
"that",
"also",
"belong",
"to",
"the",
"subprocess",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L248-L269 | train |
KE-works/pykechain | pykechain/models/activity.py | Activity.create | def create(self, *args, **kwargs):
"""Create a new activity belonging to this subprocess.
See :func:`pykechain.Client.create_activity` for available parameters.
:raises IllegalArgumentError: if the `Activity` is not a `SUBPROCESS`.
:raises APIError: if an Error occurs.
"""
if self.activity_type != ActivityType.SUBPROCESS:
raise IllegalArgumentError("One can only create a task under a subprocess.")
return self._client.create_activity(self.id, *args, **kwargs) | python | def create(self, *args, **kwargs):
"""Create a new activity belonging to this subprocess.
See :func:`pykechain.Client.create_activity` for available parameters.
:raises IllegalArgumentError: if the `Activity` is not a `SUBPROCESS`.
:raises APIError: if an Error occurs.
"""
if self.activity_type != ActivityType.SUBPROCESS:
raise IllegalArgumentError("One can only create a task under a subprocess.")
return self._client.create_activity(self.id, *args, **kwargs) | [
"def",
"create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"activity_type",
"!=",
"ActivityType",
".",
"SUBPROCESS",
":",
"raise",
"IllegalArgumentError",
"(",
"\"One can only create a task under a subprocess.\"",
")",
"return",
"self",
".",
"_client",
".",
"create_activity",
"(",
"self",
".",
"id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Create a new activity belonging to this subprocess.
See :func:`pykechain.Client.create_activity` for available parameters.
:raises IllegalArgumentError: if the `Activity` is not a `SUBPROCESS`.
:raises APIError: if an Error occurs. | [
"Create",
"a",
"new",
"activity",
"belonging",
"to",
"this",
"subprocess",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L271-L281 | train |
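Because `create()` is only valid on activities of type `ActivityType.SUBPROCESS`, a typical flow fetches the subprocess first and then adds tasks to it. A minimal sketch; the subprocess and task names are hypothetical, and the `name` keyword is assumed to be forwarded to `Client.create_activity`:

```python
subprocess = project.activity('Design Phase')  # hypothetical subprocess name

# Raises IllegalArgumentError if 'Design Phase' were a plain task instead.
for task_name in ('Specify Requirements', 'Review Design'):
    subprocess.create(name=task_name)
```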
KE-works/pykechain | pykechain/models/activity.py | Activity.customization | def customization(self):
"""
Get a customization object representing the customization of the activity.
.. versionadded:: 1.11
:return: An instance of :class:`customization.ExtCustomization`
Example
-------
>>> activity = project.activity(name='Customizable activity')
>>> customization = activity.customization()
>>> part_to_show = project.part(name='Bike')
>>> customization.add_property_grid_widget(part_to_show, custom_title="My super bike")
"""
from .customization import ExtCustomization
# For now, we only allow customization in an Ext JS context
return ExtCustomization(activity=self, client=self._client) | python | def customization(self):
"""
Get a customization object representing the customization of the activity.
.. versionadded:: 1.11
:return: An instance of :class:`customization.ExtCustomization`
Example
-------
>>> activity = project.activity(name='Customizable activity')
>>> customization = activity.customization()
>>> part_to_show = project.part(name='Bike')
>>> customization.add_property_grid_widget(part_to_show, custom_title="My super bike")
"""
from .customization import ExtCustomization
# For now, we only allow customization in an Ext JS context
return ExtCustomization(activity=self, client=self._client) | [
"def",
"customization",
"(",
"self",
")",
":",
"from",
".",
"customization",
"import",
"ExtCustomization",
"# For now, we only allow customization in an Ext JS context",
"return",
"ExtCustomization",
"(",
"activity",
"=",
"self",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Get a customization object representing the customization of the activity.
.. versionadded:: 1.11
:return: An instance of :class:`customization.ExtCustomization`
Example
-------
>>> activity = project.activity(name='Customizable activity')
>>> customization = activity.customization()
>>> part_to_show = project.part(name='Bike')
>>> customization.add_property_grid_widget(part_to_show, custom_title="My super bike") | [
"Get",
"a",
"customization",
"object",
"representing",
"the",
"customization",
"of",
"the",
"activity",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity.py#L398-L417 | train |
hfurubotten/enturclient | enturclient/api.py | EnturPublicTransportData.all_stop_places_quays | def all_stop_places_quays(self) -> list:
"""Get all stop places and quays"""
all_places = self.stops.copy()
for quay in self.quays:
all_places.append(quay)
return all_places | python | def all_stop_places_quays(self) -> list:
"""Get all stop places and quays"""
all_places = self.stops.copy()
for quay in self.quays:
all_places.append(quay)
return all_places | [
"def",
"all_stop_places_quays",
"(",
"self",
")",
"->",
"list",
":",
"all_places",
"=",
"self",
".",
"stops",
".",
"copy",
"(",
")",
"for",
"quay",
"in",
"self",
".",
"quays",
":",
"all_places",
".",
"append",
"(",
"quay",
")",
"return",
"all_places"
] | Get all stop places and quays | [
"Get",
"all",
"stop",
"places",
"and",
"quays"
] | 8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4 | https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L68-L73 | train |
hfurubotten/enturclient | enturclient/api.py | EnturPublicTransportData.expand_all_quays | async def expand_all_quays(self) -> None:
"""Find all quays from stop places."""
if not self.stops:
return
headers = {'ET-Client-Name': self._client_name}
request = {
'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE,
'variables': {
'stops': self.stops,
'omitNonBoarding': self.omit_non_boarding
}
}
with async_timeout.timeout(10):
resp = await self.web_session.post(RESOURCE,
json=request,
headers=headers)
if resp.status != 200:
_LOGGER.error(
"Error connecting to Entur, response http status code: %s",
resp.status)
return None
result = await resp.json()
if 'errors' in result:
return
for stop_place in result['data']['stopPlaces']:
if len(stop_place['quays']) > 1:
for quay in stop_place['quays']:
if quay['estimatedCalls']:
self.quays.append(quay['id']) | python | async def expand_all_quays(self) -> None:
"""Find all quays from stop places."""
if not self.stops:
return
headers = {'ET-Client-Name': self._client_name}
request = {
'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE,
'variables': {
'stops': self.stops,
'omitNonBoarding': self.omit_non_boarding
}
}
with async_timeout.timeout(10):
resp = await self.web_session.post(RESOURCE,
json=request,
headers=headers)
if resp.status != 200:
_LOGGER.error(
"Error connecting to Entur, response http status code: %s",
resp.status)
return None
result = await resp.json()
if 'errors' in result:
return
for stop_place in result['data']['stopPlaces']:
if len(stop_place['quays']) > 1:
for quay in stop_place['quays']:
if quay['estimatedCalls']:
self.quays.append(quay['id']) | [
"async",
"def",
"expand_all_quays",
"(",
"self",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"stops",
":",
"return",
"headers",
"=",
"{",
"'ET-Client-Name'",
":",
"self",
".",
"_client_name",
"}",
"request",
"=",
"{",
"'query'",
":",
"GRAPHQL_STOP_TO_QUAY_TEMPLATE",
",",
"'variables'",
":",
"{",
"'stops'",
":",
"self",
".",
"stops",
",",
"'omitNonBoarding'",
":",
"self",
".",
"omit_non_boarding",
"}",
"}",
"with",
"async_timeout",
".",
"timeout",
"(",
"10",
")",
":",
"resp",
"=",
"await",
"self",
".",
"web_session",
".",
"post",
"(",
"RESOURCE",
",",
"json",
"=",
"request",
",",
"headers",
"=",
"headers",
")",
"if",
"resp",
".",
"status",
"!=",
"200",
":",
"_LOGGER",
".",
"error",
"(",
"\"Error connecting to Entur, response http status code: %s\"",
",",
"resp",
".",
"status",
")",
"return",
"None",
"result",
"=",
"await",
"resp",
".",
"json",
"(",
")",
"if",
"'errors'",
"in",
"result",
":",
"return",
"for",
"stop_place",
"in",
"result",
"[",
"'data'",
"]",
"[",
"'stopPlaces'",
"]",
":",
"if",
"len",
"(",
"stop_place",
"[",
"'quays'",
"]",
")",
">",
"1",
":",
"for",
"quay",
"in",
"stop_place",
"[",
"'quays'",
"]",
":",
"if",
"quay",
"[",
"'estimatedCalls'",
"]",
":",
"self",
".",
"quays",
".",
"append",
"(",
"quay",
"[",
"'id'",
"]",
")"
] | Find all quays from stop places. | [
"Find",
"all",
"quays",
"from",
"stop",
"places",
"."
] | 8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4 | https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L75-L108 | train |
hfurubotten/enturclient | enturclient/api.py | EnturPublicTransportData.update | async def update(self) -> None:
"""Get the latest data from api.entur.org."""
headers = {'ET-Client-Name': self._client_name}
request = {
'query': self.get_gql_query(),
'variables': {
'stops': self.stops,
'quays': self.quays,
'whitelist': {
'lines': self.line_whitelist
},
'numberOfDepartures': self.number_of_departures,
'omitNonBoarding': self.omit_non_boarding
}
}
with async_timeout.timeout(10):
resp = await self.web_session.post(RESOURCE,
json=request,
headers=headers)
if resp.status != 200:
_LOGGER.error(
"Error connecting to Entur, response http status code: %s",
resp.status)
return None
result = await resp.json()
if 'errors' in result:
_LOGGER.warning("Entur API responded with error message: {error}",
result['errors'])
return
self._data = result['data']
if 'stopPlaces' in self._data:
for stop in self._data['stopPlaces']:
self._process_place(stop, False)
if 'quays' in self._data:
for quay in self._data['quays']:
self._process_place(quay, True) | python | async def update(self) -> None:
"""Get the latest data from api.entur.org."""
headers = {'ET-Client-Name': self._client_name}
request = {
'query': self.get_gql_query(),
'variables': {
'stops': self.stops,
'quays': self.quays,
'whitelist': {
'lines': self.line_whitelist
},
'numberOfDepartures': self.number_of_departures,
'omitNonBoarding': self.omit_non_boarding
}
}
with async_timeout.timeout(10):
resp = await self.web_session.post(RESOURCE,
json=request,
headers=headers)
if resp.status != 200:
_LOGGER.error(
"Error connecting to Entur, response http status code: %s",
resp.status)
return None
result = await resp.json()
if 'errors' in result:
_LOGGER.warning("Entur API responded with error message: {error}",
result['errors'])
return
self._data = result['data']
if 'stopPlaces' in self._data:
for stop in self._data['stopPlaces']:
self._process_place(stop, False)
if 'quays' in self._data:
for quay in self._data['quays']:
self._process_place(quay, True) | [
"async",
"def",
"update",
"(",
"self",
")",
"->",
"None",
":",
"headers",
"=",
"{",
"'ET-Client-Name'",
":",
"self",
".",
"_client_name",
"}",
"request",
"=",
"{",
"'query'",
":",
"self",
".",
"get_gql_query",
"(",
")",
",",
"'variables'",
":",
"{",
"'stops'",
":",
"self",
".",
"stops",
",",
"'quays'",
":",
"self",
".",
"quays",
",",
"'whitelist'",
":",
"{",
"'lines'",
":",
"self",
".",
"line_whitelist",
"}",
",",
"'numberOfDepartures'",
":",
"self",
".",
"number_of_departures",
",",
"'omitNonBoarding'",
":",
"self",
".",
"omit_non_boarding",
"}",
"}",
"with",
"async_timeout",
".",
"timeout",
"(",
"10",
")",
":",
"resp",
"=",
"await",
"self",
".",
"web_session",
".",
"post",
"(",
"RESOURCE",
",",
"json",
"=",
"request",
",",
"headers",
"=",
"headers",
")",
"if",
"resp",
".",
"status",
"!=",
"200",
":",
"_LOGGER",
".",
"error",
"(",
"\"Error connecting to Entur, response http status code: %s\"",
",",
"resp",
".",
"status",
")",
"return",
"None",
"result",
"=",
"await",
"resp",
".",
"json",
"(",
")",
"if",
"'errors'",
"in",
"result",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Entur API responded with error message: {error}\"",
",",
"result",
"[",
"'errors'",
"]",
")",
"return",
"self",
".",
"_data",
"=",
"result",
"[",
"'data'",
"]",
"if",
"'stopPlaces'",
"in",
"self",
".",
"_data",
":",
"for",
"stop",
"in",
"self",
".",
"_data",
"[",
"'stopPlaces'",
"]",
":",
"self",
".",
"_process_place",
"(",
"stop",
",",
"False",
")",
"if",
"'quays'",
"in",
"self",
".",
"_data",
":",
"for",
"quay",
"in",
"self",
".",
"_data",
"[",
"'quays'",
"]",
":",
"self",
".",
"_process_place",
"(",
"quay",
",",
"True",
")"
] | Get the latest data from api.entur.org. | [
"Get",
"the",
"latest",
"data",
"from",
"api",
".",
"entur",
".",
"org",
"."
] | 8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4 | https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L110-L152 | train |
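Taken together, `expand_all_quays`, `update`, and `all_stop_places_quays` form the client's typical polling flow. A minimal sketch; the constructor arguments (client name, stop IDs, and the aiohttp session) are assumptions inferred from the attributes used above, not a confirmed signature, and the stop ID is a hypothetical NSR identifier:

```python
import asyncio
import aiohttp
from enturclient import EnturPublicTransportData

async def poll_departures():
    async with aiohttp.ClientSession() as session:
        # Constructor arguments assumed from the attributes the methods use
        # (client_name, stops, web_session); check the actual API before use.
        data = EnturPublicTransportData(
            'awesome_company-my_app',
            stops=['NSR:StopPlace:548'],
            web_session=session,
        )
        await data.expand_all_quays()  # resolve multi-quay stops into quay IDs
        await data.update()            # fetch estimated calls from api.entur.org
        for place_id in data.all_stop_places_quays():
            # `info` is populated by _process_place during update().
            print(place_id, data.info.get(place_id))

asyncio.run(poll_departures())
```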
hfurubotten/enturclient | enturclient/api.py | EnturPublicTransportData._process_place | def _process_place(self, place: dict, is_platform: bool) -> None:
"""Extract information from place dictionary."""
place_id = place['id']
self.info[place_id] = Place(place, is_platform) | python | def _process_place(self, place: dict, is_platform: bool) -> None:
"""Extract information from place dictionary."""
place_id = place['id']
self.info[place_id] = Place(place, is_platform) | [
"def",
"_process_place",
"(",
"self",
",",
"place",
":",
"dict",
",",
"is_platform",
":",
"bool",
")",
"->",
"None",
":",
"place_id",
"=",
"place",
"[",
"'id'",
"]",
"self",
".",
"info",
"[",
"place_id",
"]",
"=",
"Place",
"(",
"place",
",",
"is_platform",
")"
] | Extract information from place dictionary. | [
"Extract",
"information",
"from",
"place",
"dictionary",
"."
] | 8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4 | https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L158-L161 | train |
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/responses.py | serializable_list | def serializable_list(
olist, attrs_to_serialize=None, rels_to_expand=None,
group_listrels_by=None, rels_to_serialize=None,
key_modifications=None, groupby=None, keyvals_to_merge=None,
preserve_order=False, dict_struct=None, dict_post_processors=None):
"""
Converts a list of model instances to a list of dictionaries
using their `todict` method.
Args:
olist (list): The list of instances to convert
attrs_to_serialize (list, optional): To be passed as an argument
to the `todict` method
rels_to_expand (list, optional): To be passed as an argument
to the `todict` method
group_listrels_by (dict, optional): To be passed as an argument
to the `todict` method
rels_to_serialize (list, optional): To be passed as an argument
to the `todict` method
key_modifications (dict, optional): To be passed as an argument
to the `todict` method
groupby (list, optional): An optional list of keys based on which
the result list will be hierarchically grouped (and converted
into a dict)
keyvals_to_merge (list of dicts, optional): A list of parameters
to be merged with each dict of the output list
"""
if groupby:
if preserve_order:
result = json_encoder(deep_group(
olist, keys=groupby, serializer='todict',
preserve_order=preserve_order,
serializer_kwargs={
'rels_to_serialize': rels_to_serialize,
'rels_to_expand': rels_to_expand,
'attrs_to_serialize': attrs_to_serialize,
'group_listrels_by': group_listrels_by,
'key_modifications': key_modifications,
'dict_struct': dict_struct,
'dict_post_processors': dict_post_processors
}))
else:
result = deep_group(
olist, keys=groupby, serializer='todict',
preserve_order=preserve_order,
serializer_kwargs={
'rels_to_serialize': rels_to_serialize,
'rels_to_expand': rels_to_expand,
'attrs_to_serialize': attrs_to_serialize,
'group_listrels_by': group_listrels_by,
'key_modifications': key_modifications,
'dict_struct': dict_struct,
'dict_post_processors': dict_post_processors
})
return result
else:
result_list = map(
lambda o: serialized_obj(
o, attrs_to_serialize=attrs_to_serialize,
rels_to_expand=rels_to_expand,
group_listrels_by=group_listrels_by,
rels_to_serialize=rels_to_serialize,
key_modifications=key_modifications,
dict_struct=dict_struct,
dict_post_processors=dict_post_processors),
olist)
if keyvals_to_merge:
result_list = [merge(obj_dict, kvdict)
for obj_dict, kvdict in
zip(result_list, keyvals_to_merge)]
return result_list | python | def serializable_list(
olist, attrs_to_serialize=None, rels_to_expand=None,
group_listrels_by=None, rels_to_serialize=None,
key_modifications=None, groupby=None, keyvals_to_merge=None,
preserve_order=False, dict_struct=None, dict_post_processors=None):
"""
Converts a list of model instances to a list of dictionaries
using their `todict` method.
Args:
olist (list): The list of instances to convert
attrs_to_serialize (list, optional): To be passed as an argument
to the `todict` method
rels_to_expand (list, optional): To be passed as an argument
to the `todict` method
group_listrels_by (dict, optional): To be passed as an argument
to the `todict` method
rels_to_serialize (list, optional): To be passed as an argument
to the `todict` method
key_modifications (dict, optional): To be passed as an argument
to the `todict` method
groupby (list, optional): An optional list of keys based on which
the result list will be hierarchically grouped (and converted
into a dict)
keyvals_to_merge (list of dicts, optional): A list of parameters
to be merged with each dict of the output list
"""
if groupby:
if preserve_order:
result = json_encoder(deep_group(
olist, keys=groupby, serializer='todict',
preserve_order=preserve_order,
serializer_kwargs={
'rels_to_serialize': rels_to_serialize,
'rels_to_expand': rels_to_expand,
'attrs_to_serialize': attrs_to_serialize,
'group_listrels_by': group_listrels_by,
'key_modifications': key_modifications,
'dict_struct': dict_struct,
'dict_post_processors': dict_post_processors
}))
else:
result = deep_group(
olist, keys=groupby, serializer='todict',
preserve_order=preserve_order,
serializer_kwargs={
'rels_to_serialize': rels_to_serialize,
'rels_to_expand': rels_to_expand,
'attrs_to_serialize': attrs_to_serialize,
'group_listrels_by': group_listrels_by,
'key_modifications': key_modifications,
'dict_struct': dict_struct,
'dict_post_processors': dict_post_processors
})
return result
else:
result_list = map(
lambda o: serialized_obj(
o, attrs_to_serialize=attrs_to_serialize,
rels_to_expand=rels_to_expand,
group_listrels_by=group_listrels_by,
rels_to_serialize=rels_to_serialize,
key_modifications=key_modifications,
dict_struct=dict_struct,
dict_post_processors=dict_post_processors),
olist)
if keyvals_to_merge:
result_list = [merge(obj_dict, kvdict)
for obj_dict, kvdict in
zip(result_list, keyvals_to_merge)]
return result_list | [
"def",
"serializable_list",
"(",
"olist",
",",
"attrs_to_serialize",
"=",
"None",
",",
"rels_to_expand",
"=",
"None",
",",
"group_listrels_by",
"=",
"None",
",",
"rels_to_serialize",
"=",
"None",
",",
"key_modifications",
"=",
"None",
",",
"groupby",
"=",
"None",
",",
"keyvals_to_merge",
"=",
"None",
",",
"preserve_order",
"=",
"False",
",",
"dict_struct",
"=",
"None",
",",
"dict_post_processors",
"=",
"None",
")",
":",
"if",
"groupby",
":",
"if",
"preserve_order",
":",
"result",
"=",
"json_encoder",
"(",
"deep_group",
"(",
"olist",
",",
"keys",
"=",
"groupby",
",",
"serializer",
"=",
"'todict'",
",",
"preserve_order",
"=",
"preserve_order",
",",
"serializer_kwargs",
"=",
"{",
"'rels_to_serialize'",
":",
"rels_to_serialize",
",",
"'rels_to_expand'",
":",
"rels_to_expand",
",",
"'attrs_to_serialize'",
":",
"attrs_to_serialize",
",",
"'group_listrels_by'",
":",
"group_listrels_by",
",",
"'key_modifications'",
":",
"key_modifications",
",",
"'dict_struct'",
":",
"dict_struct",
",",
"'dict_post_processors'",
":",
"dict_post_processors",
"}",
")",
")",
"else",
":",
"result",
"=",
"deep_group",
"(",
"olist",
",",
"keys",
"=",
"groupby",
",",
"serializer",
"=",
"'todict'",
",",
"preserve_order",
"=",
"preserve_order",
",",
"serializer_kwargs",
"=",
"{",
"'rels_to_serialize'",
":",
"rels_to_serialize",
",",
"'rels_to_expand'",
":",
"rels_to_expand",
",",
"'attrs_to_serialize'",
":",
"attrs_to_serialize",
",",
"'group_listrels_by'",
":",
"group_listrels_by",
",",
"'key_modifications'",
":",
"key_modifications",
",",
"'dict_struct'",
":",
"dict_struct",
",",
"'dict_post_processors'",
":",
"dict_post_processors",
"}",
")",
"return",
"result",
"else",
":",
"result_list",
"=",
"map",
"(",
"lambda",
"o",
":",
"serialized_obj",
"(",
"o",
",",
"attrs_to_serialize",
"=",
"attrs_to_serialize",
",",
"rels_to_expand",
"=",
"rels_to_expand",
",",
"group_listrels_by",
"=",
"group_listrels_by",
",",
"rels_to_serialize",
"=",
"rels_to_serialize",
",",
"key_modifications",
"=",
"key_modifications",
",",
"dict_struct",
"=",
"dict_struct",
",",
"dict_post_processors",
"=",
"dict_post_processors",
")",
",",
"olist",
")",
"if",
"keyvals_to_merge",
":",
"result_list",
"=",
"[",
"merge",
"(",
"obj_dict",
",",
"kvdict",
")",
"for",
"obj_dict",
",",
"kvdict",
"in",
"zip",
"(",
"result_list",
",",
"keyvals_to_merge",
")",
"]",
"return",
"result_list"
] | Converts a list of model instances to a list of dictionaries
using their `todict` method.
Args:
olist (list): The list of instances to convert
attrs_to_serialize (list, optional): To be passed as an argument
to the `todict` method
rels_to_expand (list, optional): To be passed as an argument
to the `todict` method
group_listrels_by (dict, optional): To be passed as an argument
to the `todict` method
rels_to_serialize (list, optional): To be passed as an argument
to the `todict` method
key_modifications (dict, optional): To be passed as an argument
to the `todict` method
groupby (list, optional): An optional list of keys based on which
the result list will be hierarchically grouped (and converted
into a dict)
keyvals_to_merge (list of dicts, optional): A list of parameters
to be merged with each dict of the output list | [
"Converts",
"a",
"list",
"of",
"model",
"instances",
"to",
"a",
"list",
"of",
"dictionaries",
"using",
"their",
"todict",
"method",
"."
] | 444048d167ab7718f758e943665ef32d101423a5 | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L96-L168 | train |
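A short sketch of calling `serializable_list` directly; the `Customer` model is a hypothetical stand-in for any model whose instances expose the `todict` method the function relies on (the `Customer.all()` style follows the docstring examples elsewhere in this file):

```python
from flask_sqlalchemy_booster.responses import serializable_list

customers = Customer.all()  # hypothetical model with a todict() method

# Plain list of dicts, one per instance, restricted to two attributes.
rows = serializable_list(customers, attrs_to_serialize=['id', 'name'])

# Grouped variant: a dict keyed by 'city', each value holding serialized rows.
by_city = serializable_list(
    customers, attrs_to_serialize=['id', 'name'], groupby=['city'])
```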
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/responses.py | jsoned | def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None):
""" Provides a json dump of the struct
Args:
struct: The data to dump
wrap (bool, optional): Specify whether to wrap the
struct in an enclosing dict
struct_key (str, optional): The string key which will
contain the struct in the result dict
meta (dict, optional): An optional dictionary to merge
with the output dictionary.
Examples:
>>> jsoned([3,4,5])
... '{"status": "success", "result": [3, 4, 5]}'
>>> jsoned([3,4,5], wrap=False)
... '[3, 4, 5]'
"""
return _json.dumps(
structured(
struct, wrap=wrap, meta=meta, struct_key=struct_key,
pre_render_callback=pre_render_callback),
default=json_encoder) | python | def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None):
""" Provides a json dump of the struct
Args:
struct: The data to dump
wrap (bool, optional): Specify whether to wrap the
struct in an enclosing dict
struct_key (str, optional): The string key which will
contain the struct in the result dict
meta (dict, optional): An optional dictionary to merge
with the output dictionary.
Examples:
>>> jsoned([3,4,5])
... '{"status": "success", "result": [3, 4, 5]}'
>>> jsoned([3,4,5], wrap=False)
... '[3, 4, 5]'
"""
return _json.dumps(
structured(
struct, wrap=wrap, meta=meta, struct_key=struct_key,
pre_render_callback=pre_render_callback),
default=json_encoder) | [
"def",
"jsoned",
"(",
"struct",
",",
"wrap",
"=",
"True",
",",
"meta",
"=",
"None",
",",
"struct_key",
"=",
"'result'",
",",
"pre_render_callback",
"=",
"None",
")",
":",
"return",
"_json",
".",
"dumps",
"(",
"structured",
"(",
"struct",
",",
"wrap",
"=",
"wrap",
",",
"meta",
"=",
"meta",
",",
"struct_key",
"=",
"struct_key",
",",
"pre_render_callback",
"=",
"pre_render_callback",
")",
",",
"default",
"=",
"json_encoder",
")"
] | Provides a json dump of the struct
Args:
struct: The data to dump
wrap (bool, optional): Specify whether to wrap the
struct in an enclosing dict
struct_key (str, optional): The string key which will
contain the struct in the result dict
meta (dict, optional): An optional dictionary to merge
with the output dictionary.
Examples:
>>> jsoned([3,4,5])
... '{"status": "success", "result": [3, 4, 5]}'
>>> jsoned([3,4,5], wrap=False)
... '[3, 4, 5]' | [
"Provides",
"a",
"json",
"dump",
"of",
"the",
"struct"
] | 444048d167ab7718f758e943665ef32d101423a5 | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L191-L216 | train |
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/responses.py | as_list | def as_list(func):
""" A decorator used to return a JSON response of a list of model
objects. It expects the decorated function to return a list
of model instances. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api')
... @as_list
... def list_customers():
... return Customer.all()
"""
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
if isinstance(response, Response):
return response
return as_json_list(
response,
**_serializable_params(request.args, check_groupby=True))
return wrapper | python | def as_list(func):
""" A decorator used to return a JSON response of a list of model
objects. It expects the decorated function to return a list
of model instances. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api')
... @as_list
... def list_customers():
... return Customer.all()
"""
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
if isinstance(response, Response):
return response
return as_json_list(
response,
**_serializable_params(request.args, check_groupby=True))
return wrapper | [
"def",
"as_list",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"response",
",",
"Response",
")",
":",
"return",
"response",
"return",
"as_json_list",
"(",
"response",
",",
"*",
"*",
"_serializable_params",
"(",
"request",
".",
"args",
",",
"check_groupby",
"=",
"True",
")",
")",
"return",
"wrapper"
] | A decorator used to return a JSON response of a list of model
objects. It expects the decorated function to return a list
of model instances. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api')
... @as_list
... def list_customers():
... return Customer.all() | [
"A",
"decorator",
"used",
"to",
"return",
"a",
"JSON",
"response",
"of",
"a",
"list",
"of",
"model",
"objects",
".",
"It",
"expects",
"the",
"decorated",
"function",
"to",
"return",
"a",
"list",
"of",
"model",
"instances",
".",
"It",
"then",
"converts",
"the",
"instances",
"to",
"dicts",
"and",
"serializes",
"them",
"into",
"a",
"json",
"response"
] | 444048d167ab7718f758e943665ef32d101423a5 | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L419-L441 | train |
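Wired into a Flask app, a view decorated with `as_list` returns the enveloped JSON produced by `jsoned` above. A sketch; the route path is hypothetical:

```python
@app.route('/api/customers')
@as_list
def list_customers():
    return Customer.all()

# GET /api/customers then responds with the wrapped envelope, e.g.:
# {"status": "success", "result": [{...}, {...}]}
```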
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/responses.py | as_processed_list | def as_processed_list(func):
""" A decorator used to return a JSON response of a list of model
objects. It differs from `as_list` in that it accepts a variety
of querying parameters and can use them to filter and modify the
results. It expects the decorated function to return either Model Class
to query or a SQLAlchemy filter which exposes a subset of the instances
of the Model class. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api/customers')
... @as_processed_list
... def list_all_customers():
... return Customer
>>> @app.route('/api/editors')
... @as_processed_list
... def list_editors():
... return User.filter(role='editor')
"""
@wraps(func)
def wrapper(*args, **kwargs):
func_argspec = inspect.getargspec(func)
func_args = func_argspec.args
for kw in request.args:
if (kw in func_args and kw not in RESTRICTED and
not any(request.args.get(kw).startswith(op)
for op in OPERATORS)
and not any(kw.endswith(op) for op in OPERATORS)):
kwargs[kw] = request.args.get(kw)
func_output = func(*args, **kwargs)
return process_args_and_render_json_list(func_output)
return wrapper | python | def as_processed_list(func):
""" A decorator used to return a JSON response of a list of model
objects. It differs from `as_list` in that it accepts a variety
of querying parameters and can use them to filter and modify the
results. It expects the decorated function to return either Model Class
to query or a SQLAlchemy filter which exposes a subset of the instances
of the Model class. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api/customers')
... @as_processed_list
... def list_all_customers():
... return Customer
>>> @app.route('/api/editors')
... @as_processed_list
... def list_editors():
... return User.filter(role='editor')
"""
@wraps(func)
def wrapper(*args, **kwargs):
func_argspec = inspect.getargspec(func)
func_args = func_argspec.args
for kw in request.args:
if (kw in func_args and kw not in RESTRICTED and
not any(request.args.get(kw).startswith(op)
for op in OPERATORS)
and not any(kw.endswith(op) for op in OPERATORS)):
kwargs[kw] = request.args.get(kw)
func_output = func(*args, **kwargs)
return process_args_and_render_json_list(func_output)
return wrapper | [
"def",
"as_processed_list",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"func_argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"func_args",
"=",
"func_argspec",
".",
"args",
"for",
"kw",
"in",
"request",
".",
"args",
":",
"if",
"(",
"kw",
"in",
"func_args",
"and",
"kw",
"not",
"in",
"RESTRICTED",
"and",
"not",
"any",
"(",
"request",
".",
"args",
".",
"get",
"(",
"kw",
")",
".",
"startswith",
"(",
"op",
")",
"for",
"op",
"in",
"OPERATORS",
")",
"and",
"not",
"any",
"(",
"kw",
".",
"endswith",
"(",
"op",
")",
"for",
"op",
"in",
"OPERATORS",
")",
")",
":",
"kwargs",
"[",
"kw",
"]",
"=",
"request",
".",
"args",
".",
"get",
"(",
"kw",
")",
"func_output",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"process_args_and_render_json_list",
"(",
"func_output",
")",
"return",
"wrapper"
] | A decorator used to return a JSON response of a list of model
objects. It differs from `as_list` in that it accepts a variety
of querying parameters and can use them to filter and modify the
results. It expects the decorated function to return either Model Class
to query or a SQLAlchemy filter which exposes a subset of the instances
of the Model class. It then converts the instances to dicts
and serializes them into a json response
Examples:
>>> @app.route('/api/customers')
... @as_processed_list
... def list_all_customers():
... return Customer
>>> @app.route('/api/editors')
... @as_processed_list
... def list_editors():
... return User.filter(role='editor') | [
"A",
"decorator",
"used",
"to",
"return",
"a",
"JSON",
"response",
"of",
"a",
"list",
"of",
"model",
"objects",
".",
"It",
"differs",
"from",
"as_list",
"in",
"that",
"it",
"accepts",
"a",
"variety",
"of",
"querying",
"parameters",
"and",
"can",
"use",
"them",
"to",
"filter",
"and",
"modify",
"the",
"results",
".",
"It",
"expects",
"the",
"decorated",
"function",
"to",
"return",
"either",
"Model",
"Class",
"to",
"query",
"or",
"a",
"SQLAlchemy",
"filter",
"which",
"exposes",
"a",
"subset",
"of",
"the",
"instances",
"of",
"the",
"Model",
"class",
".",
"It",
"then",
"converts",
"the",
"instances",
"to",
"dicts",
"and",
"serializes",
"them",
"into",
"a",
"json",
"response"
] | 444048d167ab7718f758e943665ef32d101423a5 | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L911-L946 | train |
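Besides processing filter and operator parameters, the wrapper above copies any plain query-string parameter whose name matches one of the view's own arguments into its keyword arguments. A sketch of exploiting that; the `role` parameter is a hypothetical example:

```python
@app.route('/api/users')
@as_processed_list
def list_users(role=None):
    # '?role=editor' in the query string is injected here by the decorator,
    # provided the name is not reserved and carries no operator suffix.
    if role is not None:
        return User.filter(role=role)
    return User
```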
SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/responses.py | as_obj | def as_obj(func):
""" A decorator used to return a JSON response with a dict
representation of the model instance. It expects the decorated function
to return a Model instance. It then converts the instance to dicts
and serializes it into a json response
Examples:
>>> @app.route('/api/shipments/<id>')
... @as_obj
... def get_shipment(id):
... return Shipment.get(id)
"""
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
return render_json_obj_with_requested_structure(response)
return wrapper | python | def as_obj(func):
""" A decorator used to return a JSON response with a dict
representation of the model instance. It expects the decorated function
to return a Model instance. It then converts the instance to dicts
and serializes it into a json response
Examples:
>>> @app.route('/api/shipments/<id>')
... @as_obj
... def get_shipment(id):
... return Shipment.get(id)
"""
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
return render_json_obj_with_requested_structure(response)
return wrapper | [
"def",
"as_obj",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"render_json_obj_with_requested_structure",
"(",
"response",
")",
"return",
"wrapper"
] | A decorator used to return a JSON response with a dict
representation of the model instance. It expects the decorated function
to return a Model instance. It then converts the instance to dicts
and serializes it into a json response
Examples:
>>> @app.route('/api/shipments/<id>')
... @as_obj
... def get_shipment(id):
... return Shipment.get(id) | [
"A",
"decorator",
"used",
"to",
"return",
"a",
"JSON",
"response",
"with",
"a",
"dict",
"representation",
"of",
"the",
"model",
"instance",
".",
"It",
"expects",
"the",
"decorated",
"function",
"to",
"return",
"a",
"Model",
"instance",
".",
"It",
"then",
"converts",
"the",
"instance",
"to",
"dicts",
"and",
"serializes",
"it",
"into",
"a",
"json",
"response"
] | 444048d167ab7718f758e943665ef32d101423a5 | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L949-L966 | train |
KE-works/pykechain | pykechain/models/service.py | Service.execute | def execute(self, interactive=False):
"""
Execute the service.
For interactive (notebook) service execution, set interactive to True, defaults to False.
.. versionadded:: 1.13
:param interactive: (optional) True if the notebook service should execute in interactive mode.
:type interactive: bool or None
:return: ServiceExecution when successful.
:raises APIError: when unable to execute
"""
url = self._client._build_url('service_execute', service_id=self.id)
response = self._client._request('GET', url, params=dict(interactive=interactive, format='json'))
if response.status_code != requests.codes.accepted: # pragma: no cover
raise APIError("Could not execute service '{}': {}".format(self, (response.status_code, response.json())))
data = response.json()
return ServiceExecution(json=data.get('results')[0], client=self._client) | python | def execute(self, interactive=False):
"""
Execute the service.
For interactive (notebook) service execution, set interactive to True, defaults to False.
.. versionadded:: 1.13
:param interactive: (optional) True if the notebook service should execute in interactive mode.
:type interactive: bool or None
:return: ServiceExecution when successful.
:raises APIError: when unable to execute
"""
url = self._client._build_url('service_execute', service_id=self.id)
response = self._client._request('GET', url, params=dict(interactive=interactive, format='json'))
if response.status_code != requests.codes.accepted: # pragma: no cover
raise APIError("Could not execute service '{}': {}".format(self, (response.status_code, response.json())))
data = response.json()
return ServiceExecution(json=data.get('results')[0], client=self._client) | [
"def",
"execute",
"(",
"self",
",",
"interactive",
"=",
"False",
")",
":",
"url",
"=",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service_execute'",
",",
"service_id",
"=",
"self",
".",
"id",
")",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'GET'",
",",
"url",
",",
"params",
"=",
"dict",
"(",
"interactive",
"=",
"interactive",
",",
"format",
"=",
"'json'",
")",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"accepted",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not execute service '{}': {}\"",
".",
"format",
"(",
"self",
",",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"json",
"(",
")",
")",
")",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"return",
"ServiceExecution",
"(",
"json",
"=",
"data",
".",
"get",
"(",
"'results'",
")",
"[",
"0",
"]",
",",
"client",
"=",
"self",
".",
"_client",
")"
] | Execute the service.
For interactive (notebook) service execution, set interactive to True, defaults to False.
.. versionadded:: 1.13
:param interactive: (optional) True if the notebook service should execute in interactive mode.
:type interactive: bool or None
:return: ServiceExecution when successful.
:raises APIError: when unable to execute | [
"Execute",
"the",
"service",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L51-L71 | train |
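A minimal sketch of firing a service and inspecting the resulting execution; the service name is hypothetical, the scope-level `service()` finder is assumed to exist analogously to `activity()`, and the `status` attribute on `ServiceExecution` is an assumption based on its JSON payload:

```python
service = project.service(name='Update BOM')  # hypothetical service name

execution = service.execute()  # non-interactive run
# A status field is assumed to be exposed on the execution object.
print(execution.id, getattr(execution, 'status', None))
```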
KE-works/pykechain | pykechain/models/service.py | Service.edit | def edit(self, name=None, description=None, version=None, **kwargs):
"""
Edit Service details.
.. versionadded:: 1.13
:param name: (optional) name of the service to change.
:type name: basestring or None
:param description: (optional) description of the service.
:type description: basestring or None
:param version: (optional) version number of the service.
:type version: basestring or None
:param kwargs: (optional) additional keyword arguments to change.
:type kwargs: dict or None
:raises IllegalArgumentError: when you provide an illegal argument.
:raises APIError: if the service could not be updated.
"""
update_dict = {'id': self.id}
if name:
if not isinstance(name, str):
raise IllegalArgumentError("name should be provided as a string")
update_dict.update({'name': name})
if description:
if not isinstance(description, str):
raise IllegalArgumentError("description should be provided as a string")
update_dict.update({'description': description})
if version:
if not isinstance(version, str):
raise IllegalArgumentError("description should be provided as a string")
update_dict.update({'script_version': version})
if kwargs: # pragma: no cover
update_dict.update(**kwargs)
response = self._client._request('PUT',
self._client._build_url('service', service_id=self.id), json=update_dict)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not update Service ({})".format(response))
if name:
self.name = name
if version:
self.version = version | python | def edit(self, name=None, description=None, version=None, **kwargs):
"""
Edit Service details.
.. versionadded:: 1.13
:param name: (optional) name of the service to change.
:type name: basestring or None
:param description: (optional) description of the service.
:type description: basestring or None
:param version: (optional) version number of the service.
:type version: basestring or None
:param kwargs: (optional) additional keyword arguments to change.
:type kwargs: dict or None
:raises IllegalArgumentError: when you provide an illegal argument.
:raises APIError: if the service could not be updated.
"""
update_dict = {'id': self.id}
if name:
if not isinstance(name, str):
raise IllegalArgumentError("name should be provided as a string")
update_dict.update({'name': name})
if description:
if not isinstance(description, str):
raise IllegalArgumentError("description should be provided as a string")
update_dict.update({'description': description})
if version:
if not isinstance(version, str):
raise IllegalArgumentError("description should be provided as a string")
update_dict.update({'script_version': version})
if kwargs: # pragma: no cover
update_dict.update(**kwargs)
response = self._client._request('PUT',
self._client._build_url('service', service_id=self.id), json=update_dict)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not update Service ({})".format(response))
if name:
self.name = name
if version:
self.version = version | [
"def",
"edit",
"(",
"self",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"update_dict",
"=",
"{",
"'id'",
":",
"self",
".",
"id",
"}",
"if",
"name",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"str",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"\"name should be provided as a string\"",
")",
"update_dict",
".",
"update",
"(",
"{",
"'name'",
":",
"name",
"}",
")",
"if",
"description",
":",
"if",
"not",
"isinstance",
"(",
"description",
",",
"str",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"\"description should be provided as a string\"",
")",
"update_dict",
".",
"update",
"(",
"{",
"'description'",
":",
"description",
"}",
")",
"if",
"version",
":",
"if",
"not",
"isinstance",
"(",
"version",
",",
"str",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"\"description should be provided as a string\"",
")",
"update_dict",
".",
"update",
"(",
"{",
"'script_version'",
":",
"version",
"}",
")",
"if",
"kwargs",
":",
"# pragma: no cover",
"update_dict",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'PUT'",
",",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service'",
",",
"service_id",
"=",
"self",
".",
"id",
")",
",",
"json",
"=",
"update_dict",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not update Service ({})\"",
".",
"format",
"(",
"response",
")",
")",
"if",
"name",
":",
"self",
".",
"name",
"=",
"name",
"if",
"version",
":",
"self",
".",
"version",
"=",
"version"
] | Edit Service details.
.. versionadded:: 1.13
:param name: (optional) name of the service to change.
:type name: basestring or None
:param description: (optional) description of the service.
:type description: basestring or None
:param version: (optional) version number of the service.
:type version: basestring or None
:param kwargs: (optional) additional keyword arguments to change.
:type kwargs: dict or None
:raises IllegalArgumentError: when you provide an illegal argument.
:raises APIError: if the service could not be updated. | [
"Edit",
"Service",
"details",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L73-L115 | train |
KE-works/pykechain | pykechain/models/service.py | Service.delete | def delete(self):
# type: () -> None
"""Delete this service.
:raises APIError: if delete was not successful.
"""
response = self._client._request('DELETE', self._client._build_url('service', service_id=self.id))
if response.status_code != requests.codes.no_content: # pragma: no cover
raise APIError("Could not delete service: {} with id {}".format(self.name, self.id)) | python | def delete(self):
# type: () -> None
"""Delete this service.
:raises APIError: if delete was not successful.
"""
response = self._client._request('DELETE', self._client._build_url('service', service_id=self.id))
if response.status_code != requests.codes.no_content: # pragma: no cover
raise APIError("Could not delete service: {} with id {}".format(self.name, self.id)) | [
"def",
"delete",
"(",
"self",
")",
":",
"# type: () -> None",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'DELETE'",
",",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service'",
",",
"service_id",
"=",
"self",
".",
"id",
")",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"no_content",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not delete service: {} with id {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"self",
".",
"id",
")",
")"
] | Delete this service.
:raises APIError: if delete was not successful. | [
"Delete",
"this",
"service",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L117-L126 | train |
KE-works/pykechain | pykechain/models/service.py | Service.get_executions | def get_executions(self, **kwargs):
"""
Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service.
"""
return self._client.service_executions(service=self.id, scope=self.scope_id, **kwargs) | python | def get_executions(self, **kwargs):
"""
Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service.
"""
return self._client.service_executions(service=self.id, scope=self.scope_id, **kwargs) | [
"def",
"get_executions",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_client",
".",
"service_executions",
"(",
"service",
"=",
"self",
".",
"id",
",",
"scope",
"=",
"self",
".",
"scope_id",
",",
"*",
"*",
"kwargs",
")"
] | Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service. | [
"Retrieve",
"the",
"executions",
"related",
"to",
"the",
"current",
"service",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L177-L187 | train |
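A minimal usage sketch for the Service.get_executions record above. The client object, the service name and the status filter are illustrative assumptions, not values taken from this dataset:

>>> service = client.service(name='My Upload Script')  # assumes such a service exists in the scope
>>> executions = service.get_executions(status='COMPLETED')  # extra kwargs are passed through as search filters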
KE-works/pykechain | pykechain/models/service.py | ServiceExecution.service | def service(self):
"""Retrieve the `Service` object to which this execution is associated."""
if not self._service:
self._service = self._client.service(id=self.service_id)
return self._service | python | def service(self):
"""Retrieve the `Service` object to which this execution is associated."""
if not self._service:
self._service = self._client.service(id=self.service_id)
return self._service | [
"def",
"service",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_service",
":",
"self",
".",
"_service",
"=",
"self",
".",
"_client",
".",
"service",
"(",
"id",
"=",
"self",
".",
"service_id",
")",
"return",
"self",
".",
"_service"
] | Retrieve the `Service` object to which this execution is associated. | [
"Retrieve",
"the",
"Service",
"object",
"to",
"which",
"this",
"execution",
"is",
"associated",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L232-L236 | train |
KE-works/pykechain | pykechain/models/service.py | ServiceExecution.terminate | def terminate(self):
"""
Terminate the Service execution.
.. versionadded:: 1.13
:return: None if the termination request was successful
:raises APIError: When the service execution could not be terminated.
"""
url = self._client._build_url('service_execution_terminate', service_execution_id=self.id)
response = self._client._request('GET', url, params=dict(format='json'))
if response.status_code != requests.codes.accepted: # pragma: no cover
raise APIError("Could not execute service '{}': {}".format(self, response)) | python | def terminate(self):
"""
Terminate the Service execution.
.. versionadded:: 1.13
:return: None if the termination request was successful
:raises APIError: When the service execution could not be terminated.
"""
url = self._client._build_url('service_execution_terminate', service_execution_id=self.id)
response = self._client._request('GET', url, params=dict(format='json'))
if response.status_code != requests.codes.accepted: # pragma: no cover
raise APIError("Could not execute service '{}': {}".format(self, response)) | [
"def",
"terminate",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service_execution_terminate'",
",",
"service_execution_id",
"=",
"self",
".",
"id",
")",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'GET'",
",",
"url",
",",
"params",
"=",
"dict",
"(",
"format",
"=",
"'json'",
")",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"accepted",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not execute service '{}': {}\"",
".",
"format",
"(",
"self",
",",
"response",
")",
")"
] | Terminate the Service execution.
.. versionadded:: 1.13
:return: None if the termination request was successful
:raises APIError: When the service execution could not be terminated. | [
"Terminate",
"the",
"Service",
"execution",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L241-L254 | train |
KE-works/pykechain | pykechain/models/service.py | ServiceExecution.get_log | def get_log(self, target_dir=None, log_filename='log.txt'):
"""
Retrieve the log of the service execution.
.. versionadded:: 1.13
:param target_dir: (optional) directory path name where to store the log.txt.
:type target_dir: basestring or None
:param log_filename: (optional) log filename to write the log to, defaults to `log.txt`.
:type log_filename: basestring or None
:raises APIError: if the logfile could not be found.
:raises OSError: if the file could not be written.
"""
full_path = os.path.join(target_dir or os.getcwd(), log_filename)
url = self._client._build_url('service_execution_log', service_execution_id=self.id)
response = self._client._request('GET', url)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not download service execution log")
with open(full_path, 'w+b') as f:
for chunk in response:
f.write(chunk) | python | def get_log(self, target_dir=None, log_filename='log.txt'):
"""
Retrieve the log of the service execution.
.. versionadded:: 1.13
:param target_dir: (optional) directory path name where to store the log.txt.
:type target_dir: basestring or None
:param log_filename: (optional) log filename to write the log to, defaults to `log.txt`.
:type log_filename: basestring or None
:raises APIError: if the logfile could not be found.
:raises OSError: if the file could not be written.
"""
full_path = os.path.join(target_dir or os.getcwd(), log_filename)
url = self._client._build_url('service_execution_log', service_execution_id=self.id)
response = self._client._request('GET', url)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not download service execution log")
with open(full_path, 'w+b') as f:
for chunk in response:
f.write(chunk) | [
"def",
"get_log",
"(",
"self",
",",
"target_dir",
"=",
"None",
",",
"log_filename",
"=",
"'log.txt'",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
"or",
"os",
".",
"getcwd",
"(",
")",
",",
"log_filename",
")",
"url",
"=",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service_execution_log'",
",",
"service_execution_id",
"=",
"self",
".",
"id",
")",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'GET'",
",",
"url",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not download service execution log\"",
")",
"with",
"open",
"(",
"full_path",
",",
"'w+b'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"response",
":",
"f",
".",
"write",
"(",
"chunk",
")"
] | Retrieve the log of the service execution.
.. versionadded:: 1.13
:param target_dir: (optional) directory path name where to store the log.txt.
:type target_dir: basestring or None
:param log_filename: (optional) log filename to write the log to, defaults to `log.txt`.
:type log_filename: basestring or None
:raises APIError: if the logfile could not be found.
:raises OSError: if the file could not be written. | [
"Retrieve",
"the",
"log",
"of",
"the",
"service",
"execution",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L256-L278 | train |
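A hedged sketch of ServiceExecution.get_log from the record above; the execution object and the target directory are assumptions for illustration:

>>> execution = service.get_executions()[0]  # assumes at least one execution exists
>>> execution.get_log(target_dir='/tmp', log_filename='run_log.txt')  # writes /tmp/run_log.txt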
KE-works/pykechain | pykechain/models/service.py | ServiceExecution.get_notebook_url | def get_notebook_url(self):
"""
Get the url of the notebook, if the notebook is executed in interactive mode.
.. versionadded:: 1.13
:return: full url to the interactive running notebook as `basestring`
:raises APIError: when the url cannot be retrieved.
"""
url = self._client._build_url('service_execution_notebook_url', service_execution_id=self.id)
response = self._client._request('GET', url, params=dict(format='json'))
if response.status_code != requests.codes.ok:
raise APIError("Could not retrieve notebook url '{}': {}".format(self, response))
data = response.json()
url = data.get('results')[0].get('url')
return url | python | def get_notebook_url(self):
"""
Get the url of the notebook, if the notebook is executed in interactive mode.
.. versionadded:: 1.13
:return: full url to the interactive running notebook as `basestring`
:raises APIError: when the url cannot be retrieved.
"""
url = self._client._build_url('service_execution_notebook_url', service_execution_id=self.id)
response = self._client._request('GET', url, params=dict(format='json'))
if response.status_code != requests.codes.ok:
raise APIError("Could not retrieve notebook url '{}': {}".format(self, response))
data = response.json()
url = data.get('results')[0].get('url')
return url | [
"def",
"get_notebook_url",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'service_execution_notebook_url'",
",",
"service_execution_id",
"=",
"self",
".",
"id",
")",
"response",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'GET'",
",",
"url",
",",
"params",
"=",
"dict",
"(",
"format",
"=",
"'json'",
")",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"raise",
"APIError",
"(",
"\"Could not retrieve notebook url '{}': {}\"",
".",
"format",
"(",
"self",
",",
"response",
")",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"url",
"=",
"data",
".",
"get",
"(",
"'results'",
")",
"[",
"0",
"]",
".",
"get",
"(",
"'url'",
")",
"return",
"url"
] | Get the url of the notebook, if the notebook is executed in interactive mode.
.. versionadded:: 1.13
:return: full url to the interactive running notebook as `basestring`
:raises APIError: when the url cannot be retrieved. | [
"Get",
"the",
"url",
"of",
"the",
"notebook",
"if",
"the",
"notebook",
"is",
"executed",
"in",
"interactive",
"mode",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/service.py#L280-L297 | train |
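Illustration only, for the get_notebook_url record above: the call assumes the service was started in interactive (notebook) mode, and the returned URL shape is a placeholder:

>>> execution.get_notebook_url()  # hypothetical result
'https://<kechain-host>/.../notebook'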
pdxjohnny/SimpleHTTPSServer | SimpleHTTPSServer/SimpleWebSocketServer.py | WebSocket.sendMessage | def sendMessage(self, data):
"""
Send websocket data frame to the client.
If data is a unicode object then the frame is sent as Text.
If the data is a bytearray object then the frame is sent as Binary.
"""
opcode = BINARY
if isinstance(data, unicode):
opcode = TEXT
self._sendMessage(False, opcode, data) | python | def sendMessage(self, data):
"""
Send websocket data frame to the client.
If data is a unicode object then the frame is sent as Text.
If the data is a bytearray object then the frame is sent as Binary.
"""
opcode = BINARY
if isinstance(data, unicode):
opcode = TEXT
self._sendMessage(False, opcode, data) | [
"def",
"sendMessage",
"(",
"self",
",",
"data",
")",
":",
"opcode",
"=",
"BINARY",
"if",
"isinstance",
"(",
"data",
",",
"unicode",
")",
":",
"opcode",
"=",
"TEXT",
"self",
".",
"_sendMessage",
"(",
"False",
",",
"opcode",
",",
"data",
")"
] | Send websocket data frame to the client.
If data is a unicode object then the frame is sent as Text.
If the data is a bytearray object then the frame is sent as Binary. | [
"Send",
"websocket",
"data",
"frame",
"to",
"the",
"client",
".",
"If",
"data",
"is",
"a",
"unicode",
"object",
"then",
"the",
"frame",
"is",
"sent",
"as",
"Text",
".",
"If",
"the",
"data",
"is",
"a",
"bytearray",
"object",
"then",
"the",
"frame",
"is",
"sent",
"as",
"Binary",
"."
] | 5ba0490e1c15541287f89abedfdcd2ff70ad1e88 | https://github.com/pdxjohnny/SimpleHTTPSServer/blob/5ba0490e1c15541287f89abedfdcd2ff70ad1e88/SimpleHTTPSServer/SimpleWebSocketServer.py#L343-L353 | train |
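A sketch of how WebSocket.sendMessage above picks the frame opcode. It is Python 2 style, matching the unicode check in the code, and is meant to run inside a handler of a hypothetical WebSocket subclass:

self.sendMessage(u'hello')                # unicode payload -> TEXT frame
self.sendMessage(bytearray(b'\x01\x02'))  # bytearray payload -> BINARY frame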
biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/biosignalsnotebooks/synchronisation.py | _shape_array | def _shape_array(array1, array2):
"""
Function that equalises the input arrays by zero-padding the shortest one.
----------
Parameters
----------
array1: list or numpy.array
Array
array2: list or numpy.array
Array
Return
------
arrays: numpy.array
Array containing the equal-length arrays.
"""
if len(array1) > len(array2):
new_array = array2
old_array = array1
else:
new_array = array1
old_array = array2
length = len(old_array) - len(new_array)
for i in range(length):
n = new_array[-1].copy()
n[0::3] += 1
n[2::3] = 0
new_array = np.vstack([new_array, [n]])
arrays = np.hstack([old_array, new_array])
return arrays | python | def _shape_array(array1, array2):
"""
Function that equalises the input arrays by zero-padding the shortest one.
----------
Parameters
----------
array1: list or numpy.array
Array
array2: list or numpy.array
Array
Return
------
arrays: numpy.array
Array containing the equal-length arrays.
"""
if len(array1) > len(array2):
new_array = array2
old_array = array1
else:
new_array = array1
old_array = array2
length = len(old_array) - len(new_array)
for i in range(length):
n = new_array[-1].copy()
n[0::3] += 1
n[2::3] = 0
new_array = np.vstack([new_array, [n]])
arrays = np.hstack([old_array, new_array])
return arrays | [
"def",
"_shape_array",
"(",
"array1",
",",
"array2",
")",
":",
"if",
"len",
"(",
"array1",
")",
">",
"len",
"(",
"array2",
")",
":",
"new_array",
"=",
"array2",
"old_array",
"=",
"array1",
"else",
":",
"new_array",
"=",
"array1",
"old_array",
"=",
"array2",
"length",
"=",
"len",
"(",
"old_array",
")",
"-",
"len",
"(",
"new_array",
")",
"for",
"i",
"in",
"range",
"(",
"length",
")",
":",
"n",
"=",
"new_array",
"[",
"-",
"1",
"]",
".",
"copy",
"(",
")",
"n",
"[",
"0",
":",
":",
"3",
"]",
"+=",
"1",
"n",
"[",
"2",
":",
":",
"3",
"]",
"=",
"0",
"new_array",
"=",
"np",
".",
"vstack",
"(",
"[",
"new_array",
",",
"[",
"n",
"]",
"]",
")",
"arrays",
"=",
"np",
".",
"hstack",
"(",
"[",
"old_array",
",",
"new_array",
"]",
")",
"return",
"arrays"
] | Function that equalises the input arrays by zero-padding the shortest one.
----------
Parameters
----------
array1: list or numpy.array
Array
array2: list or numpy.array
Array
Return
------
arrays: numpy.array
Array containing the equal-length arrays. | [
"Function",
"that",
"equalises",
"the",
"input",
"arrays",
"by",
"zero",
"-",
"padding",
"the",
"shortest",
"one",
"."
] | aaa01d4125180b3a34f1e26e0d3ff08c23f666d3 | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/synchronisation.py#L315-L348 | train |
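A small numpy sketch of _shape_array from the record above. The (sequence, value, digital) triplet layout per row is inferred from the padding logic (n[0::3] += 1, n[2::3] = 0) and is an assumption here:

import numpy as np
a = np.array([[0, 512, 1], [1, 513, 1], [2, 514, 1]])
b = np.array([[0, 600, 1], [1, 601, 1]])
out = _shape_array(a, b)  # out.shape == (3, 6); b was extended with the row [2, 601, 0]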
biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/biosignalsnotebooks/synchronisation.py | _create_txt_from_str | def _create_txt_from_str(in_path, channels, new_path):
"""
This function generates a text file with synchronised signals from the input file.
----------
Parameters
----------
in_path : str
Path to the file containing the two signals that will be synchronised.
channels : list
List with the strings identifying the channels of each signal.
new_path : str
The path to create the new file.
"""
header = ["# OpenSignals Text File Format"]
files = [bsnb.load(in_path)]
with open(in_path, encoding="latin-1") as opened_p:
header.append(opened_p.readlines()[1])
header.append("# EndOfHeader")
data = []
nr_channels = []
for file in files:
for i, device in enumerate(file.keys()):
nr_channels.append(len(list(file[device])))
data.append(file[device][channels[i]])
dephase, s1, s2 = synchronise_signals(data[0], data[1])
new_header = [h.replace("\n", "") for h in header]
sync_file = open(new_path, 'w')
sync_file.write(' \n'.join(new_header) + '\n')
old_columns = np.loadtxt(in_path)
if np.array_equal(s1, data[0]):
# Change the second device
aux = 3 * nr_channels[0]
columns = old_columns[dephase:, aux:]
new_file = _shape_array(old_columns[:, :aux], columns)
elif np.array_equal(s2, data[1]):
# Change the first device
aux = 3 * nr_channels[1]
columns = old_columns[dephase:, :aux]
new_file = _shape_array(columns, old_columns[:, aux:])
else:
print("The devices are synchronised.")
return
for line in new_file:
sync_file.write('\t'.join(str(int(i)) for i in line) + '\t\n')
sync_file.close() | python | def _create_txt_from_str(in_path, channels, new_path):
"""
This function generates a text file with synchronised signals from the input file.
----------
Parameters
----------
in_path : str
Path to the file containing the two signals that will be synchronised.
channels : list
List with the strings identifying the channels of each signal.
new_path : str
The path to create the new file.
"""
header = ["# OpenSignals Text File Format"]
files = [bsnb.load(in_path)]
with open(in_path, encoding="latin-1") as opened_p:
header.append(opened_p.readlines()[1])
header.append("# EndOfHeader")
data = []
nr_channels = []
for file in files:
for i, device in enumerate(file.keys()):
nr_channels.append(len(list(file[device])))
data.append(file[device][channels[i]])
dephase, s1, s2 = synchronise_signals(data[0], data[1])
new_header = [h.replace("\n", "") for h in header]
sync_file = open(new_path, 'w')
sync_file.write(' \n'.join(new_header) + '\n')
old_columns = np.loadtxt(in_path)
if np.array_equal(s1, data[0]):
# Change the second device
aux = 3 * nr_channels[0]
columns = old_columns[dephase:, aux:]
new_file = _shape_array(old_columns[:, :aux], columns)
elif np.array_equal(s2, data[1]):
# Change the first device
aux = 3 * nr_channels[1]
columns = old_columns[dephase:, :aux]
new_file = _shape_array(columns, old_columns[:, aux:])
else:
print("The devices are synchronised.")
return
for line in new_file:
sync_file.write('\t'.join(str(int(i)) for i in line) + '\t\n')
sync_file.close() | [
"def",
"_create_txt_from_str",
"(",
"in_path",
",",
"channels",
",",
"new_path",
")",
":",
"header",
"=",
"[",
"\"# OpenSignals Text File Format\"",
"]",
"files",
"=",
"[",
"bsnb",
".",
"load",
"(",
"in_path",
")",
"]",
"with",
"open",
"(",
"in_path",
",",
"encoding",
"=",
"\"latin-1\"",
")",
"as",
"opened_p",
":",
"header",
".",
"append",
"(",
"opened_p",
".",
"readlines",
"(",
")",
"[",
"1",
"]",
")",
"header",
".",
"append",
"(",
"\"# EndOfHeader\"",
")",
"data",
"=",
"[",
"]",
"nr_channels",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"for",
"i",
",",
"device",
"in",
"enumerate",
"(",
"file",
".",
"keys",
"(",
")",
")",
":",
"nr_channels",
".",
"append",
"(",
"len",
"(",
"list",
"(",
"file",
"[",
"device",
"]",
")",
")",
")",
"data",
".",
"append",
"(",
"file",
"[",
"device",
"]",
"[",
"channels",
"[",
"i",
"]",
"]",
")",
"dephase",
",",
"s1",
",",
"s2",
"=",
"synchronise_signals",
"(",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
")",
"new_header",
"=",
"[",
"h",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"for",
"h",
"in",
"header",
"]",
"sync_file",
"=",
"open",
"(",
"new_path",
",",
"'w'",
")",
"sync_file",
".",
"write",
"(",
"' \\n'",
".",
"join",
"(",
"new_header",
")",
"+",
"'\\n'",
")",
"old_columns",
"=",
"np",
".",
"loadtxt",
"(",
"in_path",
")",
"if",
"np",
".",
"array_equal",
"(",
"s1",
",",
"data",
"[",
"0",
"]",
")",
":",
"# Change the second device",
"aux",
"=",
"3",
"*",
"nr_channels",
"[",
"0",
"]",
"columns",
"=",
"old_columns",
"[",
"dephase",
":",
",",
"aux",
":",
"]",
"new_file",
"=",
"_shape_array",
"(",
"old_columns",
"[",
":",
",",
":",
"aux",
"]",
",",
"columns",
")",
"elif",
"np",
".",
"array_equal",
"(",
"s2",
",",
"data",
"[",
"1",
"]",
")",
":",
"# Change the first device",
"aux",
"=",
"3",
"*",
"nr_channels",
"[",
"1",
"]",
"columns",
"=",
"old_columns",
"[",
"dephase",
":",
",",
":",
"aux",
"]",
"new_file",
"=",
"_shape_array",
"(",
"columns",
",",
"old_columns",
"[",
":",
",",
"aux",
":",
"]",
")",
"else",
":",
"print",
"(",
"\"The devices are synchronised.\"",
")",
"return",
"for",
"line",
"in",
"new_file",
":",
"sync_file",
".",
"write",
"(",
"'\\t'",
".",
"join",
"(",
"str",
"(",
"int",
"(",
"i",
")",
")",
"for",
"i",
"in",
"line",
")",
"+",
"'\\t\\n'",
")",
"sync_file",
".",
"close",
"(",
")"
] | This function generates a text file with synchronised signals from the input file.
----------
Parameters
----------
in_path : str
Path to the file containing the two signals that will be synchronised.
channels : list
List with the strings identifying the channels of each signal.
new_path : str
The path to create the new file. | [
"This",
"function",
"allows",
"to",
"generate",
"a",
"text",
"file",
"with",
"synchronised",
"signals",
"from",
"the",
"input",
"file",
"."
] | aaa01d4125180b3a34f1e26e0d3ff08c23f666d3 | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/synchronisation.py#L444-L493 | train |
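A hedged call sketch for _create_txt_from_str above; the file paths and channel labels are hypothetical and depend on how the OpenSignals recording was made:

_create_txt_from_str('two_device_recording.txt', ['CH1', 'CH1'], 'two_device_recording_sync.txt')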
googleapis/protoc-java-resource-names-plugin | plugin/utils/path_template.py | PathTemplate.render | def render(self, bindings):
"""Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationException: If a key isn't provided or if a sub-template can't
be parsed.
"""
out = []
binding = False
for segment in self.segments:
if segment.kind == _BINDING:
if segment.literal not in bindings:
raise ValidationException(
('rendering error: value for key \'{}\' '
'not provided').format(segment.literal))
out.extend(PathTemplate(bindings[segment.literal]).segments)
binding = True
elif segment.kind == _END_BINDING:
binding = False
else:
if binding:
continue
out.append(segment)
path = _format(out)
self.match(path)
return path | python | def render(self, bindings):
"""Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationException: If a key isn't provided or if a sub-template can't
be parsed.
"""
out = []
binding = False
for segment in self.segments:
if segment.kind == _BINDING:
if segment.literal not in bindings:
raise ValidationException(
('rendering error: value for key \'{}\' '
'not provided').format(segment.literal))
out.extend(PathTemplate(bindings[segment.literal]).segments)
binding = True
elif segment.kind == _END_BINDING:
binding = False
else:
if binding:
continue
out.append(segment)
path = _format(out)
self.match(path)
return path | [
"def",
"render",
"(",
"self",
",",
"bindings",
")",
":",
"out",
"=",
"[",
"]",
"binding",
"=",
"False",
"for",
"segment",
"in",
"self",
".",
"segments",
":",
"if",
"segment",
".",
"kind",
"==",
"_BINDING",
":",
"if",
"segment",
".",
"literal",
"not",
"in",
"bindings",
":",
"raise",
"ValidationException",
"(",
"(",
"'rendering error: value for key \\'{}\\' '",
"'not provided'",
")",
".",
"format",
"(",
"segment",
".",
"literal",
")",
")",
"out",
".",
"extend",
"(",
"PathTemplate",
"(",
"bindings",
"[",
"segment",
".",
"literal",
"]",
")",
".",
"segments",
")",
"binding",
"=",
"True",
"elif",
"segment",
".",
"kind",
"==",
"_END_BINDING",
":",
"binding",
"=",
"False",
"else",
":",
"if",
"binding",
":",
"continue",
"out",
".",
"append",
"(",
"segment",
")",
"path",
"=",
"_format",
"(",
"out",
")",
"self",
".",
"match",
"(",
"path",
")",
"return",
"path"
] | Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationException: If a key isn't provided or if a sub-template can't
be parsed. | [
"Renders",
"a",
"string",
"from",
"a",
"path",
"template",
"using",
"the",
"provided",
"bindings",
"."
] | 3fb2ec9b778f62646c05a7b960c893464c7791c0 | https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L82-L113 | train |
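A minimal sketch of PathTemplate.render from the record above, assuming the constructor accepts a template string in the {var} binding syntax this grammar parses:

>>> tpl = PathTemplate('shelves/{shelf}/books/{book}')
>>> tpl.render({'shelf': 's1', 'book': 'b2'})
'shelves/s1/books/b2'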
googleapis/protoc-java-resource-names-plugin | plugin/utils/path_template.py | PathTemplate.match | def match(self, path):
"""Matches a fully qualified path template string.
Args:
path (str): A fully qualified path template string.
Returns:
dict: Var names to matched binding values.
Raises:
ValidationException: If path can't be matched to the template.
"""
this = self.segments
that = path.split('/')
current_var = None
bindings = {}
segment_count = self.segment_count
j = 0
for i in range(0, len(this)):
if j >= len(that):
break
if this[i].kind == _TERMINAL:
if this[i].literal == '*':
bindings[current_var] = that[j]
j += 1
elif this[i].literal == '**':
until = j + len(that) - segment_count + 1
segment_count += len(that) - segment_count
bindings[current_var] = '/'.join(that[j:until])
j = until
elif this[i].literal != that[j]:
raise ValidationException(
'mismatched literal: \'%s\' != \'%s\'' % (
this[i].literal, that[j]))
else:
j += 1
elif this[i].kind == _BINDING:
current_var = this[i].literal
if j != len(that) or j != segment_count:
raise ValidationException(
'match error: could not render from the path template: {}'
.format(path))
return bindings | python | def match(self, path):
"""Matches a fully qualified path template string.
Args:
path (str): A fully qualified path template string.
Returns:
dict: Var names to matched binding values.
Raises:
ValidationException: If path can't be matched to the template.
"""
this = self.segments
that = path.split('/')
current_var = None
bindings = {}
segment_count = self.segment_count
j = 0
for i in range(0, len(this)):
if j >= len(that):
break
if this[i].kind == _TERMINAL:
if this[i].literal == '*':
bindings[current_var] = that[j]
j += 1
elif this[i].literal == '**':
until = j + len(that) - segment_count + 1
segment_count += len(that) - segment_count
bindings[current_var] = '/'.join(that[j:until])
j = until
elif this[i].literal != that[j]:
raise ValidationException(
'mismatched literal: \'%s\' != \'%s\'' % (
this[i].literal, that[j]))
else:
j += 1
elif this[i].kind == _BINDING:
current_var = this[i].literal
if j != len(that) or j != segment_count:
raise ValidationException(
'match error: could not render from the path template: {}'
.format(path))
return bindings | [
"def",
"match",
"(",
"self",
",",
"path",
")",
":",
"this",
"=",
"self",
".",
"segments",
"that",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"current_var",
"=",
"None",
"bindings",
"=",
"{",
"}",
"segment_count",
"=",
"self",
".",
"segment_count",
"j",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"this",
")",
")",
":",
"if",
"j",
">=",
"len",
"(",
"that",
")",
":",
"break",
"if",
"this",
"[",
"i",
"]",
".",
"kind",
"==",
"_TERMINAL",
":",
"if",
"this",
"[",
"i",
"]",
".",
"literal",
"==",
"'*'",
":",
"bindings",
"[",
"current_var",
"]",
"=",
"that",
"[",
"j",
"]",
"j",
"+=",
"1",
"elif",
"this",
"[",
"i",
"]",
".",
"literal",
"==",
"'**'",
":",
"until",
"=",
"j",
"+",
"len",
"(",
"that",
")",
"-",
"segment_count",
"+",
"1",
"segment_count",
"+=",
"len",
"(",
"that",
")",
"-",
"segment_count",
"bindings",
"[",
"current_var",
"]",
"=",
"'/'",
".",
"join",
"(",
"that",
"[",
"j",
":",
"until",
"]",
")",
"j",
"=",
"until",
"elif",
"this",
"[",
"i",
"]",
".",
"literal",
"!=",
"that",
"[",
"j",
"]",
":",
"raise",
"ValidationException",
"(",
"'mismatched literal: \\'%s\\' != \\'%s\\''",
"%",
"(",
"this",
"[",
"i",
"]",
".",
"literal",
",",
"that",
"[",
"j",
"]",
")",
")",
"else",
":",
"j",
"+=",
"1",
"elif",
"this",
"[",
"i",
"]",
".",
"kind",
"==",
"_BINDING",
":",
"current_var",
"=",
"this",
"[",
"i",
"]",
".",
"literal",
"if",
"j",
"!=",
"len",
"(",
"that",
")",
"or",
"j",
"!=",
"segment_count",
":",
"raise",
"ValidationException",
"(",
"'match error: could not render from the path template: {}'",
".",
"format",
"(",
"path",
")",
")",
"return",
"bindings"
] | Matches a fully qualified path template string.
Args:
path (str): A fully qualified path template string.
Returns:
dict: Var names to matched binding values.
Raises:
ValidationException: If path can't be matched to the template. | [
"Matches",
"a",
"fully",
"qualified",
"path",
"template",
"string",
"."
] | 3fb2ec9b778f62646c05a7b960c893464c7791c0 | https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L115-L157 | train |
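A companion sketch for PathTemplate.match above, the inverse of render; the template string is the same assumption as before:

>>> tpl = PathTemplate('shelves/{shelf}/books/{book}')
>>> tpl.match('shelves/s1/books/b2')  # returns {'shelf': 's1', 'book': 'b2'}
>>> tpl.match('shelves/s1')           # wrong segment count -> raises ValidationException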
googleapis/protoc-java-resource-names-plugin | plugin/utils/path_template.py | _Parser.parse | def parse(self, data):
"""Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
"""
self.binding_var_count = 0
self.segment_count = 0
segments = self.parser.parse(data)
# Validation step: checks that the template has at most one path wildcard.
path_wildcard = False
for segment in segments:
if segment.kind == _TERMINAL and segment.literal == '**':
if path_wildcard:
raise ValidationException(
'validation error: path template cannot contain more '
'than one path wildcard')
path_wildcard = True
return segments | python | def parse(self, data):
"""Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment.
"""
self.binding_var_count = 0
self.segment_count = 0
segments = self.parser.parse(data)
# Validation step: checks that the template has at most one path wildcard.
path_wildcard = False
for segment in segments:
if segment.kind == _TERMINAL and segment.literal == '**':
if path_wildcard:
raise ValidationException(
'validation error: path template cannot contain more '
'than one path wildcard')
path_wildcard = True
return segments | [
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"binding_var_count",
"=",
"0",
"self",
".",
"segment_count",
"=",
"0",
"segments",
"=",
"self",
".",
"parser",
".",
"parse",
"(",
"data",
")",
"# Validation step: checks that there are no nested bindings.",
"path_wildcard",
"=",
"False",
"for",
"segment",
"in",
"segments",
":",
"if",
"segment",
".",
"kind",
"==",
"_TERMINAL",
"and",
"segment",
".",
"literal",
"==",
"'**'",
":",
"if",
"path_wildcard",
":",
"raise",
"ValidationException",
"(",
"'validation error: path template cannot contain more '",
"'than one path wildcard'",
")",
"path_wildcard",
"=",
"True",
"return",
"segments"
] | Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment. | [
"Returns",
"a",
"list",
"of",
"path",
"template",
"segments",
"parsed",
"from",
"data",
"."
] | 3fb2ec9b778f62646c05a7b960c893464c7791c0 | https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L190-L211 | train |
mozilla/FoxPuppet | foxpuppet/windows/browser/notifications/base.py | BaseNotification.create | def create(window, root):
"""Create a notification object.
Args:
window (:py:class:`BrowserWindow`): Window object this region
appears in.
root
(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
WebDriver element object that serves as the root for the
notification.
Returns:
:py:class:`BaseNotification`: Firefox notification.
"""
notifications = {}
_id = root.get_property("id")
from foxpuppet.windows.browser.notifications import addons
notifications.update(addons.NOTIFICATIONS)
return notifications.get(_id, BaseNotification)(window, root) | python | def create(window, root):
"""Create a notification object.
Args:
window (:py:class:`BrowserWindow`): Window object this region
appears in.
root
(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
WebDriver element object that serves as the root for the
notification.
Returns:
:py:class:`BaseNotification`: Firefox notification.
"""
notifications = {}
_id = root.get_property("id")
from foxpuppet.windows.browser.notifications import addons
notifications.update(addons.NOTIFICATIONS)
return notifications.get(_id, BaseNotification)(window, root) | [
"def",
"create",
"(",
"window",
",",
"root",
")",
":",
"notifications",
"=",
"{",
"}",
"_id",
"=",
"root",
".",
"get_property",
"(",
"\"id\"",
")",
"from",
"foxpuppet",
".",
"windows",
".",
"browser",
".",
"notifications",
"import",
"addons",
"notifications",
".",
"update",
"(",
"addons",
".",
"NOTIFICATIONS",
")",
"return",
"notifications",
".",
"get",
"(",
"_id",
",",
"BaseNotification",
")",
"(",
"window",
",",
"root",
")"
] | Create a notification object.
Args:
window (:py:class:`BrowserWindow`): Window object this region
appears in.
root
(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
WebDriver element object that serves as the root for the
notification.
Returns:
:py:class:`BaseNotification`: Firefox notification. | [
"Create",
"a",
"notification",
"object",
"."
] | 6575eb4c72fd024c986b254e198c8b4e6f68cddd | https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L19-L39 | train |
mozilla/FoxPuppet | foxpuppet/windows/browser/notifications/base.py | BaseNotification.label | def label(self):
"""Provide access to the notification label.
Returns:
str: The notification label
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("label") | python | def label(self):
"""Provide access to the notification label.
Returns:
str: The notification label
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("label") | [
"def",
"label",
"(",
"self",
")",
":",
"with",
"self",
".",
"selenium",
".",
"context",
"(",
"self",
".",
"selenium",
".",
"CONTEXT_CHROME",
")",
":",
"return",
"self",
".",
"root",
".",
"get_attribute",
"(",
"\"label\"",
")"
] | Provide access to the notification label.
Returns:
str: The notification label | [
"Provide",
"access",
"to",
"the",
"notification",
"label",
"."
] | 6575eb4c72fd024c986b254e198c8b4e6f68cddd | https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L42-L50 | train |
mozilla/FoxPuppet | foxpuppet/windows/browser/notifications/base.py | BaseNotification.origin | def origin(self):
"""Provide access to the notification origin.
Returns:
str: The notification origin.
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("origin") | python | def origin(self):
"""Provide access to the notification origin.
Returns:
str: The notification origin.
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("origin") | [
"def",
"origin",
"(",
"self",
")",
":",
"with",
"self",
".",
"selenium",
".",
"context",
"(",
"self",
".",
"selenium",
".",
"CONTEXT_CHROME",
")",
":",
"return",
"self",
".",
"root",
".",
"get_attribute",
"(",
"\"origin\"",
")"
] | Provide access to the notification origin.
Returns:
str: The notification origin. | [
"Provide",
"access",
"to",
"the",
"notification",
"origin",
"."
] | 6575eb4c72fd024c986b254e198c8b4e6f68cddd | https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L53-L61 | train |
mozilla/FoxPuppet | foxpuppet/windows/browser/notifications/base.py | BaseNotification.find_primary_button | def find_primary_button(self):
"""Retrieve the primary button."""
if self.window.firefox_version >= 67:
return self.root.find_element(
By.CLASS_NAME, "popup-notification-primary-button")
return self.root.find_anonymous_element_by_attribute(
"anonid", "button") | python | def find_primary_button(self):
"""Retrieve the primary button."""
if self.window.firefox_version >= 67:
return self.root.find_element(
By.CLASS_NAME, "popup-notification-primary-button")
return self.root.find_anonymous_element_by_attribute(
"anonid", "button") | [
"def",
"find_primary_button",
"(",
"self",
")",
":",
"if",
"self",
".",
"window",
".",
"firefox_version",
">=",
"67",
":",
"return",
"self",
".",
"root",
".",
"find_element",
"(",
"By",
".",
"CLASS_NAME",
",",
"\"popup-notification-primary-button\"",
")",
"return",
"self",
".",
"root",
".",
"find_anonymous_element_by_attribute",
"(",
"\"anonid\"",
",",
"\"button\"",
")"
] | Retrieve the primary button. | [
"Retrieve",
"the",
"primary",
"button",
"."
] | 6575eb4c72fd024c986b254e198c8b4e6f68cddd | https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L63-L69 | train |
mozilla/FoxPuppet | foxpuppet/windows/manager.py | WindowManager.windows | def windows(self):
"""Return a list of all open windows.
Returns:
list: List of FoxPuppet BrowserWindow objects.
"""
from foxpuppet.windows import BrowserWindow
return [
BrowserWindow(self.selenium, handle)
for handle in self.selenium.window_handles
] | python | def windows(self):
"""Return a list of all open windows.
Returns:
list: List of FoxPuppet BrowserWindow objects.
"""
from foxpuppet.windows import BrowserWindow
return [
BrowserWindow(self.selenium, handle)
for handle in self.selenium.window_handles
] | [
"def",
"windows",
"(",
"self",
")",
":",
"from",
"foxpuppet",
".",
"windows",
"import",
"BrowserWindow",
"return",
"[",
"BrowserWindow",
"(",
"self",
".",
"selenium",
",",
"handle",
")",
"for",
"handle",
"in",
"self",
".",
"selenium",
".",
"window_handles",
"]"
] | Return a list of all open windows.
Returns:
list: List of FoxPuppet BrowserWindow objects. | [
"Return",
"a",
"list",
"of",
"all",
"open",
"windows",
"."
] | 6575eb4c72fd024c986b254e198c8b4e6f68cddd | https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/manager.py#L26-L38 | train |
thomasdelaet/python-velbus | velbus/connections/socket.py | SocketConnection.read_daemon | def read_daemon(self):
"""Read thread."""
while True:
data = self._socket.recv(9999)
self.feed_parser(data) | python | def read_daemon(self):
"""Read thread."""
while True:
data = self._socket.recv(9999)
self.feed_parser(data) | [
"def",
"read_daemon",
"(",
"self",
")",
":",
"while",
"True",
":",
"data",
"=",
"self",
".",
"_socket",
".",
"recv",
"(",
"9999",
")",
"self",
".",
"feed_parser",
"(",
"data",
")"
] | Read thread. | [
"Read",
"thread",
"."
] | af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/connections/socket.py#L66-L70 | train |
KE-works/pykechain | pykechain/models/validators/validators_base.py | PropertyValidator._logic | def _logic(self, value=None):
# type: (Any) -> Tuple[Union[bool, None], str]
"""Process the inner logic of the validator.
The validation results are returned as a tuple (boolean (true/false), reason text)
"""
self._validation_result, self._validation_reason = None, 'No reason'
return self._validation_result, self._validation_reason | python | def _logic(self, value=None):
# type: (Any) -> Tuple[Union[bool, None], str]
"""Process the inner logic of the validator.
The validation results are returned as a tuple (boolean (true/false), reason text)
"""
self._validation_result, self._validation_reason = None, 'No reason'
return self._validation_result, self._validation_reason | [
"def",
"_logic",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"# type: (Any) -> Tuple[Union[bool, None], str]",
"self",
".",
"_validation_result",
",",
"self",
".",
"_validation_reason",
"=",
"None",
",",
"'No reason'",
"return",
"self",
".",
"_validation_result",
",",
"self",
".",
"_validation_reason"
] | Process the inner logic of the validator.
The validation results are returned as a tuple (boolean (true/false), reason text) | [
"Process",
"the",
"inner",
"logic",
"of",
"the",
"validator",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/validators/validators_base.py#L194-L201 | train |
robert-b-clarke/nre-darwin-py | nredarwin/webservice.py | DarwinLdbSession.get_station_board | def get_station_board(
self,
crs,
rows=10,
include_departures=True,
include_arrivals=False,
destination_crs=None,
origin_crs=None
):
"""
Query the darwin webservice to obtain a board for a particular station
and return a StationBoard instance
Positional arguments:
crs -- the three letter CRS code of a UK station
Keyword arguments:
rows -- the number of rows to retrieve (default 10)
include_departures -- include departing services in the departure board
(default True)
include_arrivals -- include arriving services in the departure board
(default False)
destination_crs -- filter results so they only include services
calling at a particular destination (default None)
origin_crs -- filter results so they only include services
originating from a particular station (default None)
"""
# Determine the darwin query we want to make
if include_departures and include_arrivals:
query_type = 'GetArrivalDepartureBoard'
elif include_departures:
query_type = 'GetDepartureBoard'
elif include_arrivals:
query_type = 'GetArrivalBoard'
else:
raise ValueError(
"get_station_board must have either include_departures or \
include_arrivals set to True"
)
# build a query function
q = partial(self._base_query()[query_type], crs=crs, numRows=rows)
if destination_crs:
if origin_crs:
log.warn(
"Station board query can only filter on one of \
destination_crs and origin_crs, using only destination_crs"
)
q = partial(q, filterCrs=destination_crs, filterType='to')
elif origin_crs:
q = partial(q, filterCrs=origin_crs, filterType='from')
try:
soap_response = q()
except WebFault:
raise WebServiceError
return StationBoard(soap_response) | python | def get_station_board(
self,
crs,
rows=10,
include_departures=True,
include_arrivals=False,
destination_crs=None,
origin_crs=None
):
"""
Query the darwin webservice to obtain a board for a particular station
and return a StationBoard instance
Positional arguments:
crs -- the three letter CRS code of a UK station
Keyword arguments:
rows -- the number of rows to retrieve (default 10)
include_departures -- include departing services in the departure board
(default True)
include_arrivals -- include arriving services in the departure board
(default False)
destination_crs -- filter results so they only include services
calling at a particular destination (default None)
origin_crs -- filter results so they only include services
originating from a particular station (default None)
"""
# Determine the darwin query we want to make
if include_departures and include_arrivals:
query_type = 'GetArrivalDepartureBoard'
elif include_departures:
query_type = 'GetDepartureBoard'
elif include_arrivals:
query_type = 'GetArrivalBoard'
else:
raise ValueError(
"get_station_board must have either include_departures or \
include_arrivals set to True"
)
# build a query function
q = partial(self._base_query()[query_type], crs=crs, numRows=rows)
if destination_crs:
if origin_crs:
log.warn(
"Station board query can only filter on one of \
destination_crs and origin_crs, using only destination_crs"
)
q = partial(q, filterCrs=destination_crs, filterType='to')
elif origin_crs:
q = partial(q, filterCrs=origin_crs, filterType='from')
try:
soap_response = q()
except WebFault:
raise WebServiceError
return StationBoard(soap_response) | [
"def",
"get_station_board",
"(",
"self",
",",
"crs",
",",
"rows",
"=",
"17",
",",
"include_departures",
"=",
"True",
",",
"include_arrivals",
"=",
"False",
",",
"destination_crs",
"=",
"None",
",",
"origin_crs",
"=",
"None",
")",
":",
"# Determine the darwn query we want to make",
"if",
"include_departures",
"and",
"include_arrivals",
":",
"query_type",
"=",
"'GetArrivalDepartureBoard'",
"elif",
"include_departures",
":",
"query_type",
"=",
"'GetDepartureBoard'",
"elif",
"include_arrivals",
":",
"query_type",
"=",
"'GetArrivalBoard'",
"else",
":",
"raise",
"ValueError",
"(",
"\"get_station_board must have either include_departures or \\\ninclude_arrivals set to True\"",
")",
"# build a query function",
"q",
"=",
"partial",
"(",
"self",
".",
"_base_query",
"(",
")",
"[",
"query_type",
"]",
",",
"crs",
"=",
"crs",
",",
"numRows",
"=",
"rows",
")",
"if",
"destination_crs",
":",
"if",
"origin_crs",
":",
"log",
".",
"warn",
"(",
"\"Station board query can only filter on one of \\\ndestination_crs and origin_crs, using only destination_crs\"",
")",
"q",
"=",
"partial",
"(",
"q",
",",
"filterCrs",
"=",
"destination_crs",
",",
"filterType",
"=",
"'to'",
")",
"elif",
"origin_crs",
":",
"q",
"=",
"partial",
"(",
"q",
",",
"filterCrs",
"=",
"origin_crs",
",",
"filterType",
"=",
"'from'",
")",
"try",
":",
"soap_response",
"=",
"q",
"(",
")",
"except",
"WebFault",
":",
"raise",
"WebServiceError",
"return",
"StationBoard",
"(",
"soap_response",
")"
] | Query the darwin webservice to obtain a board for a particular station
and return a StationBoard instance
Positional arguments:
crs -- the three letter CRS code of a UK station
Keyword arguments:
rows -- the number of rows to retrieve (default 10)
include_departures -- include departing services in the departure board
(default True)
include_arrivals -- include arriving services in the departure board
(default False)
destination_crs -- filter results so they only include services
calling at a particular destination (default None)
origin_crs -- filter results so they only include services
originating from a particular station (default None) | [
"Query",
"the",
"darwin",
"webservice",
"to",
"obtain",
"a",
"board",
"for",
"a",
"particular",
"station",
"and",
"return",
"a",
"StationBoard",
"instance"
] | 6b0b181770e085dc7f71fbd2eb3fe779f653da62 | https://github.com/robert-b-clarke/nre-darwin-py/blob/6b0b181770e085dc7f71fbd2eb3fe779f653da62/nredarwin/webservice.py#L67-L121 | train |
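A hedged end-to-end sketch for get_station_board above. The WSDL URL and API token are placeholders that must be obtained from National Rail Enquiries, and the CRS codes are arbitrary examples:

from nredarwin.webservice import DarwinLdbSession
session = DarwinLdbSession(wsdl='https://.../OpenLDBWS/wsdl.aspx', api_key='YOUR-TOKEN')
board = session.get_station_board('MAN', rows=5, destination_crs='EUS')  # departures calling at Euston only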
robert-b-clarke/nre-darwin-py | nredarwin/webservice.py | DarwinLdbSession.get_service_details | def get_service_details(self, service_id):
"""
Get the details of an individual service and return a ServiceDetails
instance.
Positional arguments:
service_id: A Darwin LDB service id
"""
service_query = \
self._soap_client.service['LDBServiceSoap']['GetServiceDetails']
try:
soap_response = service_query(serviceID=service_id)
except WebFault:
raise WebServiceError
return ServiceDetails(soap_response) | python | def get_service_details(self, service_id):
"""
Get the details of an individual service and return a ServiceDetails
instance.
Positional arguments:
service_id: A Darwin LDB service id
"""
service_query = \
self._soap_client.service['LDBServiceSoap']['GetServiceDetails']
try:
soap_response = service_query(serviceID=service_id)
except WebFault:
raise WebServiceError
return ServiceDetails(soap_response) | [
"def",
"get_service_details",
"(",
"self",
",",
"service_id",
")",
":",
"service_query",
"=",
"self",
".",
"_soap_client",
".",
"service",
"[",
"'LDBServiceSoap'",
"]",
"[",
"'GetServiceDetails'",
"]",
"try",
":",
"soap_response",
"=",
"service_query",
"(",
"serviceID",
"=",
"service_id",
")",
"except",
"WebFault",
":",
"raise",
"WebServiceError",
"return",
"ServiceDetails",
"(",
"soap_response",
")"
] | Get the details of an individual service and return a ServiceDetails
instance.
Positional arguments:
service_id: A Darwin LDB service id | [
"Get",
"the",
"details",
"of",
"an",
"individual",
"service",
"and",
"return",
"a",
"ServiceDetails",
"instance",
"."
] | 6b0b181770e085dc7f71fbd2eb3fe779f653da62 | https://github.com/robert-b-clarke/nre-darwin-py/blob/6b0b181770e085dc7f71fbd2eb3fe779f653da62/nredarwin/webservice.py#L123-L137 | train |
kytos/kytos-utils | kytos/utils/openapi.py | OpenAPI.render_template | def render_template(self):
"""Render and save API doc in openapi.yml."""
self._parse_paths()
context = dict(napp=self._napp.__dict__, paths=self._paths)
self._save(context) | python | def render_template(self):
"""Render and save API doc in openapi.yml."""
self._parse_paths()
context = dict(napp=self._napp.__dict__, paths=self._paths)
self._save(context) | [
"def",
"render_template",
"(",
"self",
")",
":",
"self",
".",
"_parse_paths",
"(",
")",
"context",
"=",
"dict",
"(",
"napp",
"=",
"self",
".",
"_napp",
".",
"__dict__",
",",
"paths",
"=",
"self",
".",
"_paths",
")",
"self",
".",
"_save",
"(",
"context",
")"
] | Render and save API doc in openapi.yml. | [
"Render",
"and",
"save",
"API",
"doc",
"in",
"openapi",
".",
"yml",
"."
] | b4750c618d15cff75970ea6124bda4d2b9a33578 | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L35-L39 | train |
kytos/kytos-utils | kytos/utils/openapi.py | OpenAPI._parse_decorated_functions | def _parse_decorated_functions(self, code):
"""Return URL rule, HTTP methods and docstring."""
matches = re.finditer(r"""
# @rest decorators
(?P<decorators>
(?:@rest\(.+?\)\n)+ # one or more @rest decorators inside
)
# docstring delimited by 3 double quotes
.+?"{3}(?P<docstring>.+?)"{3}
""", code, re.VERBOSE | re.DOTALL)
for function_match in matches:
m_dict = function_match.groupdict()
self._parse_docstring(m_dict['docstring'])
self._add_function_paths(m_dict['decorators']) | python | def _parse_decorated_functions(self, code):
"""Return URL rule, HTTP methods and docstring."""
matches = re.finditer(r"""
# @rest decorators
(?P<decorators>
(?:@rest\(.+?\)\n)+ # one or more @rest decorators inside
)
# docstring delimited by 3 double quotes
.+?"{3}(?P<docstring>.+?)"{3}
""", code, re.VERBOSE | re.DOTALL)
for function_match in matches:
m_dict = function_match.groupdict()
self._parse_docstring(m_dict['docstring'])
self._add_function_paths(m_dict['decorators']) | [
"def",
"_parse_decorated_functions",
"(",
"self",
",",
"code",
")",
":",
"matches",
"=",
"re",
".",
"finditer",
"(",
"r\"\"\"\n # @rest decorators\n (?P<decorators>\n (?:@rest\\(.+?\\)\\n)+ # one or more @rest decorators inside\n )\n # docstring delimited by 3 double quotes\n .+?\"{3}(?P<docstring>.+?)\"{3}\n \"\"\"",
",",
"code",
",",
"re",
".",
"VERBOSE",
"|",
"re",
".",
"DOTALL",
")",
"for",
"function_match",
"in",
"matches",
":",
"m_dict",
"=",
"function_match",
".",
"groupdict",
"(",
")",
"self",
".",
"_parse_docstring",
"(",
"m_dict",
"[",
"'docstring'",
"]",
")",
"self",
".",
"_add_function_paths",
"(",
"m_dict",
"[",
"'decorators'",
"]",
")"
] | Return URL rule, HTTP methods and docstring. | [
"Return",
"URL",
"rule",
"HTTP",
"methods",
"and",
"docstring",
"."
] | b4750c618d15cff75970ea6124bda4d2b9a33578 | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L46-L60 | train |
kytos/kytos-utils | kytos/utils/openapi.py | OpenAPI._parse_docstring | def _parse_docstring(self, docstring):
"""Parse the method docstring."""
match = re.match(r"""
# Following PEP 257
\s* (?P<summary>[^\n]+?) \s* # First line
( # Description and YAML are optional
(\n \s*){2} # Blank line
# Description (optional)
(
(?!-{3,}) # Don't use YAML as description
\s* (?P<description>.+?) \s* # Third line and maybe others
(?=-{3,})? # Stop if "---" is found
)?
# YAML spec (optional) **currently not used**
(
-{3,}\n # "---" begins yaml spec
(?P<open_api>.+)
)?
)?
$""", docstring, re.VERBOSE | re.DOTALL)
summary = 'TODO write the summary.'
description = 'TODO write/remove the description'
if match:
m_dict = match.groupdict()
summary = m_dict['summary']
if m_dict['description']:
description = re.sub(r'(\s|\n){2,}', ' ',
m_dict['description'])
self._summary = summary
self._description = description | python | def _parse_docstring(self, docstring):
"""Parse the method docstring."""
match = re.match(r"""
# Following PEP 257
\s* (?P<summary>[^\n]+?) \s* # First line
( # Description and YAML are optional
(\n \s*){2} # Blank line
# Description (optional)
(
(?!-{3,}) # Don't use YAML as description
\s* (?P<description>.+?) \s* # Third line and maybe others
(?=-{3,})? # Stop if "---" is found
)?
# YAML spec (optional) **currently not used**
(
-{3,}\n # "---" begins yaml spec
(?P<open_api>.+)
)?
)?
$""", docstring, re.VERBOSE | re.DOTALL)
summary = 'TODO write the summary.'
description = 'TODO write/remove the description'
if match:
m_dict = match.groupdict()
summary = m_dict['summary']
if m_dict['description']:
description = re.sub(r'(\s|\n){2,}', ' ',
m_dict['description'])
self._summary = summary
self._description = description | [
"def",
"_parse_docstring",
"(",
"self",
",",
"docstring",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r\"\"\"\n # Following PEP 257\n \\s* (?P<summary>[^\\n]+?) \\s* # First line\n\n ( # Description and YAML are optional\n (\\n \\s*){2} # Blank line\n\n # Description (optional)\n (\n (?!-{3,}) # Don't use YAML as description\n \\s* (?P<description>.+?) \\s* # Third line and maybe others\n (?=-{3,})? # Stop if \"---\" is found\n )?\n\n # YAML spec (optional) **currently not used**\n (\n -{3,}\\n # \"---\" begins yaml spec\n (?P<open_api>.+)\n )?\n )?\n $\"\"\"",
",",
"docstring",
",",
"re",
".",
"VERBOSE",
"|",
"re",
".",
"DOTALL",
")",
"summary",
"=",
"'TODO write the summary.'",
"description",
"=",
"'TODO write/remove the description'",
"if",
"match",
":",
"m_dict",
"=",
"match",
".",
"groupdict",
"(",
")",
"summary",
"=",
"m_dict",
"[",
"'summary'",
"]",
"if",
"m_dict",
"[",
"'description'",
"]",
":",
"description",
"=",
"re",
".",
"sub",
"(",
"r'(\\s|\\n){2,}'",
",",
"' '",
",",
"m_dict",
"[",
"'description'",
"]",
")",
"self",
".",
"_summary",
"=",
"summary",
"self",
".",
"_description",
"=",
"description"
] | Parse the method docstring. | [
"Parse",
"the",
"method",
"docstring",
"."
] | b4750c618d15cff75970ea6124bda4d2b9a33578 | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L69-L102 | train |
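A sketch of what _parse_docstring above extracts from a PEP 257 style docstring; the instance is created without __init__ purely for illustration:

doc = '''Create a circuit.

Build a circuit between two terminals.
---
responses: ...
'''
api = OpenAPI.__new__(OpenAPI)  # bypasses __init__, illustration only
api._parse_docstring(doc)
api._summary      # 'Create a circuit.'
api._description  # 'Build a circuit between two terminals.'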
kytos/kytos-utils | kytos/utils/openapi.py | OpenAPI._parse_methods | def _parse_methods(cls, list_string):
"""Return HTTP method list. Use json for security reasons."""
if list_string is None:
return APIServer.DEFAULT_METHODS
# json requires double quotes
json_list = list_string.replace("'", '"')
return json.loads(json_list) | python | def _parse_methods(cls, list_string):
"""Return HTTP method list. Use json for security reasons."""
if list_string is None:
return APIServer.DEFAULT_METHODS
# json requires double quotes
json_list = list_string.replace("'", '"')
return json.loads(json_list) | [
"def",
"_parse_methods",
"(",
"cls",
",",
"list_string",
")",
":",
"if",
"list_string",
"is",
"None",
":",
"return",
"APIServer",
".",
"DEFAULT_METHODS",
"# json requires double quotes",
"json_list",
"=",
"list_string",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"return",
"json",
".",
"loads",
"(",
"json_list",
")"
] | Return HTTP method list. Use json for security reasons. | [
"Return",
"HTTP",
"method",
"list",
".",
"Use",
"json",
"for",
"security",
"reasons",
"."
] | b4750c618d15cff75970ea6124bda4d2b9a33578 | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L127-L133 | train |
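A quick sketch of the quote-swapping trick in _parse_methods above, assuming it is exposed as a classmethod (as the cls parameter suggests):

>>> OpenAPI._parse_methods("['GET', 'POST']")  # single quotes become double quotes, then json.loads
['GET', 'POST']
>>> OpenAPI._parse_methods(None)  # falls back to APIServer.DEFAULT_METHODS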
kytos/kytos-utils | kytos/utils/openapi.py | OpenAPI._rule2path | def _rule2path(cls, rule):
"""Convert relative Flask rule to absolute OpenAPI path."""
typeless = re.sub(r'<\w+?:', '<', rule) # remove Flask types
return typeless.replace('<', '{').replace('>', '}') | python | def _rule2path(cls, rule):
"""Convert relative Flask rule to absolute OpenAPI path."""
typeless = re.sub(r'<\w+?:', '<', rule) # remove Flask types
return typeless.replace('<', '{').replace('>', '}') | [
"def",
"_rule2path",
"(",
"cls",
",",
"rule",
")",
":",
"typeless",
"=",
"re",
".",
"sub",
"(",
"r'<\\w+?:'",
",",
"'<'",
",",
"rule",
")",
"# remove Flask types",
"return",
"typeless",
".",
"replace",
"(",
"'<'",
",",
"'{'",
")",
".",
"replace",
"(",
"'>'",
",",
"'}'",
")"
] | Convert relative Flask rule to absolute OpenAPI path. | [
"Convert",
"relative",
"Flask",
"rule",
"to",
"absolute",
"OpenAPI",
"path",
"."
] | b4750c618d15cff75970ea6124bda4d2b9a33578 | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/openapi.py#L142-L145 | train |
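A sketch of _rule2path above, stripping Flask converters and swapping angle brackets for braces; the rule string is a hypothetical example:

>>> OpenAPI._rule2path('v1/circuits/<int:circuit_id>/info/<detail>')
'v1/circuits/{circuit_id}/info/{detail}'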
KE-works/pykechain | pykechain/models/part.py | Part.property | def property(self, name):
"""Retrieve the property belonging to this part based on its name or uuid.
:param name: property name or property UUID to search for
:type name: basestring
:return: a single :class:`Property`
:raises NotFoundError: if the `Property` is not part of the `Part`
Example
-------
>>> part = project.part('Bike')
>>> part.properties
[<pyke Property ...>, ...]
# this returns a list of all properties of this part
>>> gears = part.property('Gears')
>>> gears.value
6
>>> gears = part.property('123e4567-e89b-12d3-a456-426655440000')
>>> gears.value
6
"""
found = None
if is_uuid(name):
found = find(self.properties, lambda p: name == p.id)
else:
found = find(self.properties, lambda p: name == p.name)
if not found:
raise NotFoundError("Could not find property with name or id {}".format(name))
return found | python | def property(self, name):
"""Retrieve the property belonging to this part based on its name or uuid.
:param name: property name or property UUID to search for
:type name: basestring
:return: a single :class:`Property`
:raises NotFoundError: if the `Property` is not part of the `Part`
Example
-------
>>> part = project.part('Bike')
>>> part.properties
[<pyke Property ...>, ...]
# this returns a list of all properties of this part
>>> gears = part.property('Gears')
>>> gears.value
6
>>> gears = part.property('123e4567-e89b-12d3-a456-426655440000')
>>> gears.value
6
"""
found = None
if is_uuid(name):
found = find(self.properties, lambda p: name == p.id)
else:
found = find(self.properties, lambda p: name == p.name)
if not found:
raise NotFoundError("Could not find property with name or id {}".format(name))
return found | [
"def",
"property",
"(",
"self",
",",
"name",
")",
":",
"found",
"=",
"None",
"if",
"is_uuid",
"(",
"name",
")",
":",
"found",
"=",
"find",
"(",
"self",
".",
"properties",
",",
"lambda",
"p",
":",
"name",
"==",
"p",
".",
"id",
")",
"else",
":",
"found",
"=",
"find",
"(",
"self",
".",
"properties",
",",
"lambda",
"p",
":",
"name",
"==",
"p",
".",
"name",
")",
"if",
"not",
"found",
":",
"raise",
"NotFoundError",
"(",
"\"Could not find property with name or id {}\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"found"
] | Retrieve the property belonging to this part based on its name or uuid.
:param name: property name or property UUID to search for
:type name: basestring
:return: a single :class:`Property`
:raises NotFoundError: if the `Property` is not part of the `Part`
Example
-------
>>> part = project.part('Bike')
>>> part.properties
[<pyke Property ...>, ...]
# this returns a list of all properties of this part
>>> gears = part.property('Gears')
>>> gears.value
6
>>> gears = part.property('123e4567-e89b-12d3-a456-426655440000')
>>> gears.value
6 | [
"Retrieve",
"the",
"property",
"belonging",
"to",
"this",
"part",
"based",
"on",
"its",
"name",
"or",
"uuid",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L69-L103 | train |
KE-works/pykechain | pykechain/models/part.py | Part.parent | def parent(self):
# type: () -> Any
"""Retrieve the parent of this `Part`.
:return: the parent :class:`Part` of this part
:raises APIError: if an Error occurs
Example
-------
>>> part = project.part('Frame')
>>> bike = part.parent()
"""
if self.parent_id:
return self._client.part(pk=self.parent_id, category=self.category)
else:
return None | python | def parent(self):
# type: () -> Any
"""Retrieve the parent of this `Part`.
:return: the parent :class:`Part` of this part
:raises APIError: if an Error occurs
Example
-------
>>> part = project.part('Frame')
>>> bike = part.parent()
"""
if self.parent_id:
return self._client.part(pk=self.parent_id, category=self.category)
else:
return None | [
"def",
"parent",
"(",
"self",
")",
":",
"# type: () -> Any",
"if",
"self",
".",
"parent_id",
":",
"return",
"self",
".",
"_client",
".",
"part",
"(",
"pk",
"=",
"self",
".",
"parent_id",
",",
"category",
"=",
"self",
".",
"category",
")",
"else",
":",
"return",
"None"
] | Retrieve the parent of this `Part`.
:return: the parent :class:`Part` of this part
:raises APIError: if an Error occurs
Example
-------
>>> part = project.part('Frame')
>>> bike = part.parent() | [
"Retrieve",
"the",
"parent",
"of",
"this",
"Part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L105-L122 | train |
KE-works/pykechain | pykechain/models/part.py | Part.children | def children(self, **kwargs):
"""Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call caches all children of the bike: if you call `bike.children()` twice, only one API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel')
"""
if not kwargs:
# no kwargs provided is the default, we aim to cache it.
if not self._cached_children:
self._cached_children = list(self._client.parts(parent=self.id, category=self.category))
return self._cached_children
else:
# if kwargs are provided, we assume no use of cache as specific filtering on the children is performed.
return self._client.parts(parent=self.id, category=self.category, **kwargs) | python | def children(self, **kwargs):
"""Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call caches all children of the bike: if you call `bike.children()` twice, only one API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel')
"""
if not kwargs:
# no kwargs provided is the default, we aim to cache it.
if not self._cached_children:
self._cached_children = list(self._client.parts(parent=self.id, category=self.category))
return self._cached_children
else:
# if kwargs are provided, we assume no use of cache as specific filtering on the children is performed.
return self._client.parts(parent=self.id, category=self.category, **kwargs) | [
"def",
"children",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
":",
"# no kwargs provided is the default, we aim to cache it.",
"if",
"not",
"self",
".",
"_cached_children",
":",
"self",
".",
"_cached_children",
"=",
"list",
"(",
"self",
".",
"_client",
".",
"parts",
"(",
"parent",
"=",
"self",
".",
"id",
",",
"category",
"=",
"self",
".",
"category",
")",
")",
"return",
"self",
".",
"_cached_children",
"else",
":",
"# if kwargs are provided, we assume no use of cache as specific filtering on the children is performed.",
"return",
"self",
".",
"_client",
".",
"parts",
"(",
"parent",
"=",
"self",
".",
"id",
",",
"category",
"=",
"self",
".",
"category",
",",
"*",
"*",
"kwargs",
")"
] | Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call caches all children of the bike: if you call `bike.children()` twice, only one API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel') | [
"Retrieve",
"the",
"children",
"of",
"this",
"Part",
"as",
"Partset",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L124-L163 | train |
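The caching rule above is easy to trip over: only the argument-less call populates and reuses `_cached_children`, and that cache is not refreshed by later mutations. A hedged sketch, assuming an authenticated `project` scope:

bike = project.part('Bike')
first = bike.children()    # one API call; result stored as a plain list
second = bike.children()   # served from the cache, no API call
assert first is second     # the very same list object is returned
filtered = bike.children(name__icontains='wheel')  # with kwargs: always a fresh API call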
KE-works/pykechain | pykechain/models/part.py | Part.siblings | def siblings(self, **kwargs):
# type: (Any) -> Any
"""Retrieve the siblings of this `Part` as `Partset`.
Siblings are other Parts sharing the same parent of this `Part`, including the part itself.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no siblings.
:raises APIError: When an error occurs.
"""
if self.parent_id:
return self._client.parts(parent=self.parent_id, category=self.category, **kwargs)
else:
from pykechain.models.partset import PartSet
return PartSet(parts=[]) | python | def siblings(self, **kwargs):
# type: (Any) -> Any
"""Retrieve the siblings of this `Part` as `Partset`.
Siblings are other Parts sharing the same parent of this `Part`, including the part itself.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no siblings.
:raises APIError: When an error occurs.
"""
if self.parent_id:
return self._client.parts(parent=self.parent_id, category=self.category, **kwargs)
else:
from pykechain.models.partset import PartSet
return PartSet(parts=[]) | [
"def",
"siblings",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Any) -> Any",
"if",
"self",
".",
"parent_id",
":",
"return",
"self",
".",
"_client",
".",
"parts",
"(",
"parent",
"=",
"self",
".",
"parent_id",
",",
"category",
"=",
"self",
".",
"category",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"from",
"pykechain",
".",
"models",
".",
"partset",
"import",
"PartSet",
"return",
"PartSet",
"(",
"parts",
"=",
"[",
"]",
")"
] | Retrieve the siblings of this `Part` as `Partset`.
Siblings are other Parts sharing the same parent of this `Part`, including the part itself.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no siblings.
:raises APIError: When an error occurs. | [
"Retrieve",
"the",
"siblings",
"of",
"this",
"Part",
"as",
"Partset",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L165-L181 | train |
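`siblings()` ships without an Example section; a hedged sketch, assuming an authenticated `project` scope (the part names are hypothetical):

wheel = project.part('Front Wheel')
for sibling in wheel.siblings():  # includes 'Front Wheel' itself
    print(sibling.name)

root = project.part('Product')      # a root part without a parent
assert list(root.siblings()) == []  # falls back to an empty PartSet instead of raising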
KE-works/pykechain | pykechain/models/part.py | Part.model | def model(self):
"""
Retrieve the model of this `Part` as `Part`.
For instance, you can get the part model of a part instance. But trying to get the model of a part that
has no model, like a part model, will raise a :exc:`NotFoundError`.
.. versionadded:: 1.8
:return: the model of this part instance as :class:`Part` with category `MODEL`
:raises NotFoundError: if no model found
Example
-------
>>> front_fork = project.part('Front Fork')
>>> front_fork_model = front_fork.model()
"""
if self.category == Category.INSTANCE:
model_id = self._json_data['model'].get('id')
return self._client.model(pk=model_id)
else:
raise NotFoundError("Part {} has no model".format(self.name)) | python | def model(self):
"""
Retrieve the model of this `Part` as `Part`.
For instance, you can get the part model of a part instance. But trying to get the model of a part that
has no model, like a part model, will raise a :exc:`NotFoundError`.
.. versionadded:: 1.8
:return: the model of this part instance as :class:`Part` with category `MODEL`
:raises NotFoundError: if no model found
Example
-------
>>> front_fork = project.part('Front Fork')
>>> front_fork_model = front_fork.model()
"""
if self.category == Category.INSTANCE:
model_id = self._json_data['model'].get('id')
return self._client.model(pk=model_id)
else:
raise NotFoundError("Part {} has no model".format(self.name)) | [
"def",
"model",
"(",
"self",
")",
":",
"if",
"self",
".",
"category",
"==",
"Category",
".",
"INSTANCE",
":",
"model_id",
"=",
"self",
".",
"_json_data",
"[",
"'model'",
"]",
".",
"get",
"(",
"'id'",
")",
"return",
"self",
".",
"_client",
".",
"model",
"(",
"pk",
"=",
"model_id",
")",
"else",
":",
"raise",
"NotFoundError",
"(",
"\"Part {} has no model\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")"
] | Retrieve the model of this `Part` as `Part`.
For instance, you can get the part model of a part instance. But trying to get the model of a part that
has no model, like a part model, will raise a :exc:`NotFoundError`.
.. versionadded:: 1.8
:return: the model of this part instance as :class:`Part` with category `MODEL`
:raises NotFoundError: if no model found
Example
-------
>>> front_fork = project.part('Front Fork')
>>> front_fork_model = front_fork.model() | [
"Retrieve",
"the",
"model",
"of",
"this",
"Part",
"as",
"Part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L183-L205 | train |
KE-works/pykechain | pykechain/models/part.py | Part.instances | def instances(self, **kwargs):
"""
Retrieve the instances of this `Part` as a `PartSet`.
For instance, if you have a model part, you can get the list of instances that are created based on this
model. If there are no instances (only possible if the multiplicity is :attr:`enums.Multiplicity.ZERO_MANY`)
then a :exc:`NotFoundError` is raised.
.. versionadded:: 1.8
:return: the instances of this part model :class:`PartSet` with category `INSTANCE`
:raises NotFoundError: if no instances found
Example
-------
>>> wheel_model = project.model('Wheel')
>>> wheel_instance_set = wheel_model.instances()
An example with retrieving the front wheels only using the 'name__contains' search argument.
>>> wheel_model = project.model('Wheel')
>>> front_wheel_instances = wheel_model.instances(name__contains='Front')
"""
if self.category == Category.MODEL:
return self._client.parts(model=self, category=Category.INSTANCE, **kwargs)
else:
raise NotFoundError("Part {} is not a model".format(self.name)) | python | def instances(self, **kwargs):
"""
Retrieve the instances of this `Part` as a `PartSet`.
For instance, if you have a model part, you can get the list of instances that are created based on this
model. If there are no instances (only possible if the multiplicity is :attr:`enums.Multiplicity.ZERO_MANY`)
then a :exc:`NotFoundError` is raised.
.. versionadded:: 1.8
:return: the instances of this part model :class:`PartSet` with category `INSTANCE`
:raises NotFoundError: if no instances found
Example
-------
>>> wheel_model = project.model('Wheel')
>>> wheel_instance_set = wheel_model.instances()
An example with retrieving the front wheels only using the 'name__contains' search argument.
>>> wheel_model = project.model('Wheel')
>>> front_wheel_instances = wheel_model.instances(name__contains='Front')
"""
if self.category == Category.MODEL:
return self._client.parts(model=self, category=Category.INSTANCE, **kwargs)
else:
raise NotFoundError("Part {} is not a model".format(self.name)) | [
"def",
"instances",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"category",
"==",
"Category",
".",
"MODEL",
":",
"return",
"self",
".",
"_client",
".",
"parts",
"(",
"model",
"=",
"self",
",",
"category",
"=",
"Category",
".",
"INSTANCE",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"NotFoundError",
"(",
"\"Part {} is not a model\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")"
] | Retrieve the instances of this `Part` as a `PartSet`.
For instance, if you have a model part, you can get the list of instances that are created based on this
model. If there are no instances (only possible if the multiplicity is :attr:`enums.Multiplicity.ZERO_MANY`)
then a :exc:`NotFoundError` is raised.
.. versionadded:: 1.8
:return: the instances of this part model :class:`PartSet` with category `INSTANCE`
:raises NotFoundError: if no instances found
Example
-------
>>> wheel_model = project.model('Wheel')
>>> wheel_instance_set = wheel_model.instances()
An example with retrieving the front wheels only using the 'name__contains' search argument.
>>> wheel_model = project.model('Wheel')
>>> front_wheel_instances = wheel_model.instances(name__contains='Front') | [
"Retrieve",
"the",
"instances",
"of",
"this",
"Part",
"as",
"a",
"PartSet",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L207-L234 | train |
KE-works/pykechain | pykechain/models/part.py | Part.proxy_model | def proxy_model(self):
"""
Retrieve the proxy model of this proxied `Part` as a `Part`.
Allows you to retrieve the model of a proxy. But trying to get the catalog model of a part that
has no proxy will raise a :exc:`NotFoundError`. Only models can have a proxy.
:return: :class:`Part` with category `MODEL` and from which the current part is proxied
:raises NotFoundError: When no proxy model is found
Example
-------
>>> proxy_part = project.model('Proxy based on catalog model')
>>> catalog_model_of_proxy_part = proxy_part.proxy_model()
>>> proxied_material_of_the_bolt_model = project.model('Bolt Material')
>>> proxy_basis_for_the_material_model = proxied_material_of_the_bolt_model.proxy_model()
"""
if self.category != Category.MODEL:
raise IllegalArgumentError("Part {} is not a model, therefore it cannot have a proxy model".format(self))
if 'proxy' in self._json_data and self._json_data.get('proxy'):
catalog_model_id = self._json_data['proxy'].get('id')
return self._client.model(pk=catalog_model_id)
else:
raise NotFoundError("Part {} is not a proxy".format(self.name)) | python | def proxy_model(self):
"""
Retrieve the proxy model of this proxied `Part` as a `Part`.
Allows you to retrieve the model of a proxy. But trying to get the catalog model of a part that
has no proxy will raise a :exc:`NotFoundError`. Only models can have a proxy.
:return: :class:`Part` with category `MODEL` and from which the current part is proxied
:raises NotFoundError: When no proxy model is found
Example
-------
>>> proxy_part = project.model('Proxy based on catalog model')
>>> catalog_model_of_proxy_part = proxy_part.proxy_model()
>>> proxied_material_of_the_bolt_model = project.model('Bolt Material')
>>> proxy_basis_for_the_material_model = proxied_material_of_the_bolt_model.proxy_model()
"""
if self.category != Category.MODEL:
raise IllegalArgumentError("Part {} is not a model, therefore it cannot have a proxy model".format(self))
if 'proxy' in self._json_data and self._json_data.get('proxy'):
catalog_model_id = self._json_data['proxy'].get('id')
return self._client.model(pk=catalog_model_id)
else:
raise NotFoundError("Part {} is not a proxy".format(self.name)) | [
"def",
"proxy_model",
"(",
"self",
")",
":",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"IllegalArgumentError",
"(",
"\"Part {} is not a model, therefore it cannot have a proxy model\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"'proxy'",
"in",
"self",
".",
"_json_data",
"and",
"self",
".",
"_json_data",
".",
"get",
"(",
"'proxy'",
")",
":",
"catalog_model_id",
"=",
"self",
".",
"_json_data",
"[",
"'proxy'",
"]",
".",
"get",
"(",
"'id'",
")",
"return",
"self",
".",
"_client",
".",
"model",
"(",
"pk",
"=",
"catalog_model_id",
")",
"else",
":",
"raise",
"NotFoundError",
"(",
"\"Part {} is not a proxy\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")"
] | Retrieve the proxy model of this proxied `Part` as a `Part`.
Allows you to retrieve the model of a proxy. But trying to get the catalog model of a part that
has no proxy will raise a :exc:`NotFoundError`. Only models can have a proxy.
:return: :class:`Part` with category `MODEL` and from which the current part is proxied
:raises NotFoundError: When no proxy model is found
Example
-------
>>> proxy_part = project.model('Proxy based on catalog model')
>>> catalog_model_of_proxy_part = proxy_part.proxy_model()
>>> proxied_material_of_the_bolt_model = project.model('Bolt Material')
>>> proxy_basis_for_the_material_model = proxied_material_of_the_bolt_model.proxy_model() | [
"Retrieve",
"the",
"proxy",
"model",
"of",
"this",
"proxied",
"Part",
"as",
"a",
"Part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L255-L281 | train |
KE-works/pykechain | pykechain/models/part.py | Part.add | def add(self, model, **kwargs):
# type: (Part, **Any) -> Part
"""Add a new child instance, based on a model, to this part.
This can only act on instances. It needs a model from which to create the child instance.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model from which to create the child instance
:type model: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: :class:`Part` with category `INSTANCE`.
:raises APIError: if unable to add the new child instance
Example
-------
>>> bike = project.part('Bike')
>>> wheel_model = project.model('Wheel')
>>> bike.add(wheel_model)
"""
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
return self._client.create_part(self, model, **kwargs) | python | def add(self, model, **kwargs):
# type: (Part, **Any) -> Part
"""Add a new child instance, based on a model, to this part.
This can only act on instances. It needs a model from which to create the child instance.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model from which to create the child instance
:type model: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: :class:`Part` with category `INSTANCE`.
:raises APIError: if unable to add the new child instance
Example
-------
>>> bike = project.part('Bike')
>>> wheel_model = project.model('Wheel')
>>> bike.add(wheel_model)
"""
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
return self._client.create_part(self, model, **kwargs) | [
"def",
"add",
"(",
"self",
",",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Part, **Any) -> Part",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"INSTANCE",
":",
"raise",
"APIError",
"(",
"\"Part should be of category INSTANCE\"",
")",
"return",
"self",
".",
"_client",
".",
"create_part",
"(",
"self",
",",
"model",
",",
"*",
"*",
"kwargs",
")"
] | Add a new child instance, based on a model, to this part.
This can only act on instances. It needs a model from which to create the child instance.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model from which to create the child instance
:type model: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: :class:`Part` with category `INSTANCE`.
:raises APIError: if unable to add the new child instance
Example
-------
>>> bike = project.part('Bike')
>>> wheel_model = project.model('Wheel')
>>> bike.add(wheel_model) | [
"Add",
"a",
"new",
"child",
"instance",
"based",
"on",
"a",
"model",
"to",
"this",
"part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L283-L311 | train |
KE-works/pykechain | pykechain/models/part.py | Part.add_to | def add_to(self, parent, **kwargs):
# type: (Part, **Any) -> Part
"""Add a new instance of this model to a part.
This works if the current part is a model and an instance of this model is to be added
to a part instance in the tree.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param parent: part to add the new instance to
:type parent: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments passed during the edit/update request
:type kwargs: dict or None
:return: :class:`Part` with category `INSTANCE`
:raises APIError: if unable to add the new child instance
Example
-------
>>> wheel_model = project.model('wheel')
>>> bike = project.part('Bike')
>>> wheel_model.add_to(bike)
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_part(parent, self, **kwargs) | python | def add_to(self, parent, **kwargs):
# type: (Part, **Any) -> Part
"""Add a new instance of this model to a part.
This works if the current part is a model and an instance of this model is to be added
to a part instance in the tree.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param parent: part to add the new instance to
:type parent: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments passed during the edit/update request
:type kwargs: dict or None
:return: :class:`Part` with category `INSTANCE`
:raises APIError: if unable to add the new child instance
Example
-------
>>> wheel_model = project.model('wheel')
>>> bike = project.part('Bike')
>>> wheel_model.add_to(bike)
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_part(parent, self, **kwargs) | [
"def",
"add_to",
"(",
"self",
",",
"parent",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Part, **Any) -> Part",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"APIError",
"(",
"\"Part should be of category MODEL\"",
")",
"return",
"self",
".",
"_client",
".",
"create_part",
"(",
"parent",
",",
"self",
",",
"*",
"*",
"kwargs",
")"
] | Add a new instance of this model to a part.
This works if the current part is a model and an instance of this model is to be added
to a part instance in the tree.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param parent: part to add the new instance to
:type parent: :class:`Part`
:param kwargs: (optional) additional keyword=value arguments passed during the edit/update request
:type kwargs: dict or None
:return: :class:`Part` with category `INSTANCE`
:raises APIError: if unable to add the new child instance
Example
-------
>>> wheel_model = project.model('wheel')
>>> bike = project.part('Bike')
>>> wheel_model.add_to(bike) | [
"Add",
"a",
"new",
"instance",
"of",
"this",
"model",
"to",
"a",
"part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L313-L344 | train |
KE-works/pykechain | pykechain/models/part.py | Part.add_model | def add_model(self, *args, **kwargs):
# type: (*Any, **Any) -> Part
"""Add a new child model to this model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL`
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_model(self, *args, **kwargs) | python | def add_model(self, *args, **kwargs):
# type: (*Any, **Any) -> Part
"""Add a new child model to this model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL`
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_model(self, *args, **kwargs) | [
"def",
"add_model",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (*Any, **Any) -> Part",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"APIError",
"(",
"\"Part should be of category MODEL\"",
")",
"return",
"self",
".",
"_client",
".",
"create_model",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Add a new child model to this model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL` | [
"Add",
"a",
"new",
"child",
"model",
"to",
"this",
"model",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L346-L360 | train |
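`add_model()` also has no Example section; a hedged sketch of the pass-through to `Client.create_model`, where the `multiplicity` keyword is an assumption based on that client method:

bike_model = project.model('Bike')
lock_model = bike_model.add_model('Lock', multiplicity='ONE')  # new child model under Bike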
KE-works/pykechain | pykechain/models/part.py | Part.add_property | def add_property(self, *args, **kwargs):
# type: (*Any, **Any) -> Property
"""Add a new property to this model.
See :class:`pykechain.Client.create_property` for available parameters.
:return: :class:`Property`
:raises APIError: in case an Error occurs
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_property(self, *args, **kwargs) | python | def add_property(self, *args, **kwargs):
# type: (*Any, **Any) -> Property
"""Add a new property to this model.
See :class:`pykechain.Client.create_property` for available parameters.
:return: :class:`Property`
:raises APIError: in case an Error occurs
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_property(self, *args, **kwargs) | [
"def",
"add_property",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (*Any, **Any) -> Property",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"APIError",
"(",
"\"Part should be of category MODEL\"",
")",
"return",
"self",
".",
"_client",
".",
"create_property",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Add a new property to this model.
See :class:`pykechain.Client.create_property` for available parameters.
:return: :class:`Property`
:raises APIError: in case an Error occurs | [
"Add",
"a",
"new",
"property",
"to",
"this",
"model",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L398-L410 | train |
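Likewise for `add_property()`; a hedged sketch in which the `property_type` and `default_value` keywords are assumed from `Client.create_property` and may differ per pykechain version:

wheel_model = project.model('Wheel')
weight = wheel_model.add_property(name='Weight', property_type='FLOAT', default_value=1.2)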
KE-works/pykechain | pykechain/models/part.py | Part.update | def update(self, name=None, update_dict=None, bulk=True, **kwargs):
"""
Edit part name and property values in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param name: new part name (defined as a string)
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property ids (uuid)
and values being property values
:type update_dict: dict
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the updated :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises IllegalArgumentError: when the type or value of an argument provided is incorrect
:raises APIError: in case an Error occurs
Example
-------
>>> bike = client.scope('Bike Project').part('Bike')
>>> bike.update(name='Good name', update_dict={'Gears': 11, 'Total Height': 56.3}, bulk=True)
"""
# the request body is dict(name=name, properties=json.dumps({property_id: value}))
action = 'bulk_update_properties'
request_body = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
request_body[prop_name_or_id] = property_value
else:
request_body[self.property(prop_name_or_id).id] = property_value
if bulk and len(update_dict.keys()) > 1:
if name:
if not isinstance(name, str):
raise IllegalArgumentError("Name of the part should be provided as a string")
r = self._client._request('PUT', self._client._build_url('part', part_id=self.id),
data=dict(name=name, properties=json.dumps(request_body), **kwargs),
params=dict(select_action=action))
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError('{}: {}'.format(str(r), r.content))
else:
for property_name, property_value in update_dict.items():
self.property(property_name).value = property_value | python | def update(self, name=None, update_dict=None, bulk=True, **kwargs):
"""
Edit part name and property values in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param name: new part name (defined as a string)
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property ids (uuid)
and values being property values
:type update_dict: dict
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the updated :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises IllegalArgumentError: when the type or value of an argument provided is incorrect
:raises APIError: in case an Error occurs
Example
-------
>>> bike = client.scope('Bike Project').part('Bike')
>>> bike.update(name='Good name', update_dict={'Gears': 11, 'Total Height': 56.3}, bulk=True)
"""
# the request body is dict(name=name, properties=json.dumps({property_id: value}))
action = 'bulk_update_properties'
request_body = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
request_body[prop_name_or_id] = property_value
else:
request_body[self.property(prop_name_or_id).id] = property_value
if bulk and len(update_dict.keys()) > 1:
if name:
if not isinstance(name, str):
raise IllegalArgumentError("Name of the part should be provided as a string")
r = self._client._request('PUT', self._client._build_url('part', part_id=self.id),
data=dict(name=name, properties=json.dumps(request_body), **kwargs),
params=dict(select_action=action))
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError('{}: {}'.format(str(r), r.content))
else:
for property_name, property_value in update_dict.items():
self.property(property_name).value = property_value | [
"def",
"update",
"(",
"self",
",",
"name",
"=",
"None",
",",
"update_dict",
"=",
"None",
",",
"bulk",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# dict(name=name, properties=json.dumps(update_dict))) with property ids:value",
"action",
"=",
"'bulk_update_properties'",
"request_body",
"=",
"dict",
"(",
")",
"for",
"prop_name_or_id",
",",
"property_value",
"in",
"update_dict",
".",
"items",
"(",
")",
":",
"if",
"is_uuid",
"(",
"prop_name_or_id",
")",
":",
"request_body",
"[",
"prop_name_or_id",
"]",
"=",
"property_value",
"else",
":",
"request_body",
"[",
"self",
".",
"property",
"(",
"prop_name_or_id",
")",
".",
"id",
"]",
"=",
"property_value",
"if",
"bulk",
"and",
"len",
"(",
"update_dict",
".",
"keys",
"(",
")",
")",
">",
"1",
":",
"if",
"name",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"str",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"\"Name of the part should be provided as a string\"",
")",
"r",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'PUT'",
",",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'part'",
",",
"part_id",
"=",
"self",
".",
"id",
")",
",",
"data",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"properties",
"=",
"json",
".",
"dumps",
"(",
"request_body",
")",
",",
"*",
"*",
"kwargs",
")",
",",
"params",
"=",
"dict",
"(",
"select_action",
"=",
"action",
")",
")",
"if",
"r",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"'{}: {}'",
".",
"format",
"(",
"str",
"(",
"r",
")",
",",
"r",
".",
"content",
")",
")",
"else",
":",
"for",
"property_name",
",",
"property_value",
"in",
"update_dict",
".",
"items",
"(",
")",
":",
"self",
".",
"property",
"(",
"property_name",
")",
".",
"value",
"=",
"property_value"
] | Edit part name and property values in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param name: new part name (defined as a string)
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property ids (uuid)
and values being property values
:type update_dict: dict
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the updated :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises IllegalArgumentError: when the type or value of an argument provided is incorrect
:raises APIError: in case an Error occurs
Example
-------
>>> bike = client.scope('Bike Project').part('Bike')
>>> bike.update(name='Good name', update_dict={'Gears': 11, 'Total Height': 56.3}, bulk=True) | [
"Edit",
"part",
"name",
"and",
"property",
"values",
"in",
"one",
"go",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L505-L556 | train |
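Note the branch condition in the code above: the bulk endpoint is only used when `update_dict` holds more than one key, so a single-entry update falls through to per-property writes and a `name` passed alongside it is silently ignored. A hedged sketch, assuming an authenticated `client`:

bike = client.scope('Bike Project').part('Bike')
# two or more properties: one bulk PUT, and the rename is applied too
bike.update(name='City Bike', update_dict={'Gears': 11, 'Total Height': 56.3})
# a single property with bulk=True takes the per-property branch,
# so the value is written but name='Ignored' never reaches the API
bike.update(name='Ignored', update_dict={'Gears': 12})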
KE-works/pykechain | pykechain/models/part.py | Part.add_with_properties | def add_with_properties(self, model, name=None, update_dict=None, bulk=True, **kwargs):
"""
Add a part and update its properties in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model of the part for which to add a new instance; it should follow the model tree in KE-chain
:type model: :class:`Part`
:param name: (optional) name provided for the new instance as a string; otherwise the name of the model is used
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property_id (from the property models)
and values being property values
:type update_dict: dict or None
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the newly created :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises APIError: in case an Error occurs
Examples
--------
>>> bike = client.scope('Bike Project').part('Bike')
>>> wheel_model = client.scope('Bike Project').model('Wheel')
>>> bike.add_with_properties(wheel_model, 'Wooden Wheel', {'Spokes': 11, 'Material': 'Wood'})
"""
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
name = name or model.name
action = 'new_instance_with_properties'
properties_update_dict = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
properties_update_dict[prop_name_or_id] = property_value
else:
properties_update_dict[model.property(prop_name_or_id).id] = property_value
if bulk:
r = self._client._request('POST', self._client._build_url('parts'),
data=dict(
name=name,
model=model.id,
parent=self.id,
properties=json.dumps(properties_update_dict),
**kwargs
),
params=dict(select_action=action))
if r.status_code != requests.codes.created: # pragma: no cover
raise APIError('{}: {}'.format(str(r), r.content))
return Part(r.json()['results'][0], client=self._client)
else: # do the old way
new_part = self.add(model, name=name) # type: Part
new_part.update(update_dict=update_dict, bulk=bulk)
return new_part | python | def add_with_properties(self, model, name=None, update_dict=None, bulk=True, **kwargs):
"""
Add a part and update its properties in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model of the part for which to add a new instance; it should follow the model tree in KE-chain
:type model: :class:`Part`
:param name: (optional) name provided for the new instance as a string; otherwise the name of the model is used
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property_id (from the property models)
and values being property values
:type update_dict: dict or None
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the newly created :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises APIError: in case an Error occurs
Examples
--------
>>> bike = client.scope('Bike Project').part('Bike')
>>> wheel_model = client.scope('Bike Project').model('Wheel')
>>> bike.add_with_properties(wheel_model, 'Wooden Wheel', {'Spokes': 11, 'Material': 'Wood'})
"""
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
name = name or model.name
action = 'new_instance_with_properties'
properties_update_dict = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
properties_update_dict[prop_name_or_id] = property_value
else:
properties_update_dict[model.property(prop_name_or_id).id] = property_value
if bulk:
r = self._client._request('POST', self._client._build_url('parts'),
data=dict(
name=name,
model=model.id,
parent=self.id,
properties=json.dumps(properties_update_dict),
**kwargs
),
params=dict(select_action=action))
if r.status_code != requests.codes.created: # pragma: no cover
raise APIError('{}: {}'.format(str(r), r.content))
return Part(r.json()['results'][0], client=self._client)
else: # do the old way
new_part = self.add(model, name=name) # type: Part
new_part.update(update_dict=update_dict, bulk=bulk)
return new_part | [
"def",
"add_with_properties",
"(",
"self",
",",
"model",
",",
"name",
"=",
"None",
",",
"update_dict",
"=",
"None",
",",
"bulk",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"INSTANCE",
":",
"raise",
"APIError",
"(",
"\"Part should be of category INSTANCE\"",
")",
"name",
"=",
"name",
"or",
"model",
".",
"name",
"action",
"=",
"'new_instance_with_properties'",
"properties_update_dict",
"=",
"dict",
"(",
")",
"for",
"prop_name_or_id",
",",
"property_value",
"in",
"update_dict",
".",
"items",
"(",
")",
":",
"if",
"is_uuid",
"(",
"prop_name_or_id",
")",
":",
"properties_update_dict",
"[",
"prop_name_or_id",
"]",
"=",
"property_value",
"else",
":",
"properties_update_dict",
"[",
"model",
".",
"property",
"(",
"prop_name_or_id",
")",
".",
"id",
"]",
"=",
"property_value",
"if",
"bulk",
":",
"r",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'POST'",
",",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'parts'",
")",
",",
"data",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"model",
"=",
"model",
".",
"id",
",",
"parent",
"=",
"self",
".",
"id",
",",
"properties",
"=",
"json",
".",
"dumps",
"(",
"properties_update_dict",
")",
",",
"*",
"*",
"kwargs",
")",
",",
"params",
"=",
"dict",
"(",
"select_action",
"=",
"action",
")",
")",
"if",
"r",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"created",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"'{}: {}'",
".",
"format",
"(",
"str",
"(",
"r",
")",
",",
"r",
".",
"content",
")",
")",
"return",
"Part",
"(",
"r",
".",
"json",
"(",
")",
"[",
"'results'",
"]",
"[",
"0",
"]",
",",
"client",
"=",
"self",
".",
"_client",
")",
"else",
":",
"# do the old way",
"new_part",
"=",
"self",
".",
"add",
"(",
"model",
",",
"name",
"=",
"name",
")",
"# type: Part",
"new_part",
".",
"update",
"(",
"update_dict",
"=",
"update_dict",
",",
"bulk",
"=",
"bulk",
")",
"return",
"new_part"
] | Add a part and update its properties in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model of the part for which to add a new instance; it should follow the model tree in KE-chain
:type model: :class:`Part`
:param name: (optional) name provided for the new instance as a string; otherwise the name of the model is used
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property_id (from the property models)
and values being property values
:type update_dict: dict or None
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the newly created :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises APIError: in case an Error occurs
Examples
--------
>>> bike = client.scope('Bike Project').part('Bike')
>>> wheel_model = client.scope('Bike Project').model('Wheel')
>>> bike.add_with_properties(wheel_model, 'Wooden Wheel', {'Spokes': 11, 'Material': 'Wood'}) | [
"Add",
"a",
"part",
"and",
"update",
"its",
"properties",
"in",
"one",
"go",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L558-L618 | train |
KE-works/pykechain | pykechain/models/part.py | Part.order_properties | def order_properties(self, property_list=None):
"""
Order the properties of a part model using a list of property objects or property names or property id's.
:param property_list: ordered list of property names (basestring) or property id's (uuid)
:type property_list: list(basestring)
:returns: the :class:`Part` with the reordered list of properties
:raises APIError: when an Error occurs
:raises IllegalArgumentError: When provided a wrong argument
Examples
--------
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> front_fork.order_properties(['Material', 'Height (mm)', 'Color'])
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> material = front_fork.property('Material')
>>> height = front_fork.property('Height (mm)')
>>> color = front_fork.property('Color')
>>> front_fork.order_properties([material, height, color])
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
if not isinstance(property_list, list):
raise IllegalArgumentError('Expected a list of strings or Property() objects, got a {} object'.
format(type(property_list)))
order_dict = dict()
for prop in property_list:
if isinstance(prop, (str, text_type)):
order_dict[self.property(name=prop).id] = property_list.index(prop)
else:
order_dict[prop.id] = property_list.index(prop)
r = self._client._request('PUT', self._client._build_url('part', part_id=self.id),
data=dict(
property_order=json.dumps(order_dict)
))
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not reorder properties") | python | def order_properties(self, property_list=None):
"""
Order the properties of a part model using a list of property objects or property names or property id's.
:param property_list: ordered list of property names (basestring) or property id's (uuid)
:type property_list: list(basestring)
:returns: the :class:`Part` with the reordered list of properties
:raises APIError: when an Error occurs
:raises IllegalArgumentError: When provided a wrong argument
Examples
--------
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> front_fork.order_properties(['Material', 'Height (mm)', 'Color'])
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> material = front_fork.property('Material')
>>> height = front_fork.property('Height (mm)')
>>> color = front_fork.property('Color')
>>> front_fork.order_properties([material, height, color])
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
if not isinstance(property_list, list):
raise IllegalArgumentError('Expected a list of strings or Property() objects, got a {} object'.
format(type(property_list)))
order_dict = dict()
for prop in property_list:
if isinstance(prop, (str, text_type)):
order_dict[self.property(name=prop).id] = property_list.index(prop)
else:
order_dict[prop.id] = property_list.index(prop)
r = self._client._request('PUT', self._client._build_url('part', part_id=self.id),
data=dict(
property_order=json.dumps(order_dict)
))
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not reorder properties") | [
"def",
"order_properties",
"(",
"self",
",",
"property_list",
"=",
"None",
")",
":",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"APIError",
"(",
"\"Part should be of category MODEL\"",
")",
"if",
"not",
"isinstance",
"(",
"property_list",
",",
"list",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"'Expected a list of strings or Property() objects, got a {} object'",
".",
"format",
"(",
"type",
"(",
"property_list",
")",
")",
")",
"order_dict",
"=",
"dict",
"(",
")",
"for",
"prop",
"in",
"property_list",
":",
"if",
"isinstance",
"(",
"prop",
",",
"(",
"str",
",",
"text_type",
")",
")",
":",
"order_dict",
"[",
"self",
".",
"property",
"(",
"name",
"=",
"prop",
")",
".",
"id",
"]",
"=",
"property_list",
".",
"index",
"(",
"prop",
")",
"else",
":",
"order_dict",
"[",
"prop",
".",
"id",
"]",
"=",
"property_list",
".",
"index",
"(",
"prop",
")",
"r",
"=",
"self",
".",
"_client",
".",
"_request",
"(",
"'PUT'",
",",
"self",
".",
"_client",
".",
"_build_url",
"(",
"'part'",
",",
"part_id",
"=",
"self",
".",
"id",
")",
",",
"data",
"=",
"dict",
"(",
"property_order",
"=",
"json",
".",
"dumps",
"(",
"order_dict",
")",
")",
")",
"if",
"r",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"ok",
":",
"# pragma: no cover",
"raise",
"APIError",
"(",
"\"Could not reorder properties\"",
")"
] | Order the properties of a part model using a list of property objects or property names or property id's.
:param property_list: ordered list of property names (basestring) or property id's (uuid)
:type property_list: list(basestring)
:returns: the :class:`Part` with the reordered list of properties
:raises APIError: when an Error occurs
:raises IllegalArgumentError: When provided a wrong argument
Examples
--------
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> front_fork.order_properties(['Material', 'Height (mm)', 'Color'])
>>> front_fork = client.scope('Bike Project').model('Front Fork')
>>> material = front_fork.property('Material')
>>> height = front_fork.property('Height (mm)')
>>> color = front_fork.property('Color')
>>> front_fork.order_properties([material, height, color]) | [
"Order",
"the",
"properties",
"of",
"a",
"part",
"model",
"using",
"a",
"list",
"of",
"property",
"objects",
"or",
"property",
"names",
"or",
"property",
"id",
"s",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L644-L685 | train |
KE-works/pykechain | pykechain/models/part.py | Part.clone | def clone(self, **kwargs):
"""
Clone a part.
.. versionadded:: 2.3
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: cloned :class:`models.Part`
:raises APIError: if the `Part` could not be cloned
Example
-------
>>> bike = client.model('Bike')
>>> bike2 = bike.clone()
"""
parent = self.parent()
return self._client._create_clone(parent, self, **kwargs) | python | def clone(self, **kwargs):
"""
Clone a part.
.. versionadded:: 2.3
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: cloned :class:`models.Part`
:raises APIError: if the `Part` could not be cloned
Example
-------
>>> bike = client.model('Bike')
>>> bike2 = bike.clone()
"""
parent = self.parent()
return self._client._create_clone(parent, self, **kwargs) | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"parent",
"=",
"self",
".",
"parent",
"(",
")",
"return",
"self",
".",
"_client",
".",
"_create_clone",
"(",
"parent",
",",
"self",
",",
"*",
"*",
"kwargs",
")"
] | Clone a part.
.. versionadded:: 2.3
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: cloned :class:`models.Part`
:raises APIError: if the `Part` could not be cloned
Example
-------
>>> bike = client.model('Bike')
>>> bike2 = bike.clone() | [
"Clone",
"a",
"part",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L719-L737 | train |
KE-works/pykechain | pykechain/models/part.py | Part.copy | def copy(self, target_parent, name=None, include_children=True, include_instances=True):
"""
Copy the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is copied
:type target_parent: :class:`Part`
:param name: how the copied top-level `Part` should be called
:type name: basestring
:param include_children: True to copy also the descendants of `Part`.
:type include_children: bool
:param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: copied :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if part and target_parent are identical
Example
-------
>>> model_to_copy = client.model(name='Model to be copied')
>>> bike = client.model('Bike')
>>> model_to_copy.copy(target_parent=bike, name='Copied model',
>>> include_children=True,
>>> include_instances=True)
"""
if self.category == Category.MODEL and target_parent.category == Category.MODEL:
# Cannot add a model under an instance or vice versa
copied_model = relocate_model(part=self, target_parent=target_parent, name=name,
include_children=include_children)
if include_instances:
instances_to_be_copied = list(self.instances())
parent_instances = list(target_parent.instances())
for parent_instance in parent_instances:
for instance in instances_to_be_copied:
instance.populate_descendants()
move_part_instance(part_instance=instance, target_parent=parent_instance,
part_model=self, name=instance.name, include_children=include_children)
return copied_model
elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
copied_instance = relocate_instance(part=self, target_parent=target_parent, name=name,
include_children=include_children)
return copied_instance
else:
        raise IllegalArgumentError('part "{}" and target parent "{}" must have the same category'.format(self.name, target_parent.name)) | python | def copy(self, target_parent, name=None, include_children=True, include_instances=True):
"""
Copy the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is copied
:type target_parent: :class:`Part`
:param name: how the copied top-level `Part` should be called
:type name: basestring
:param include_children: True to copy also the descendants of `Part`.
:type include_children: bool
:param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: copied :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if part and target_parent are identical
Example
-------
>>> model_to_copy = client.model(name='Model to be copied')
>>> bike = client.model('Bike')
>>> model_to_copy.copy(target_parent=bike, name='Copied model',
>>> include_children=True,
>>> include_instances=True)
"""
if self.category == Category.MODEL and target_parent.category == Category.MODEL:
# Cannot add a model under an instance or vice versa
copied_model = relocate_model(part=self, target_parent=target_parent, name=name,
include_children=include_children)
if include_instances:
instances_to_be_copied = list(self.instances())
parent_instances = list(target_parent.instances())
for parent_instance in parent_instances:
for instance in instances_to_be_copied:
instance.populate_descendants()
move_part_instance(part_instance=instance, target_parent=parent_instance,
part_model=self, name=instance.name, include_children=include_children)
return copied_model
elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
copied_instance = relocate_instance(part=self, target_parent=target_parent, name=name,
include_children=include_children)
return copied_instance
else:
        raise IllegalArgumentError('part "{}" and target parent "{}" must have the same category'.format(self.name, target_parent.name)) | [
"def",
"copy",
"(",
"self",
",",
"target_parent",
",",
"name",
"=",
"None",
",",
"include_children",
"=",
"True",
",",
"include_instances",
"=",
"True",
")",
":",
"if",
"self",
".",
"category",
"==",
"Category",
".",
"MODEL",
"and",
"target_parent",
".",
"category",
"==",
"Category",
".",
"MODEL",
":",
"# Cannot add a model under an instance or vice versa",
"copied_model",
"=",
"relocate_model",
"(",
"part",
"=",
"self",
",",
"target_parent",
"=",
"target_parent",
",",
"name",
"=",
"name",
",",
"include_children",
"=",
"include_children",
")",
"if",
"include_instances",
":",
"instances_to_be_copied",
"=",
"list",
"(",
"self",
".",
"instances",
"(",
")",
")",
"parent_instances",
"=",
"list",
"(",
"target_parent",
".",
"instances",
"(",
")",
")",
"for",
"parent_instance",
"in",
"parent_instances",
":",
"for",
"instance",
"in",
"instances_to_be_copied",
":",
"instance",
".",
"populate_descendants",
"(",
")",
"move_part_instance",
"(",
"part_instance",
"=",
"instance",
",",
"target_parent",
"=",
"parent_instance",
",",
"part_model",
"=",
"self",
",",
"name",
"=",
"instance",
".",
"name",
",",
"include_children",
"=",
"include_children",
")",
"return",
"copied_model",
"elif",
"self",
".",
"category",
"==",
"Category",
".",
"INSTANCE",
"and",
"target_parent",
".",
"category",
"==",
"Category",
".",
"INSTANCE",
":",
"copied_instance",
"=",
"relocate_instance",
"(",
"part",
"=",
"self",
",",
"target_parent",
"=",
"target_parent",
",",
"name",
"=",
"name",
",",
"include_children",
"=",
"include_children",
")",
"return",
"copied_instance",
"else",
":",
"raise",
"IllegalArgumentError",
"(",
"'part \"{}\" and target parent \"{}\" must have the same category'",
")"
] | Copy the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is copied
:type target_parent: :class:`Part`
:param name: how the copied top-level `Part` should be called
:type name: basestring
:param include_children: True to copy also the descendants of `Part`.
:type include_children: bool
:param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: copied :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if part and target_parent are identical
Example
-------
>>> model_to_copy = client.model(name='Model to be copied')
>>> bike = client.model('Bike')
>>> model_to_copy.copy(target_parent=bike, name='Copied model',
>>> include_children=True,
>>> include_instances=True) | [
"Copy",
"the",
"Part",
"to",
"target",
"parent",
"both",
"of",
"them",
"having",
"the",
"same",
"category",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L739-L785 | train |
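The instance fan-out in `copy` above is a plain nested loop: every instance of the copied model is re-created under every instance of the target parent. A self-contained sketch of that pairing, with strings standing in for parts:

from itertools import product

def fan_out(parent_instances, model_instances):
    # One copy per (parent instance, model instance) pair -- the same pairing
    # the nested for-loops in copy() produce.
    return list(product(parent_instances, model_instances))

print(fan_out(["bike-A", "bike-B"], ["wheel-1", "wheel-2"]))
# [('bike-A', 'wheel-1'), ('bike-A', 'wheel-2'), ('bike-B', 'wheel-1'), ('bike-B', 'wheel-2')]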
KE-works/pykechain | pykechain/models/part.py | Part.move | def move(self, target_parent, name=None, include_children=True, include_instances=True):
"""
    Move the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is moved
:type target_parent: :class:`Part`
:param name: how the moved top-level `Part` should be called
:type name: basestring
:param include_children: True to move also the descendants of `Part`. If False, the children will be lost.
:type include_children: bool
:param include_instances: True to move also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: moved :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if target_parent is descendant of part
Example
-------
>>> model_to_move = client.model(name='Model to be moved')
>>> bike = client.model('Bike')
>>> model_to_move.move(target_parent=bike, name='Moved model',
>>> include_children=True,
>>> include_instances=True)
"""
if not name:
name = self.name
if self.category == Category.MODEL and target_parent.category == Category.MODEL:
moved_model = relocate_model(part=self, target_parent=target_parent, name=name,
include_children=include_children)
if include_instances:
retrieve_instances_to_copied = list(self.instances())
retrieve_parent_instances = list(target_parent.instances())
for parent_instance in retrieve_parent_instances:
for instance in retrieve_instances_to_copied:
instance.populate_descendants()
move_part_instance(part_instance=instance, target_parent=parent_instance,
part_model=self, name=instance.name, include_children=include_children)
self.delete()
return moved_model
elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
moved_instance = relocate_instance(part=self, target_parent=target_parent, name=name,
include_children=include_children)
try:
self.delete()
except APIError:
model_of_instance = self.model()
model_of_instance.delete()
return moved_instance
else:
        raise IllegalArgumentError('part "{}" and target parent "{}" must have the same category'.format(self.name, target_parent.name)) | python | def move(self, target_parent, name=None, include_children=True, include_instances=True):
"""
    Move the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is moved
:type target_parent: :class:`Part`
:param name: how the moved top-level `Part` should be called
:type name: basestring
:param include_children: True to move also the descendants of `Part`. If False, the children will be lost.
:type include_children: bool
:param include_instances: True to move also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: moved :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if target_parent is descendant of part
Example
-------
>>> model_to_move = client.model(name='Model to be moved')
>>> bike = client.model('Bike')
>>> model_to_move.move(target_parent=bike, name='Moved model',
>>> include_children=True,
>>> include_instances=True)
"""
if not name:
name = self.name
if self.category == Category.MODEL and target_parent.category == Category.MODEL:
moved_model = relocate_model(part=self, target_parent=target_parent, name=name,
include_children=include_children)
if include_instances:
retrieve_instances_to_copied = list(self.instances())
retrieve_parent_instances = list(target_parent.instances())
for parent_instance in retrieve_parent_instances:
for instance in retrieve_instances_to_copied:
instance.populate_descendants()
move_part_instance(part_instance=instance, target_parent=parent_instance,
part_model=self, name=instance.name, include_children=include_children)
self.delete()
return moved_model
elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
moved_instance = relocate_instance(part=self, target_parent=target_parent, name=name,
include_children=include_children)
try:
self.delete()
except APIError:
model_of_instance = self.model()
model_of_instance.delete()
return moved_instance
else:
        raise IllegalArgumentError('part "{}" and target parent "{}" must have the same category'.format(self.name, target_parent.name)) | [
"def",
"move",
"(",
"self",
",",
"target_parent",
",",
"name",
"=",
"None",
",",
"include_children",
"=",
"True",
",",
"include_instances",
"=",
"True",
")",
":",
"if",
"not",
"name",
":",
"name",
"=",
"self",
".",
"name",
"if",
"self",
".",
"category",
"==",
"Category",
".",
"MODEL",
"and",
"target_parent",
".",
"category",
"==",
"Category",
".",
"MODEL",
":",
"moved_model",
"=",
"relocate_model",
"(",
"part",
"=",
"self",
",",
"target_parent",
"=",
"target_parent",
",",
"name",
"=",
"name",
",",
"include_children",
"=",
"include_children",
")",
"if",
"include_instances",
":",
"retrieve_instances_to_copied",
"=",
"list",
"(",
"self",
".",
"instances",
"(",
")",
")",
"retrieve_parent_instances",
"=",
"list",
"(",
"target_parent",
".",
"instances",
"(",
")",
")",
"for",
"parent_instance",
"in",
"retrieve_parent_instances",
":",
"for",
"instance",
"in",
"retrieve_instances_to_copied",
":",
"instance",
".",
"populate_descendants",
"(",
")",
"move_part_instance",
"(",
"part_instance",
"=",
"instance",
",",
"target_parent",
"=",
"parent_instance",
",",
"part_model",
"=",
"self",
",",
"name",
"=",
"instance",
".",
"name",
",",
"include_children",
"=",
"include_children",
")",
"self",
".",
"delete",
"(",
")",
"return",
"moved_model",
"elif",
"self",
".",
"category",
"==",
"Category",
".",
"INSTANCE",
"and",
"target_parent",
".",
"category",
"==",
"Category",
".",
"INSTANCE",
":",
"moved_instance",
"=",
"relocate_instance",
"(",
"part",
"=",
"self",
",",
"target_parent",
"=",
"target_parent",
",",
"name",
"=",
"name",
",",
"include_children",
"=",
"include_children",
")",
"try",
":",
"self",
".",
"delete",
"(",
")",
"except",
"APIError",
":",
"model_of_instance",
"=",
"self",
".",
"model",
"(",
")",
"model_of_instance",
".",
"delete",
"(",
")",
"return",
"moved_instance",
"else",
":",
"raise",
"IllegalArgumentError",
"(",
"'part \"{}\" and target parent \"{}\" must have the same category'",
")"
] | Move the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is moved
:type target_parent: :class:`Part`
:param name: how the moved top-level `Part` should be called
:type name: basestring
:param include_children: True to move also the descendants of `Part`. If False, the children will be lost.
:type include_children: bool
:param include_instances: True to move also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: moved :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if target_parent is descendant of part
Example
-------
>>> model_to_move = client.model(name='Model to be moved')
>>> bike = client.model('Bike')
>>> model_to_move.move(target_parent=bike, name='Moved model',
>>> include_children=True,
>>> include_instances=True) | [
"Move",
"the",
"Part",
"to",
"target",
"parent",
"both",
"of",
"them",
"the",
"same",
"category",
"."
] | b0296cf34328fd41660bf6f0b9114fd0167c40c4 | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L787-L839 | train |
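`move` is effectively `copy` followed by deleting the original, and for instances a failed delete falls back to deleting the instance's model. A sketch of that fallback pattern with hypothetical stand-in classes (the real method relies on pykechain's APIError and Part objects):

class APIError(Exception):
    pass

class FakeModel(object):
    def delete(self):
        print("deleted via model")

class FakeInstance(object):
    """Hypothetical stand-in for an instance whose direct delete fails."""
    def model(self):
        return FakeModel()
    def delete(self):
        raise APIError("cannot delete instance directly")

def delete_with_fallback(instance):
    try:
        instance.delete()
    except APIError:
        # Same fallback as move(): if the instance cannot be deleted
        # directly, delete its model instead.
        instance.model().delete()

delete_with_fallback(FakeInstance())  # prints: deleted via model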
biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/biosignalsnotebooks/old/_factory.py | _generate_notebook_by_difficulty_body | def _generate_notebook_by_difficulty_body(notebook_object, dict_by_difficulty):
"""
Internal function that is used for generation of the page where notebooks are organized by
difficulty level.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the body will be created.
dict_by_difficulty : dict
        Global Dictionary that groups Notebook names/files by difficulty level.
"""
difficulty_keys = list(dict_by_difficulty.keys())
difficulty_keys.sort()
for difficulty in difficulty_keys:
markdown_cell = STAR_TABLE_HEADER
markdown_cell = _set_star_value(markdown_cell, int(difficulty))
for notebook_file in dict_by_difficulty[str(difficulty)]:
split_path = notebook_file.split("/")
notebook_type = split_path[-2]
notebook_name = split_path[-1].split("&")[0]
notebook_title = split_path[-1].split("&")[1]
markdown_cell += "\n\t<tr>\n\t\t<td width='20%' class='header_image_color_" + \
str(NOTEBOOK_KEYS[notebook_type]) + "'><img " \
"src='../../images/icons/" + notebook_type.title() +\
".png' width='15%'>\n\t\t</td>"
markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + \
notebook_title + "\n\t\t</td>"
markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" \
"../" + notebook_type.title() + "/" + notebook_name + \
"'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>"
markdown_cell += "</table>"
# ==================== Insertion of HTML table in a new Notebook cell ======================
notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell)) | python | def _generate_notebook_by_difficulty_body(notebook_object, dict_by_difficulty):
"""
Internal function that is used for generation of the page where notebooks are organized by
difficulty level.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the body will be created.
dict_by_difficulty : dict
        Global Dictionary that groups Notebook names/files by difficulty level.
"""
difficulty_keys = list(dict_by_difficulty.keys())
difficulty_keys.sort()
for difficulty in difficulty_keys:
markdown_cell = STAR_TABLE_HEADER
markdown_cell = _set_star_value(markdown_cell, int(difficulty))
for notebook_file in dict_by_difficulty[str(difficulty)]:
split_path = notebook_file.split("/")
notebook_type = split_path[-2]
notebook_name = split_path[-1].split("&")[0]
notebook_title = split_path[-1].split("&")[1]
markdown_cell += "\n\t<tr>\n\t\t<td width='20%' class='header_image_color_" + \
str(NOTEBOOK_KEYS[notebook_type]) + "'><img " \
"src='../../images/icons/" + notebook_type.title() +\
".png' width='15%'>\n\t\t</td>"
markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + \
notebook_title + "\n\t\t</td>"
markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" \
"../" + notebook_type.title() + "/" + notebook_name + \
"'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>"
markdown_cell += "</table>"
# ==================== Insertion of HTML table in a new Notebook cell ======================
notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell)) | [
"def",
"_generate_notebook_by_difficulty_body",
"(",
"notebook_object",
",",
"dict_by_difficulty",
")",
":",
"difficulty_keys",
"=",
"list",
"(",
"dict_by_difficulty",
".",
"keys",
"(",
")",
")",
"difficulty_keys",
".",
"sort",
"(",
")",
"for",
"difficulty",
"in",
"difficulty_keys",
":",
"markdown_cell",
"=",
"STAR_TABLE_HEADER",
"markdown_cell",
"=",
"_set_star_value",
"(",
"markdown_cell",
",",
"int",
"(",
"difficulty",
")",
")",
"for",
"notebook_file",
"in",
"dict_by_difficulty",
"[",
"str",
"(",
"difficulty",
")",
"]",
":",
"split_path",
"=",
"notebook_file",
".",
"split",
"(",
"\"/\"",
")",
"notebook_type",
"=",
"split_path",
"[",
"-",
"2",
"]",
"notebook_name",
"=",
"split_path",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\"&\"",
")",
"[",
"0",
"]",
"notebook_title",
"=",
"split_path",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\"&\"",
")",
"[",
"1",
"]",
"markdown_cell",
"+=",
"\"\\n\\t<tr>\\n\\t\\t<td width='20%' class='header_image_color_\"",
"+",
"str",
"(",
"NOTEBOOK_KEYS",
"[",
"notebook_type",
"]",
")",
"+",
"\"'><img \"",
"\"src='../../images/icons/\"",
"+",
"notebook_type",
".",
"title",
"(",
")",
"+",
"\".png' width='15%'>\\n\\t\\t</td>\"",
"markdown_cell",
"+=",
"\"\\n\\t\\t<td width='60%' class='center_cell open_cell_light'>\"",
"+",
"notebook_title",
"+",
"\"\\n\\t\\t</td>\"",
"markdown_cell",
"+=",
"\"\\n\\t\\t<td width='20%' class='center_cell'>\\n\\t\\t\\t<a href='\"",
"\"../\"",
"+",
"notebook_type",
".",
"title",
"(",
")",
"+",
"\"/\"",
"+",
"notebook_name",
"+",
"\"'><div class='file_icon'></div></a>\\n\\t\\t</td>\\n\\t</tr>\"",
"markdown_cell",
"+=",
"\"</table>\"",
"# ==================== Insertion of HTML table in a new Notebook cell ======================",
"notebook_object",
"[",
"\"cells\"",
"]",
".",
"append",
"(",
"nb",
".",
"v4",
".",
"new_markdown_cell",
"(",
"markdown_cell",
")",
")"
] | Internal function that is used for generation of the page where notebooks are organized by
difficulty level.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the body will be created.
dict_by_difficulty : dict
Global Dictionary that groups Notebook names/files by difficulty level. | [
"Internal",
"function",
"that",
"is",
"used",
"for",
"generation",
"of",
"the",
"page",
"where",
"notebooks",
"are",
"organized",
"by",
"difficulty",
"level",
"."
] | aaa01d4125180b3a34f1e26e0d3ff08c23f666d3 | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/old/_factory.py#L442-L481 | train |
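The loop above recovers a notebook's category, filename and display title by splitting the stored path on "/" and "&". A minimal sketch of that parsing, assuming the same "Category/filename&Title" convention; the sample path is illustrative:

def parse_notebook_path(notebook_file):
    split_path = notebook_file.split("/")
    notebook_type = split_path[-2]                 # e.g. "Detect"
    notebook_name = split_path[-1].split("&")[0]   # e.g. "r_peaks.ipynb"
    notebook_title = split_path[-1].split("&")[1]  # e.g. "Detect R peaks"
    return notebook_type, notebook_name, notebook_title

print(parse_notebook_path("Categories/Detect/r_peaks.ipynb&Detect R peaks"))
# ('Detect', 'r_peaks.ipynb', 'Detect R peaks')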
biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/biosignalsnotebooks/old/_factory.py | _generate_dir_structure | def _generate_dir_structure(path):
"""
    Internal function intended to generate the biosignalsnotebooks directories so that the user
can visualise and execute the Notebook created with "notebook" class in Jupyter.
----------
Parameters
----------
path : str
Path where the biosignalsnotebooks environment (files and folders) will be stored.
Returns
-------
out : str
Path of the directory that contains the folders (one folder per category) where the
Notebooks are stored.
"""
# ============================ Creation of the main directory ==================================
current_dir = (path + "\\opensignalsfactory_environment").replace("\\", "/")
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
# ================== Copy of 'images' 'styles' and 'signal_samples' folders ====================
path_cloned_files = (os.path.abspath(__file__).split(os.path.basename(__file__))[0] + \
"\\notebook_files\\osf_files\\").replace("\\", "/")
for var in ["images", "styles", "signal_samples"]:
if os.path.isdir((current_dir + "\\" + var).replace("\\", "/")):
shutil.rmtree((current_dir + "\\" + var).replace("\\", "/"))
src = (path_cloned_files + "\\" + var).replace("\\", "/")
destination = (current_dir + "\\" + var).replace("\\", "/")
shutil.copytree(src, destination)
# =========================== Generation of 'Categories' folder ================================
current_dir += "/Categories"
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
categories = list(NOTEBOOK_KEYS.keys())
for category in categories:
if not os.path.isdir(current_dir + "/" + category):
os.makedirs(current_dir + "/" + category)
return current_dir | python | def _generate_dir_structure(path):
"""
    Internal function intended to generate the biosignalsnotebooks directories so that the user
can visualise and execute the Notebook created with "notebook" class in Jupyter.
----------
Parameters
----------
path : str
Path where the biosignalsnotebooks environment (files and folders) will be stored.
Returns
-------
out : str
Path of the directory that contains the folders (one folder per category) where the
Notebooks are stored.
"""
# ============================ Creation of the main directory ==================================
current_dir = (path + "\\opensignalsfactory_environment").replace("\\", "/")
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
# ================== Copy of 'images' 'styles' and 'signal_samples' folders ====================
path_cloned_files = (os.path.abspath(__file__).split(os.path.basename(__file__))[0] + \
"\\notebook_files\\osf_files\\").replace("\\", "/")
for var in ["images", "styles", "signal_samples"]:
if os.path.isdir((current_dir + "\\" + var).replace("\\", "/")):
shutil.rmtree((current_dir + "\\" + var).replace("\\", "/"))
src = (path_cloned_files + "\\" + var).replace("\\", "/")
destination = (current_dir + "\\" + var).replace("\\", "/")
shutil.copytree(src, destination)
# =========================== Generation of 'Categories' folder ================================
current_dir += "/Categories"
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
categories = list(NOTEBOOK_KEYS.keys())
for category in categories:
if not os.path.isdir(current_dir + "/" + category):
os.makedirs(current_dir + "/" + category)
return current_dir | [
"def",
"_generate_dir_structure",
"(",
"path",
")",
":",
"# ============================ Creation of the main directory ==================================",
"current_dir",
"=",
"(",
"path",
"+",
"\"\\\\opensignalsfactory_environment\"",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"current_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"current_dir",
")",
"# ================== Copy of 'images' 'styles' and 'signal_samples' folders ====================",
"path_cloned_files",
"=",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"__file__",
")",
")",
"[",
"0",
"]",
"+",
"\"\\\\notebook_files\\\\osf_files\\\\\"",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"for",
"var",
"in",
"[",
"\"images\"",
",",
"\"styles\"",
",",
"\"signal_samples\"",
"]",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"(",
"current_dir",
"+",
"\"\\\\\"",
"+",
"var",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
")",
":",
"shutil",
".",
"rmtree",
"(",
"(",
"current_dir",
"+",
"\"\\\\\"",
"+",
"var",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
")",
"src",
"=",
"(",
"path_cloned_files",
"+",
"\"\\\\\"",
"+",
"var",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"destination",
"=",
"(",
"current_dir",
"+",
"\"\\\\\"",
"+",
"var",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"shutil",
".",
"copytree",
"(",
"src",
",",
"destination",
")",
"# =========================== Generation of 'Categories' folder ================================",
"current_dir",
"+=",
"\"/Categories\"",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"current_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"current_dir",
")",
"categories",
"=",
"list",
"(",
"NOTEBOOK_KEYS",
".",
"keys",
"(",
")",
")",
"for",
"category",
"in",
"categories",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"current_dir",
"+",
"\"/\"",
"+",
"category",
")",
":",
"os",
".",
"makedirs",
"(",
"current_dir",
"+",
"\"/\"",
"+",
"category",
")",
"return",
"current_dir"
] | Internal function intended to generate the biosignalsnotebooks directories so that the user
can visualise and execute the Notebook created with "notebook" class in Jupyter.
----------
Parameters
----------
path : str
Path where the biosignalsnotebooks environment (files and folders) will be stored.
Returns
-------
out : str
Path of the directory that contains the folders (one folder per category) where the
Notebooks are stored. | [
"Internal",
"function",
"intended",
"to",
"generate",
"the",
"biosignalsnotebooks",
"directories",
"in",
"order",
"to",
"the",
"user",
"can",
"visualise",
"and",
"execute",
"the",
"Notebook",
"created",
"with",
"notebook",
"class",
"in",
"Jupyter",
"."
] | aaa01d4125180b3a34f1e26e0d3ff08c23f666d3 | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/old/_factory.py#L669-L713 | train |
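The asset refresh above is a standard rmtree-then-copytree dance (copytree refuses to write into an existing directory on the Python versions this targets), followed by one makedirs per category. A generic sketch of the same pattern; paths are illustrative:

import os
import shutil

def refresh_folder(src, destination):
    # Remove any stale copy first, then clone the source tree, exactly as
    # _generate_dir_structure does for "images", "styles" and "signal_samples".
    if os.path.isdir(destination):
        shutil.rmtree(destination)
    shutil.copytree(src, destination)

def scaffold_categories(root, categories):
    for category in categories:
        path = os.path.join(root, "Categories", category)
        if not os.path.isdir(path):
            os.makedirs(path)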
sontek/bulby | bulby/color.py | in_lamp_reach | def in_lamp_reach(p):
''' Check if the provided XYPoint can be recreated by a Hue lamp. '''
v1 = XYPoint(Lime.x - Red.x, Lime.y - Red.y)
v2 = XYPoint(Blue.x - Red.x, Blue.y - Red.y)
q = XYPoint(p.x - Red.x, p.y - Red.y)
s = cross_product(q, v2) / cross_product(v1, v2)
t = cross_product(v1, q) / cross_product(v1, v2)
return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0) | python | def in_lamp_reach(p):
''' Check if the provided XYPoint can be recreated by a Hue lamp. '''
v1 = XYPoint(Lime.x - Red.x, Lime.y - Red.y)
v2 = XYPoint(Blue.x - Red.x, Blue.y - Red.y)
q = XYPoint(p.x - Red.x, p.y - Red.y)
s = cross_product(q, v2) / cross_product(v1, v2)
t = cross_product(v1, q) / cross_product(v1, v2)
return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0) | [
"def",
"in_lamp_reach",
"(",
"p",
")",
":",
"v1",
"=",
"XYPoint",
"(",
"Lime",
".",
"x",
"-",
"Red",
".",
"x",
",",
"Lime",
".",
"y",
"-",
"Red",
".",
"y",
")",
"v2",
"=",
"XYPoint",
"(",
"Blue",
".",
"x",
"-",
"Red",
".",
"x",
",",
"Blue",
".",
"y",
"-",
"Red",
".",
"y",
")",
"q",
"=",
"XYPoint",
"(",
"p",
".",
"x",
"-",
"Red",
".",
"x",
",",
"p",
".",
"y",
"-",
"Red",
".",
"y",
")",
"s",
"=",
"cross_product",
"(",
"q",
",",
"v2",
")",
"/",
"cross_product",
"(",
"v1",
",",
"v2",
")",
"t",
"=",
"cross_product",
"(",
"v1",
",",
"q",
")",
"/",
"cross_product",
"(",
"v1",
",",
"v2",
")",
"return",
"(",
"s",
">=",
"0.0",
")",
"and",
"(",
"t",
">=",
"0.0",
")",
"and",
"(",
"s",
"+",
"t",
"<=",
"1.0",
")"
] | Check if the provided XYPoint can be recreated by a Hue lamp. | [
"Check",
"if",
"the",
"provided",
"XYPoint",
"can",
"be",
"recreated",
"by",
"a",
"Hue",
"lamp",
"."
] | a2e741f843ee8e361b50a6079601108bfbe52526 | https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L18-L27 | train |
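The test above is a barycentric-coordinate check: the point is expressed against the two triangle edges leaving the Red corner, and it lies inside exactly when both coefficients are non-negative and sum to at most 1. A self-contained sketch; the gamut corner coordinates below are an assumption (values commonly quoted for a Hue gamut), not taken from this file:

from collections import namedtuple

XYPoint = namedtuple("XYPoint", ["x", "y"])
Red, Lime, Blue = XYPoint(0.675, 0.322), XYPoint(0.4091, 0.518), XYPoint(0.167, 0.04)

def cross_product(p1, p2):
    return p1.x * p2.y - p1.y * p2.x

def in_triangle(p):
    v1 = XYPoint(Lime.x - Red.x, Lime.y - Red.y)  # edge Red -> Lime
    v2 = XYPoint(Blue.x - Red.x, Blue.y - Red.y)  # edge Red -> Blue
    q = XYPoint(p.x - Red.x, p.y - Red.y)
    s = cross_product(q, v2) / cross_product(v1, v2)
    t = cross_product(v1, q) / cross_product(v1, v2)
    return s >= 0.0 and t >= 0.0 and s + t <= 1.0

print(in_triangle(XYPoint(0.4, 0.3)))   # True: inside the gamut triangle
print(in_triangle(XYPoint(0.05, 0.9)))  # False: far outside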
sontek/bulby | bulby/color.py | get_closest_point_to_line | def get_closest_point_to_line(A, B, P):
'''
Find the closest point on a line. This point will be reproducible by a Hue
lamp.
'''
AP = XYPoint(P.x - A.x, P.y - A.y)
AB = XYPoint(B.x - A.x, B.y - A.y)
ab2 = AB.x * AB.x + AB.y * AB.y
ap_ab = AP.x * AB.x + AP.y * AB.y
t = ap_ab / ab2
if t < 0.0:
t = 0.0
elif t > 1.0:
t = 1.0
return XYPoint(A.x + AB.x * t, A.y + AB.y * t) | python | def get_closest_point_to_line(A, B, P):
'''
Find the closest point on a line. This point will be reproducible by a Hue
lamp.
'''
AP = XYPoint(P.x - A.x, P.y - A.y)
AB = XYPoint(B.x - A.x, B.y - A.y)
ab2 = AB.x * AB.x + AB.y * AB.y
ap_ab = AP.x * AB.x + AP.y * AB.y
t = ap_ab / ab2
if t < 0.0:
t = 0.0
elif t > 1.0:
t = 1.0
return XYPoint(A.x + AB.x * t, A.y + AB.y * t) | [
"def",
"get_closest_point_to_line",
"(",
"A",
",",
"B",
",",
"P",
")",
":",
"AP",
"=",
"XYPoint",
"(",
"P",
".",
"x",
"-",
"A",
".",
"x",
",",
"P",
".",
"y",
"-",
"A",
".",
"y",
")",
"AB",
"=",
"XYPoint",
"(",
"B",
".",
"x",
"-",
"A",
".",
"x",
",",
"B",
".",
"y",
"-",
"A",
".",
"y",
")",
"ab2",
"=",
"AB",
".",
"x",
"*",
"AB",
".",
"x",
"+",
"AB",
".",
"y",
"*",
"AB",
".",
"y",
"ap_ab",
"=",
"AP",
".",
"x",
"*",
"AB",
".",
"x",
"+",
"AP",
".",
"y",
"*",
"AB",
".",
"y",
"t",
"=",
"ap_ab",
"/",
"ab2",
"if",
"t",
"<",
"0.0",
":",
"t",
"=",
"0.0",
"elif",
"t",
">",
"1.0",
":",
"t",
"=",
"1.0",
"return",
"XYPoint",
"(",
"A",
".",
"x",
"+",
"AB",
".",
"x",
"*",
"t",
",",
"A",
".",
"y",
"+",
"AB",
".",
"y",
"*",
"t",
")"
] | Find the closest point on a line. This point will be reproducible by a Hue
lamp. | [
"Find",
"the",
"closest",
"point",
"on",
"a",
"line",
".",
"This",
"point",
"will",
"be",
"reproducible",
"by",
"a",
"Hue",
"lamp",
"."
] | a2e741f843ee8e361b50a6079601108bfbe52526 | https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L30-L46 | train |
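The projection above is the textbook closest-point-on-a-segment formula: t = (AP . AB) / |AB|^2, clamped to [0, 1] so off-segment projections snap to an endpoint. A worked example:

from collections import namedtuple

XYPoint = namedtuple("XYPoint", ["x", "y"])

def closest_point_on_segment(A, B, P):
    AP = XYPoint(P.x - A.x, P.y - A.y)
    AB = XYPoint(B.x - A.x, B.y - A.y)
    t = (AP.x * AB.x + AP.y * AB.y) / (AB.x * AB.x + AB.y * AB.y)
    t = max(0.0, min(1.0, t))  # clamp, as the if/elif above does
    return XYPoint(A.x + AB.x * t, A.y + AB.y * t)

# (1, 1) projects onto the segment (0,0)-(2,0) at (1, 0);
# (5, 1) projects past B, so it clamps to the endpoint (2, 0).
print(closest_point_on_segment(XYPoint(0, 0), XYPoint(2, 0), XYPoint(1, 1)))
print(closest_point_on_segment(XYPoint(0, 0), XYPoint(2, 0), XYPoint(5, 1)))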
sontek/bulby | bulby/color.py | get_closest_point_to_point | def get_closest_point_to_point(xy_point):
'''
    Used to find the closest point on each line of the CIE 1931 'triangle'
    when a color is unreproducible.
'''
pAB = get_closest_point_to_line(Red, Lime, xy_point)
pAC = get_closest_point_to_line(Blue, Red, xy_point)
pBC = get_closest_point_to_line(Lime, Blue, xy_point)
# Get the distances per point and see which point is closer to our Point.
dAB = get_distance_between_two_points(xy_point, pAB)
dAC = get_distance_between_two_points(xy_point, pAC)
dBC = get_distance_between_two_points(xy_point, pBC)
lowest = dAB
closest_point = pAB
if (dAC < lowest):
lowest = dAC
closest_point = pAC
if (dBC < lowest):
lowest = dBC
closest_point = pBC
# Change the xy value to a value which is within the reach of the lamp.
cx = closest_point.x
cy = closest_point.y
return XYPoint(cx, cy) | python | def get_closest_point_to_point(xy_point):
'''
    Used to find the closest point on each line of the CIE 1931 'triangle'
    when a color is unreproducible.
'''
pAB = get_closest_point_to_line(Red, Lime, xy_point)
pAC = get_closest_point_to_line(Blue, Red, xy_point)
pBC = get_closest_point_to_line(Lime, Blue, xy_point)
# Get the distances per point and see which point is closer to our Point.
dAB = get_distance_between_two_points(xy_point, pAB)
dAC = get_distance_between_two_points(xy_point, pAC)
dBC = get_distance_between_two_points(xy_point, pBC)
lowest = dAB
closest_point = pAB
if (dAC < lowest):
lowest = dAC
closest_point = pAC
if (dBC < lowest):
lowest = dBC
closest_point = pBC
# Change the xy value to a value which is within the reach of the lamp.
cx = closest_point.x
cy = closest_point.y
return XYPoint(cx, cy) | [
"def",
"get_closest_point_to_point",
"(",
"xy_point",
")",
":",
"pAB",
"=",
"get_closest_point_to_line",
"(",
"Red",
",",
"Lime",
",",
"xy_point",
")",
"pAC",
"=",
"get_closest_point_to_line",
"(",
"Blue",
",",
"Red",
",",
"xy_point",
")",
"pBC",
"=",
"get_closest_point_to_line",
"(",
"Lime",
",",
"Blue",
",",
"xy_point",
")",
"# Get the distances per point and see which point is closer to our Point.",
"dAB",
"=",
"get_distance_between_two_points",
"(",
"xy_point",
",",
"pAB",
")",
"dAC",
"=",
"get_distance_between_two_points",
"(",
"xy_point",
",",
"pAC",
")",
"dBC",
"=",
"get_distance_between_two_points",
"(",
"xy_point",
",",
"pBC",
")",
"lowest",
"=",
"dAB",
"closest_point",
"=",
"pAB",
"if",
"(",
"dAC",
"<",
"lowest",
")",
":",
"lowest",
"=",
"dAC",
"closest_point",
"=",
"pAC",
"if",
"(",
"dBC",
"<",
"lowest",
")",
":",
"lowest",
"=",
"dBC",
"closest_point",
"=",
"pBC",
"# Change the xy value to a value which is within the reach of the lamp.",
"cx",
"=",
"closest_point",
".",
"x",
"cy",
"=",
"closest_point",
".",
"y",
"return",
"XYPoint",
"(",
"cx",
",",
"cy",
")"
] | Used to find the closest point on each line of the CIE 1931 'triangle'
when a color is unreproducible. | [
"Used",
"to",
"find",
"the",
"closest",
"point",
"to",
"an",
"unreproducible",
"Color",
"is",
"unreproducible",
"on",
"each",
"line",
"in",
"the",
"CIE",
"1931",
"triangle",
"."
] | a2e741f843ee8e361b50a6079601108bfbe52526 | https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L58-L87 | train |
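`get_distance_between_two_points` is referenced above but not shown in this record; the natural reading is a plain Euclidean distance, sketched here as an assumption:

import math
from collections import namedtuple

XYPoint = namedtuple("XYPoint", ["x", "y"])

def get_distance_between_two_points(p1, p2):
    # Euclidean distance between two CIE xy points.
    return math.hypot(p1.x - p2.x, p1.y - p2.y)

print(get_distance_between_two_points(XYPoint(0, 0), XYPoint(3, 4)))  # 5.0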
sontek/bulby | bulby/color.py | get_xy_from_hex | def get_xy_from_hex(hex_value):
'''
    Returns X, Y coordinates containing the closest available CIE 1931
based on the hex_value provided.
'''
red, green, blue = struct.unpack('BBB', codecs.decode(hex_value, 'hex'))
r = ((red + 0.055) / (1.0 + 0.055)) ** 2.4 if (red > 0.04045) else (red / 12.92) # pragma: noqa
g = ((green + 0.055) / (1.0 + 0.055)) ** 2.4 if (green > 0.04045) else (green / 12.92) # pragma: noqa
b = ((blue + 0.055) / (1.0 + 0.055)) ** 2.4 if (blue > 0.04045) else (blue / 12.92) # pragma: noqa
X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733
if X + Y + Z == 0:
cx = cy = 0
else:
cx = X / (X + Y + Z)
cy = Y / (X + Y + Z)
    # Check if the given XY value is within the colour reach of our lamps.
xy_point = XYPoint(cx, cy)
is_in_reach = in_lamp_reach(xy_point)
if not is_in_reach:
xy_point = get_closest_point_to_point(xy_point)
return xy_point | python | def get_xy_from_hex(hex_value):
'''
    Returns X, Y coordinates containing the closest available CIE 1931
based on the hex_value provided.
'''
red, green, blue = struct.unpack('BBB', codecs.decode(hex_value, 'hex'))
r = ((red + 0.055) / (1.0 + 0.055)) ** 2.4 if (red > 0.04045) else (red / 12.92) # pragma: noqa
g = ((green + 0.055) / (1.0 + 0.055)) ** 2.4 if (green > 0.04045) else (green / 12.92) # pragma: noqa
b = ((blue + 0.055) / (1.0 + 0.055)) ** 2.4 if (blue > 0.04045) else (blue / 12.92) # pragma: noqa
X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733
if X + Y + Z == 0:
cx = cy = 0
else:
cx = X / (X + Y + Z)
cy = Y / (X + Y + Z)
    # Check if the given XY value is within the colour reach of our lamps.
xy_point = XYPoint(cx, cy)
is_in_reach = in_lamp_reach(xy_point)
if not is_in_reach:
xy_point = get_closest_point_to_point(xy_point)
return xy_point | [
"def",
"get_xy_from_hex",
"(",
"hex_value",
")",
":",
"red",
",",
"green",
",",
"blue",
"=",
"struct",
".",
"unpack",
"(",
"'BBB'",
",",
"codecs",
".",
"decode",
"(",
"hex_value",
",",
"'hex'",
")",
")",
"r",
"=",
"(",
"(",
"red",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"red",
">",
"0.04045",
")",
"else",
"(",
"red",
"/",
"12.92",
")",
"# pragma: noqa",
"g",
"=",
"(",
"(",
"green",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"green",
">",
"0.04045",
")",
"else",
"(",
"green",
"/",
"12.92",
")",
"# pragma: noqa",
"b",
"=",
"(",
"(",
"blue",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"blue",
">",
"0.04045",
")",
"else",
"(",
"blue",
"/",
"12.92",
")",
"# pragma: noqa",
"X",
"=",
"r",
"*",
"0.4360747",
"+",
"g",
"*",
"0.3850649",
"+",
"b",
"*",
"0.0930804",
"Y",
"=",
"r",
"*",
"0.2225045",
"+",
"g",
"*",
"0.7168786",
"+",
"b",
"*",
"0.0406169",
"Z",
"=",
"r",
"*",
"0.0139322",
"+",
"g",
"*",
"0.0971045",
"+",
"b",
"*",
"0.7141733",
"if",
"X",
"+",
"Y",
"+",
"Z",
"==",
"0",
":",
"cx",
"=",
"cy",
"=",
"0",
"else",
":",
"cx",
"=",
"X",
"/",
"(",
"X",
"+",
"Y",
"+",
"Z",
")",
"cy",
"=",
"Y",
"/",
"(",
"X",
"+",
"Y",
"+",
"Z",
")",
"# Check if the given XY value is within the colourreach of our lamps.",
"xy_point",
"=",
"XYPoint",
"(",
"cx",
",",
"cy",
")",
"is_in_reach",
"=",
"in_lamp_reach",
"(",
"xy_point",
")",
"if",
"not",
"is_in_reach",
":",
"xy_point",
"=",
"get_closest_point_to_point",
"(",
"xy_point",
")",
"return",
"xy_point"
] | Returns X, Y coordinates containing the closest available CIE 1931
based on the hex_value provided. | [
"Returns",
"X",
"Y",
"coordinates",
"containing",
"the",
"closest",
"avilable",
"CIE",
"1931",
"based",
"on",
"the",
"hex_value",
"provided",
"."
] | a2e741f843ee8e361b50a6079601108bfbe52526 | https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L90-L117 | train |
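One caveat worth flagging in the record above: the sRGB linearisation constants (the 0.04045 threshold and the +0.055/1.055 term) are defined for channels normalised to 0-1, while struct.unpack('BBB', ...) yields 0-255 integers, so the else branch is effectively dead and the power curve runs on un-normalised values. A sketch with the normalisation step added; the RGB-to-XYZ matrix and the degenerate-total handling are kept as in the source, and the quoted output is approximate:

import codecs
import struct

def hex_to_xy(hex_value):
    red, green, blue = struct.unpack('BBB', codecs.decode(hex_value, 'hex'))
    channels = []
    for c in (red / 255.0, green / 255.0, blue / 255.0):
        # sRGB -> linear transfer function, applied to normalised channels.
        channels.append(((c + 0.055) / 1.055) ** 2.4 if c > 0.04045 else c / 12.92)
    r, g, b = channels
    X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
    Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
    Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733
    total = X + Y + Z
    return (0.0, 0.0) if total == 0 else (X / total, Y / total)

print(hex_to_xy(b'ff0000'))  # roughly (0.648, 0.331): the red primary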
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.get_other_keys | def get_other_keys(self, key, including_current=False):
""" Returns list of other keys that are mapped to the same value as specified key.
@param key - key for which other keys should be returned.
@param including_current if set to True - key will also appear on this list."""
other_keys = []
if key in self:
other_keys.extend(self.__dict__[str(type(key))][key])
if not including_current:
other_keys.remove(key)
return other_keys | python | def get_other_keys(self, key, including_current=False):
""" Returns list of other keys that are mapped to the same value as specified key.
@param key - key for which other keys should be returned.
@param including_current if set to True - key will also appear on this list."""
other_keys = []
if key in self:
other_keys.extend(self.__dict__[str(type(key))][key])
if not including_current:
other_keys.remove(key)
return other_keys | [
"def",
"get_other_keys",
"(",
"self",
",",
"key",
",",
"including_current",
"=",
"False",
")",
":",
"other_keys",
"=",
"[",
"]",
"if",
"key",
"in",
"self",
":",
"other_keys",
".",
"extend",
"(",
"self",
".",
"__dict__",
"[",
"str",
"(",
"type",
"(",
"key",
")",
")",
"]",
"[",
"key",
"]",
")",
"if",
"not",
"including_current",
":",
"other_keys",
".",
"remove",
"(",
"key",
")",
"return",
"other_keys"
] | Returns list of other keys that are mapped to the same value as specified key.
@param key - key for which other keys should be returned.
@param including_current if set to True - key will also appear on this list. | [
"Returns",
"list",
"of",
"other",
"keys",
"that",
"are",
"mapped",
"to",
"the",
"same",
"value",
"as",
"specified",
"key",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L167-L176 | train |
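A short usage sketch of the aliasing this method exposes: keys created together share one value, and get_other_keys lists the siblings. This assumes the package is installed and importable as multi_key_dict:

from multi_key_dict import multi_key_dict

m = multi_key_dict()
m['en', 'eng', 42] = 'English'   # three keys, one value
print(m['eng'])                  # English
print(m.get_other_keys('eng'))   # ['en', 42]
print(m.get_other_keys('eng', including_current=True))  # ['en', 'eng', 42]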
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.iterkeys | def iterkeys(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
        @param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
if(key_type is not None):
the_key = str(key_type)
if the_key in self.__dict__:
for key in self.__dict__[the_key].keys():
if return_all_keys:
yield self.__dict__[the_key][key]
else:
yield key
else:
for keys in self.items_dict.keys():
yield keys | python | def iterkeys(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
        @param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
if(key_type is not None):
the_key = str(key_type)
if the_key in self.__dict__:
for key in self.__dict__[the_key].keys():
if return_all_keys:
yield self.__dict__[the_key][key]
else:
yield key
else:
for keys in self.items_dict.keys():
yield keys | [
"def",
"iterkeys",
"(",
"self",
",",
"key_type",
"=",
"None",
",",
"return_all_keys",
"=",
"False",
")",
":",
"if",
"(",
"key_type",
"is",
"not",
"None",
")",
":",
"the_key",
"=",
"str",
"(",
"key_type",
")",
"if",
"the_key",
"in",
"self",
".",
"__dict__",
":",
"for",
"key",
"in",
"self",
".",
"__dict__",
"[",
"the_key",
"]",
".",
"keys",
"(",
")",
":",
"if",
"return_all_keys",
":",
"yield",
"self",
".",
"__dict__",
"[",
"the_key",
"]",
"[",
"key",
"]",
"else",
":",
"yield",
"key",
"else",
":",
"for",
"keys",
"in",
"self",
".",
"items_dict",
".",
"keys",
"(",
")",
":",
"yield",
"keys"
] | Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is returned instead of a key of this type. | [
"Returns",
"an",
"iterator",
"over",
"the",
"dictionary",
"s",
"keys",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L201-L217 | train |
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.itervalues | def itervalues(self, key_type=None):
""" Returns an iterator over the dictionary's values.
@param key_type if specified, iterator will be returning only values pointed by keys of this type.
        Otherwise (if not specified) all values in this dictionary will be generated."""
if(key_type is not None):
intermediate_key = str(key_type)
if intermediate_key in self.__dict__:
for direct_key in self.__dict__[intermediate_key].values():
yield self.items_dict[direct_key]
else:
for value in self.items_dict.values():
yield value | python | def itervalues(self, key_type=None):
""" Returns an iterator over the dictionary's values.
@param key_type if specified, iterator will be returning only values pointed by keys of this type.
        Otherwise (if not specified) all values in this dictionary will be generated."""
if(key_type is not None):
intermediate_key = str(key_type)
if intermediate_key in self.__dict__:
for direct_key in self.__dict__[intermediate_key].values():
yield self.items_dict[direct_key]
else:
for value in self.items_dict.values():
yield value | [
"def",
"itervalues",
"(",
"self",
",",
"key_type",
"=",
"None",
")",
":",
"if",
"(",
"key_type",
"is",
"not",
"None",
")",
":",
"intermediate_key",
"=",
"str",
"(",
"key_type",
")",
"if",
"intermediate_key",
"in",
"self",
".",
"__dict__",
":",
"for",
"direct_key",
"in",
"self",
".",
"__dict__",
"[",
"intermediate_key",
"]",
".",
"values",
"(",
")",
":",
"yield",
"self",
".",
"items_dict",
"[",
"direct_key",
"]",
"else",
":",
"for",
"value",
"in",
"self",
".",
"items_dict",
".",
"values",
"(",
")",
":",
"yield",
"value"
] | Returns an iterator over the dictionary's values.
@param key_type if specified, iterator will be returning only values pointed by keys of this type.
Otherwise (if not specified) all values in this dictionary will be generated. | [
"Returns",
"an",
"iterator",
"over",
"the",
"dictionary",
"s",
"values",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L219-L230 | train |
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.keys | def keys(self, key_type=None):
""" Returns a copy of the dictionary's keys.
@param key_type if specified, only keys for this type will be returned.
Otherwise list of tuples containing all (multiple) keys will be returned."""
if key_type is not None:
intermediate_key = str(key_type)
if intermediate_key in self.__dict__:
return self.__dict__[intermediate_key].keys()
else:
all_keys = {} # in order to preserve keys() type (dict_keys for python3)
for keys in self.items_dict.keys():
all_keys[keys] = None
return all_keys.keys() | python | def keys(self, key_type=None):
""" Returns a copy of the dictionary's keys.
@param key_type if specified, only keys for this type will be returned.
Otherwise list of tuples containing all (multiple) keys will be returned."""
if key_type is not None:
intermediate_key = str(key_type)
if intermediate_key in self.__dict__:
return self.__dict__[intermediate_key].keys()
else:
all_keys = {} # in order to preserve keys() type (dict_keys for python3)
for keys in self.items_dict.keys():
all_keys[keys] = None
return all_keys.keys() | [
"def",
"keys",
"(",
"self",
",",
"key_type",
"=",
"None",
")",
":",
"if",
"key_type",
"is",
"not",
"None",
":",
"intermediate_key",
"=",
"str",
"(",
"key_type",
")",
"if",
"intermediate_key",
"in",
"self",
".",
"__dict__",
":",
"return",
"self",
".",
"__dict__",
"[",
"intermediate_key",
"]",
".",
"keys",
"(",
")",
"else",
":",
"all_keys",
"=",
"{",
"}",
"# in order to preserve keys() type (dict_keys for python3) \r",
"for",
"keys",
"in",
"self",
".",
"items_dict",
".",
"keys",
"(",
")",
":",
"all_keys",
"[",
"keys",
"]",
"=",
"None",
"return",
"all_keys",
".",
"keys",
"(",
")"
] | Returns a copy of the dictionary's keys.
@param key_type if specified, only keys for this type will be returned.
Otherwise list of tuples containing all (multiple) keys will be returned. | [
"Returns",
"a",
"copy",
"of",
"the",
"dictionary",
"s",
"keys",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L239-L251 | train |
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.values | def values(self, key_type=None):
""" Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned."""
if(key_type is not None):
        all_items = {} # in order to preserve values() type (dict_values for python3)
keys_used = set()
direct_key = str(key_type)
if direct_key in self.__dict__:
for intermediate_key in self.__dict__[direct_key].values():
if not intermediate_key in keys_used:
all_items[intermediate_key] = self.items_dict[intermediate_key]
keys_used.add(intermediate_key)
return all_items.values()
else:
return self.items_dict.values() | python | def values(self, key_type=None):
""" Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned."""
if(key_type is not None):
        all_items = {} # in order to preserve values() type (dict_values for python3)
keys_used = set()
direct_key = str(key_type)
if direct_key in self.__dict__:
for intermediate_key in self.__dict__[direct_key].values():
if not intermediate_key in keys_used:
all_items[intermediate_key] = self.items_dict[intermediate_key]
keys_used.add(intermediate_key)
return all_items.values()
else:
return self.items_dict.values() | [
"def",
"values",
"(",
"self",
",",
"key_type",
"=",
"None",
")",
":",
"if",
"(",
"key_type",
"is",
"not",
"None",
")",
":",
"all_items",
"=",
"{",
"}",
"# in order to preserve keys() type (dict_values for python3) \r",
"keys_used",
"=",
"set",
"(",
")",
"direct_key",
"=",
"str",
"(",
"key_type",
")",
"if",
"direct_key",
"in",
"self",
".",
"__dict__",
":",
"for",
"intermediate_key",
"in",
"self",
".",
"__dict__",
"[",
"direct_key",
"]",
".",
"values",
"(",
")",
":",
"if",
"not",
"intermediate_key",
"in",
"keys_used",
":",
"all_items",
"[",
"intermediate_key",
"]",
"=",
"self",
".",
"items_dict",
"[",
"intermediate_key",
"]",
"keys_used",
".",
"add",
"(",
"intermediate_key",
")",
"return",
"all_items",
".",
"values",
"(",
")",
"else",
":",
"return",
"self",
".",
"items_dict",
".",
"values",
"(",
")"
] | Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned. | [
"Returns",
"a",
"copy",
"of",
"the",
"dictionary",
"s",
"values",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L253-L268 | train |
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.__add_item | def __add_item(self, item, keys=None):
""" Internal method to add an item to the multi-key dictionary"""
if(not keys or not len(keys)):
raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'
% (self.__class__.__name__, str(item)))
direct_key = tuple(keys) # put all keys in a tuple, and use it as a key
for key in keys:
key_type = str(type(key))
# store direct key as a value in an intermediate dictionary
if(not key_type in self.__dict__):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
# store the value in the actual dictionary
if(not 'items_dict' in self.__dict__):
self.items_dict = dict()
self.items_dict[direct_key] = item | python | def __add_item(self, item, keys=None):
""" Internal method to add an item to the multi-key dictionary"""
if(not keys or not len(keys)):
raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'
% (self.__class__.__name__, str(item)))
direct_key = tuple(keys) # put all keys in a tuple, and use it as a key
for key in keys:
key_type = str(type(key))
# store direct key as a value in an intermediate dictionary
if(not key_type in self.__dict__):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
# store the value in the actual dictionary
if(not 'items_dict' in self.__dict__):
self.items_dict = dict()
self.items_dict[direct_key] = item | [
"def",
"__add_item",
"(",
"self",
",",
"item",
",",
"keys",
"=",
"None",
")",
":",
"if",
"(",
"not",
"keys",
"or",
"not",
"len",
"(",
"keys",
")",
")",
":",
"raise",
"Exception",
"(",
"'Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"item",
")",
")",
")",
"direct_key",
"=",
"tuple",
"(",
"keys",
")",
"# put all keys in a tuple, and use it as a key\r",
"for",
"key",
"in",
"keys",
":",
"key_type",
"=",
"str",
"(",
"type",
"(",
"key",
")",
")",
"# store direct key as a value in an intermediate dictionary\r",
"if",
"(",
"not",
"key_type",
"in",
"self",
".",
"__dict__",
")",
":",
"self",
".",
"__setattr__",
"(",
"key_type",
",",
"dict",
"(",
")",
")",
"self",
".",
"__dict__",
"[",
"key_type",
"]",
"[",
"key",
"]",
"=",
"direct_key",
"# store the value in the actual dictionary\r",
"if",
"(",
"not",
"'items_dict'",
"in",
"self",
".",
"__dict__",
")",
":",
"self",
".",
"items_dict",
"=",
"dict",
"(",
")",
"self",
".",
"items_dict",
"[",
"direct_key",
"]",
"=",
"item"
] | Internal method to add an item to the multi-key dictionary | [
"Internal",
"method",
"to",
"add",
"an",
"item",
"to",
"the",
"multi",
"-",
"key",
"dictionary"
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L277-L294 | train |
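`__add_item` builds a two-level index: one sub-dict per key *type* maps each key to the full tuple of sibling keys, and items_dict maps that tuple to the value. A minimal re-implementation of the same layout on plain dicts (Python 3 type names shown in the expected output):

def add_item(store, item, keys):
    direct_key = tuple(keys)               # the tuple of all keys is the real key
    for key in keys:
        # one sub-dict per key type, e.g. "<class 'str'>" or "<class 'int'>"
        store.setdefault(str(type(key)), {})[key] = direct_key
    store.setdefault('items_dict', {})[direct_key] = item

store = {}
add_item(store, 'English', ['en', 42])
print(store["<class 'str'>"]['en'])        # ('en', 42)
print(store['items_dict'][('en', 42)])     # English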
formiaczek/multi_key_dict | multi_key_dict.py | multi_key_dict.get | def get(self, key, default=None):
""" Return the value at index specified as key."""
if key in self:
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
return default | python | def get(self, key, default=None):
""" Return the value at index specified as key."""
if key in self:
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
return default | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"if",
"key",
"in",
"self",
":",
"return",
"self",
".",
"items_dict",
"[",
"self",
".",
"__dict__",
"[",
"str",
"(",
"type",
"(",
"key",
")",
")",
"]",
"[",
"key",
"]",
"]",
"else",
":",
"return",
"default"
] | Return the value at index specified as key. | [
"Return",
"the",
"value",
"at",
"index",
"specified",
"as",
"key",
"."
] | 320826cadad8ae8664042c627fa90f82ecd7b6b7 | https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L296-L301 | train |
adamziel/django_translate | django_translate/extractors/django_template.py | DjangoTemplateExtractor.extract_translations | def extract_translations(self, string):
"""Extract messages from Django template string."""
trans = []
for t in Lexer(string.decode("utf-8"), None).tokenize():
if t.token_type == TOKEN_BLOCK:
if not t.contents.startswith(
(self.tranz_tag, self.tranzchoice_tag)):
continue
is_tranzchoice = t.contents.startswith(
self.tranzchoice_tag +
" ")
kwargs = {
"id": self._match_to_transvar(id_re, t.contents),
"number": self._match_to_transvar(number_re, t.contents),
"domain": self._match_to_transvar(domain_re, t.contents),
"locale": self._match_to_transvar(locale_re, t.contents),
"is_transchoice": is_tranzchoice, "parameters": TransVar(
[x.split("=")[0].strip() for x in properties_re.findall(t.contents) if x],
TransVar.LITERAL
),
"lineno": t.lineno,
}
trans.append(Translation(**kwargs))
return trans | python | def extract_translations(self, string):
"""Extract messages from Django template string."""
trans = []
for t in Lexer(string.decode("utf-8"), None).tokenize():
if t.token_type == TOKEN_BLOCK:
if not t.contents.startswith(
(self.tranz_tag, self.tranzchoice_tag)):
continue
is_tranzchoice = t.contents.startswith(
self.tranzchoice_tag +
" ")
kwargs = {
"id": self._match_to_transvar(id_re, t.contents),
"number": self._match_to_transvar(number_re, t.contents),
"domain": self._match_to_transvar(domain_re, t.contents),
"locale": self._match_to_transvar(locale_re, t.contents),
"is_transchoice": is_tranzchoice, "parameters": TransVar(
[x.split("=")[0].strip() for x in properties_re.findall(t.contents) if x],
TransVar.LITERAL
),
"lineno": t.lineno,
}
trans.append(Translation(**kwargs))
return trans | [
"def",
"extract_translations",
"(",
"self",
",",
"string",
")",
":",
"trans",
"=",
"[",
"]",
"for",
"t",
"in",
"Lexer",
"(",
"string",
".",
"decode",
"(",
"\"utf-8\"",
")",
",",
"None",
")",
".",
"tokenize",
"(",
")",
":",
"if",
"t",
".",
"token_type",
"==",
"TOKEN_BLOCK",
":",
"if",
"not",
"t",
".",
"contents",
".",
"startswith",
"(",
"(",
"self",
".",
"tranz_tag",
",",
"self",
".",
"tranzchoice_tag",
")",
")",
":",
"continue",
"is_tranzchoice",
"=",
"t",
".",
"contents",
".",
"startswith",
"(",
"self",
".",
"tranzchoice_tag",
"+",
"\" \"",
")",
"kwargs",
"=",
"{",
"\"id\"",
":",
"self",
".",
"_match_to_transvar",
"(",
"id_re",
",",
"t",
".",
"contents",
")",
",",
"\"number\"",
":",
"self",
".",
"_match_to_transvar",
"(",
"number_re",
",",
"t",
".",
"contents",
")",
",",
"\"domain\"",
":",
"self",
".",
"_match_to_transvar",
"(",
"domain_re",
",",
"t",
".",
"contents",
")",
",",
"\"locale\"",
":",
"self",
".",
"_match_to_transvar",
"(",
"locale_re",
",",
"t",
".",
"contents",
")",
",",
"\"is_transchoice\"",
":",
"is_tranzchoice",
",",
"\"parameters\"",
":",
"TransVar",
"(",
"[",
"x",
".",
"split",
"(",
"\"=\"",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"properties_re",
".",
"findall",
"(",
"t",
".",
"contents",
")",
"if",
"x",
"]",
",",
"TransVar",
".",
"LITERAL",
")",
",",
"\"lineno\"",
":",
"t",
".",
"lineno",
",",
"}",
"trans",
".",
"append",
"(",
"Translation",
"(",
"*",
"*",
"kwargs",
")",
")",
"return",
"trans"
] | Extract messages from Django template string. | [
"Extract",
"messages",
"from",
"Django",
"template",
"string",
"."
] | 43d8ef94a5c230abbdc89f3dbc623313fde998f2 | https://github.com/adamziel/django_translate/blob/43d8ef94a5c230abbdc89f3dbc623313fde998f2/django_translate/extractors/django_template.py#L32-L58 | train |
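A Django-version-independent sketch of the same scan, using a regular expression for {% ... %} block tags in place of Django's Lexer; the tranz tag name mirrors the extractor above, and the simplistic message-id regex is an assumption (the real id_re is not shown in this record):

import re

BLOCK_RE = re.compile(r'{%\s*(.*?)\s*%}')
ID_RE = re.compile(r'''["'](.+?)["']''')

def extract_tranz_ids(template_string):
    """Return (lineno, message id) pairs for every {% tranz "..." %} tag."""
    found = []
    for lineno, line in enumerate(template_string.splitlines(), start=1):
        for contents in BLOCK_RE.findall(line):
            if contents.startswith('tranz'):
                match = ID_RE.search(contents)
                if match:
                    found.append((lineno, match.group(1)))
    return found

print(extract_tranz_ids('<p>{% tranz "ja_lubie_placki" %}</p>'))
# [(1, 'ja_lubie_placki')]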
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink.next | def next(self):
"""Returns the next marker.
Returns:
tuple: The marker name as a string and its genotypes as a
:py:class:`numpy.ndarray`.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
self._n += 1
if self._n > self._nb_markers:
raise StopIteration()
return self._bim.index[self._n - 1], self._read_current_marker() | python | def next(self):
"""Returns the next marker.
Returns:
tuple: The marker name as a string and its genotypes as a
:py:class:`numpy.ndarray`.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
self._n += 1
if self._n > self._nb_markers:
raise StopIteration()
return self._bim.index[self._n - 1], self._read_current_marker() | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mode",
"!=",
"\"r\"",
":",
"raise",
"UnsupportedOperation",
"(",
"\"not available in 'w' mode\"",
")",
"self",
".",
"_n",
"+=",
"1",
"if",
"self",
".",
"_n",
">",
"self",
".",
"_nb_markers",
":",
"raise",
"StopIteration",
"(",
")",
"return",
"self",
".",
"_bim",
".",
"index",
"[",
"self",
".",
"_n",
"-",
"1",
"]",
",",
"self",
".",
"_read_current_marker",
"(",
")"
] | Returns the next marker.
Returns:
tuple: The marker name as a string and its genotypes as a
:py:class:`numpy.ndarray`. | [
"Returns",
"the",
"next",
"marker",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L181-L196 | train |
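A minimal usage sketch for marker iteration (not part of the record): it assumes PyPlink("plink_prefix") opens a .bed/.bim/.fam trio in read mode and that the class wires next() into the iterator and context-manager protocols; "plink_prefix" is a hypothetical file-set prefix.

from pyplink import PyPlink

with PyPlink("plink_prefix") as bed:
    for marker_id, genotypes in bed:     # calls next() until StopIteration
        print(marker_id, genotypes[:5])  # additive-coded numpy array per marker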
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink._read_current_marker | def _read_current_marker(self):
"""Reads the current marker and returns its genotypes."""
return self._geno_values[
np.frombuffer(self._bed.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples] | python | def _read_current_marker(self):
"""Reads the current marker and returns its genotypes."""
return self._geno_values[
np.frombuffer(self._bed.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples] | [
"def",
"_read_current_marker",
"(",
"self",
")",
":",
"return",
"self",
".",
"_geno_values",
"[",
"np",
".",
"frombuffer",
"(",
"self",
".",
"_bed",
".",
"read",
"(",
"self",
".",
"_nb_bytes",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"]",
".",
"flatten",
"(",
"order",
"=",
"\"C\"",
")",
"[",
":",
"self",
".",
"_nb_samples",
"]"
] | Reads the current marker and returns its genotypes. | [
"Reads",
"the",
"current",
"marker",
"and",
"returns",
"its",
"genotypes",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L198-L202 | train |
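A runnable sketch of the table lookup that _read_current_marker performs. The construction of _geno_values is not shown in the record, only its use; the 256 x 4 table below is rebuilt from the 2-bit codes documented in the _read_bim comments further down.

import numpy as np

# PLINK 2-bit codes: 0 -> homozygous a1 (2), 1 -> missing (-1),
# 2 -> heterozygous (1), 3 -> homozygous a2 (0)
code_map = np.array([2, -1, 1, 0], dtype=np.int8)
geno_values = np.array(
    [[code_map[(b >> shift) & 0b11] for shift in (0, 2, 4, 6)]
     for b in range(256)], dtype=np.int8)

packed = np.frombuffer(bytes([0b11100100]), dtype=np.uint8)
print(geno_values[packed].flatten(order="C"))  # [ 2 -1  1  0]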
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink.seek | def seek(self, n):
"""Gets to a certain marker position in the BED file.
Args:
n (int): The index of the marker to seek to.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if 0 <= n < self._nb_markers:
self._n = n
self._bed.seek(self._get_seek_position(n))
else:
# Invalid seek value
raise ValueError("invalid position in BED: {}".format(n)) | python | def seek(self, n):
"""Gets to a certain marker position in the BED file.
Args:
n (int): The index of the marker to seek to.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if 0 <= n < self._nb_markers:
self._n = n
self._bed.seek(self._get_seek_position(n))
else:
# Invalid seek value
raise ValueError("invalid position in BED: {}".format(n)) | [
"def",
"seek",
"(",
"self",
",",
"n",
")",
":",
"if",
"self",
".",
"_mode",
"!=",
"\"r\"",
":",
"raise",
"UnsupportedOperation",
"(",
"\"not available in 'w' mode\"",
")",
"if",
"0",
"<=",
"n",
"<",
"self",
".",
"_nb_markers",
":",
"self",
".",
"_n",
"=",
"n",
"self",
".",
"_bed",
".",
"seek",
"(",
"self",
".",
"_get_seek_position",
"(",
"n",
")",
")",
"else",
":",
"# Invalid seek value",
"raise",
"ValueError",
"(",
"\"invalid position in BED: {}\"",
".",
"format",
"(",
"n",
")",
")"
] | Gets to a certain marker position in the BED file.
Args:
n (int): The index of the marker to seek to. | [
"Gets",
"to",
"a",
"certain",
"marker",
"position",
"in",
"the",
"BED",
"file",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L204-L220 | train |
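A short sketch of random access with seek(), assuming the open read-mode PyPlink object bed from the sketch above.

bed.seek(0)                     # rewind to the first marker
marker, genotypes = bed.next()  # read it; iteration resumes from here
bed.seek(10)                    # jump straight to the marker at index 10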
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink._read_bim | def _read_bim(self):
"""Reads the BIM file."""
# Reading the BIM file and setting the values
bim = pd.read_csv(self.bim_filename, delim_whitespace=True,
names=["chrom", "snp", "cm", "pos", "a1", "a2"],
dtype=dict(snp=str, a1=str, a2=str))
# Saving the index as integer
bim["i"] = bim.index
# Checking for duplicated markers
try:
bim = bim.set_index("snp", verify_integrity=True)
self._has_duplicated = False
except ValueError as e:
# Setting this flag to true
self._has_duplicated = True
# Finding the duplicated markers
duplicated = bim.snp.duplicated(keep=False)
duplicated_markers = bim.loc[duplicated, "snp"]
duplicated_marker_counts = duplicated_markers.value_counts()
# The dictionary that will contain information about the duplicated
# markers
self._dup_markers = {
m: [] for m in duplicated_marker_counts.index
}
# Logging a warning
logger.warning("Duplicated markers found")
for marker, count in duplicated_marker_counts.iteritems():
logger.warning(" - {}: {:,d} times".format(marker, count))
logger.warning("Appending ':dupX' to the duplicated markers "
"according to their location in the BIM file")
# Renaming the markers
counter = Counter()
for i, marker in duplicated_markers.iteritems():
counter[marker] += 1
new_name = "{}:dup{}".format(marker, counter[marker])
bim.loc[i, "snp"] = new_name
# Updating the dictionary containing the duplicated markers
self._dup_markers[marker].append(new_name)
# Resetting the index
bim = bim.set_index("snp", verify_integrity=True)
# Encoding the allele
# - The original 0 is the actual 2 (a1/a1)
# - The original 2 is the actual 1 (a1/a2)
# - The original 3 is the actual 0 (a2/a2)
# - The original 1 is the actual -1 (no call)
allele_encoding = np.array(
[bim.a2 * 2, bim.a1 + bim.a2, bim.a1 * 2,
list(repeat("00", bim.shape[0]))],
dtype="U2",
)
self._allele_encoding = allele_encoding.T
# Saving the data in the object
self._bim = bim[["chrom", "pos", "cm", "a1", "a2", "i"]]
self._nb_markers = self._bim.shape[0] | python | def _read_bim(self):
"""Reads the BIM file."""
# Reading the BIM file and setting the values
bim = pd.read_csv(self.bim_filename, delim_whitespace=True,
names=["chrom", "snp", "cm", "pos", "a1", "a2"],
dtype=dict(snp=str, a1=str, a2=str))
# Saving the index as integer
bim["i"] = bim.index
# Checking for duplicated markers
try:
bim = bim.set_index("snp", verify_integrity=True)
self._has_duplicated = False
except ValueError as e:
# Setting this flag to true
self._has_duplicated = True
# Finding the duplicated markers
duplicated = bim.snp.duplicated(keep=False)
duplicated_markers = bim.loc[duplicated, "snp"]
duplicated_marker_counts = duplicated_markers.value_counts()
# The dictionary that will contain information about the duplicated
# markers
self._dup_markers = {
m: [] for m in duplicated_marker_counts.index
}
# Logging a warning
logger.warning("Duplicated markers found")
for marker, count in duplicated_marker_counts.iteritems():
logger.warning(" - {}: {:,d} times".format(marker, count))
logger.warning("Appending ':dupX' to the duplicated markers "
"according to their location in the BIM file")
# Renaming the markers
counter = Counter()
for i, marker in duplicated_markers.iteritems():
counter[marker] += 1
new_name = "{}:dup{}".format(marker, counter[marker])
bim.loc[i, "snp"] = new_name
# Updating the dictionary containing the duplicated markers
self._dup_markers[marker].append(new_name)
# Resetting the index
bim = bim.set_index("snp", verify_integrity=True)
# Encoding the allele
# - The original 0 is the actual 2 (a1/a1)
# - The original 2 is the actual 1 (a1/a2)
# - The original 3 is the actual 0 (a2/a2)
# - The original 1 is the actual -1 (no call)
allele_encoding = np.array(
[bim.a2 * 2, bim.a1 + bim.a2, bim.a1 * 2,
list(repeat("00", bim.shape[0]))],
dtype="U2",
)
self._allele_encoding = allele_encoding.T
# Saving the data in the object
self._bim = bim[["chrom", "pos", "cm", "a1", "a2", "i"]]
self._nb_markers = self._bim.shape[0] | [
"def",
"_read_bim",
"(",
"self",
")",
":",
"# Reading the BIM file and setting the values",
"bim",
"=",
"pd",
".",
"read_csv",
"(",
"self",
".",
"bim_filename",
",",
"delim_whitespace",
"=",
"True",
",",
"names",
"=",
"[",
"\"chrom\"",
",",
"\"snp\"",
",",
"\"cm\"",
",",
"\"pos\"",
",",
"\"a1\"",
",",
"\"a2\"",
"]",
",",
"dtype",
"=",
"dict",
"(",
"snp",
"=",
"str",
",",
"a1",
"=",
"str",
",",
"a2",
"=",
"str",
")",
")",
"# Saving the index as integer",
"bim",
"[",
"\"i\"",
"]",
"=",
"bim",
".",
"index",
"# Checking for duplicated markers",
"try",
":",
"bim",
"=",
"bim",
".",
"set_index",
"(",
"\"snp\"",
",",
"verify_integrity",
"=",
"True",
")",
"self",
".",
"_has_duplicated",
"=",
"False",
"except",
"ValueError",
"as",
"e",
":",
"# Setting this flag to true",
"self",
".",
"_has_duplicated",
"=",
"True",
"# Finding the duplicated markers",
"duplicated",
"=",
"bim",
".",
"snp",
".",
"duplicated",
"(",
"keep",
"=",
"False",
")",
"duplicated_markers",
"=",
"bim",
".",
"loc",
"[",
"duplicated",
",",
"\"snp\"",
"]",
"duplicated_marker_counts",
"=",
"duplicated_markers",
".",
"value_counts",
"(",
")",
"# The dictionary that will contain information about the duplicated",
"# markers",
"self",
".",
"_dup_markers",
"=",
"{",
"m",
":",
"[",
"]",
"for",
"m",
"in",
"duplicated_marker_counts",
".",
"index",
"}",
"# Logging a warning",
"logger",
".",
"warning",
"(",
"\"Duplicated markers found\"",
")",
"for",
"marker",
",",
"count",
"in",
"duplicated_marker_counts",
".",
"iteritems",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\" - {}: {:,d} times\"",
".",
"format",
"(",
"marker",
",",
"count",
")",
")",
"logger",
".",
"warning",
"(",
"\"Appending ':dupX' to the duplicated markers \"",
"\"according to their location in the BIM file\"",
")",
"# Renaming the markers",
"counter",
"=",
"Counter",
"(",
")",
"for",
"i",
",",
"marker",
"in",
"duplicated_markers",
".",
"iteritems",
"(",
")",
":",
"counter",
"[",
"marker",
"]",
"+=",
"1",
"new_name",
"=",
"\"{}:dup{}\"",
".",
"format",
"(",
"marker",
",",
"counter",
"[",
"marker",
"]",
")",
"bim",
".",
"loc",
"[",
"i",
",",
"\"snp\"",
"]",
"=",
"new_name",
"# Updating the dictionary containing the duplicated markers",
"self",
".",
"_dup_markers",
"[",
"marker",
"]",
".",
"append",
"(",
"new_name",
")",
"# Resetting the index",
"bim",
"=",
"bim",
".",
"set_index",
"(",
"\"snp\"",
",",
"verify_integrity",
"=",
"True",
")",
"# Encoding the allele",
"# - The original 0 is the actual 2 (a1/a1)",
"# - The original 2 is the actual 1 (a1/a2)",
"# - The original 3 is the actual 0 (a2/a2)",
"# - The original 1 is the actual -1 (no call)",
"allele_encoding",
"=",
"np",
".",
"array",
"(",
"[",
"bim",
".",
"a2",
"*",
"2",
",",
"bim",
".",
"a1",
"+",
"bim",
".",
"a2",
",",
"bim",
".",
"a1",
"*",
"2",
",",
"list",
"(",
"repeat",
"(",
"\"00\"",
",",
"bim",
".",
"shape",
"[",
"0",
"]",
")",
")",
"]",
",",
"dtype",
"=",
"\"U2\"",
",",
")",
"self",
".",
"_allele_encoding",
"=",
"allele_encoding",
".",
"T",
"# Saving the data in the object",
"self",
".",
"_bim",
"=",
"bim",
"[",
"[",
"\"chrom\"",
",",
"\"pos\"",
",",
"\"cm\"",
",",
"\"a1\"",
",",
"\"a2\"",
",",
"\"i\"",
"]",
"]",
"self",
".",
"_nb_markers",
"=",
"self",
".",
"_bim",
".",
"shape",
"[",
"0",
"]"
] | Reads the BIM file. | [
"Reads",
"the",
"BIM",
"file",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L231-L295 | train |
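A runnable, standalone reproduction of the ':dupN' renaming scheme that _read_bim applies when marker names collide.

from collections import Counter

markers = ["rs1", "rs2", "rs1", "rs3", "rs1"]
counter = Counter()
renamed = []
for name in markers:
    if markers.count(name) > 1:  # marker is duplicated somewhere in the file
        counter[name] += 1
        renamed.append("{}:dup{}".format(name, counter[name]))
    else:
        renamed.append(name)
print(renamed)  # ['rs1:dup1', 'rs2', 'rs1:dup2', 'rs3', 'rs1:dup3']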
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink._read_fam | def _read_fam(self):
"""Reads the FAM file."""
# Reading the FAM file and setting the values
fam = pd.read_csv(self.fam_filename, delim_whitespace=True,
names=["fid", "iid", "father", "mother", "gender",
"status"],
dtype=dict(fid=str, iid=str, father=str, mother=str))
        # Getting the byte and bit location of each sample
fam["byte"] = [
            int(np.ceil((i + 1) / 4.0)) - 1 for i in range(len(fam))
]
fam["bit"] = [(i % 4) * 2 for i in range(len(fam))]
# Saving the data in the object
self._fam = fam
self._nb_samples = self._fam.shape[0] | python | def _read_fam(self):
"""Reads the FAM file."""
# Reading the FAM file and setting the values
fam = pd.read_csv(self.fam_filename, delim_whitespace=True,
names=["fid", "iid", "father", "mother", "gender",
"status"],
dtype=dict(fid=str, iid=str, father=str, mother=str))
        # Getting the byte and bit location of each sample
fam["byte"] = [
            int(np.ceil((i + 1) / 4.0)) - 1 for i in range(len(fam))
]
fam["bit"] = [(i % 4) * 2 for i in range(len(fam))]
# Saving the data in the object
self._fam = fam
self._nb_samples = self._fam.shape[0] | [
"def",
"_read_fam",
"(",
"self",
")",
":",
"# Reading the FAM file and setting the values",
"fam",
"=",
"pd",
".",
"read_csv",
"(",
"self",
".",
"fam_filename",
",",
"delim_whitespace",
"=",
"True",
",",
"names",
"=",
"[",
"\"fid\"",
",",
"\"iid\"",
",",
"\"father\"",
",",
"\"mother\"",
",",
"\"gender\"",
",",
"\"status\"",
"]",
",",
"dtype",
"=",
"dict",
"(",
"fid",
"=",
"str",
",",
"iid",
"=",
"str",
",",
"father",
"=",
"str",
",",
"mother",
"=",
"str",
")",
")",
"# Getting the byte and bit location of each samples",
"fam",
"[",
"\"byte\"",
"]",
"=",
"[",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"1",
"+",
"1",
")",
"/",
"4.0",
")",
")",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fam",
")",
")",
"]",
"fam",
"[",
"\"bit\"",
"]",
"=",
"[",
"(",
"i",
"%",
"4",
")",
"*",
"2",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fam",
")",
")",
"]",
"# Saving the data in the object",
"self",
".",
"_fam",
"=",
"fam",
"self",
".",
"_nb_samples",
"=",
"self",
".",
"_fam",
".",
"shape",
"[",
"0",
"]"
] | Reads the FAM file. | [
"Reads",
"the",
"FAM",
"file",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L333-L349 | train |
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink._read_bed | def _read_bed(self):
"""Reads the BED file."""
        # Checking if BIM and FAM files were both read
if (self._bim is None) or (self._fam is None):
raise RuntimeError("no BIM or FAM file were read")
# The number of bytes per marker
self._nb_bytes = int(np.ceil(self._nb_samples / 4.0))
# Checking the file is valid by looking at the first 3 bytes and the
# last entry (correct size)
with open(self.bed_filename, "rb") as bed_file:
# Checking that the first two bytes are OK
if (ord(bed_file.read(1)) != 108) or (ord(bed_file.read(1)) != 27):
raise ValueError("not a valid BED file: "
"{}".format(self.bed_filename))
# Checking that the format is SNP-major
if ord(bed_file.read(1)) != 1:
raise ValueError("not in SNP-major format (please recode): "
"{}".format(self.bed_filename))
# Checking the last entry (for BED corruption)
seek_index = self._get_seek_position(self._bim.iloc[-1, :].i)
bed_file.seek(seek_index)
geno = self._geno_values[
np.frombuffer(bed_file.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples]
if geno.shape[0] != self._nb_samples:
raise ValueError("invalid number of entries: corrupted BED?")
# Opening the file for the rest of the operations (reading 3 bytes)
self._bed = open(self.bed_filename, "rb")
self._bed.read(3) | python | def _read_bed(self):
"""Reads the BED file."""
        # Checking if BIM and FAM files were both read
if (self._bim is None) or (self._fam is None):
raise RuntimeError("no BIM or FAM file were read")
# The number of bytes per marker
self._nb_bytes = int(np.ceil(self._nb_samples / 4.0))
# Checking the file is valid by looking at the first 3 bytes and the
# last entry (correct size)
with open(self.bed_filename, "rb") as bed_file:
# Checking that the first two bytes are OK
if (ord(bed_file.read(1)) != 108) or (ord(bed_file.read(1)) != 27):
raise ValueError("not a valid BED file: "
"{}".format(self.bed_filename))
# Checking that the format is SNP-major
if ord(bed_file.read(1)) != 1:
raise ValueError("not in SNP-major format (please recode): "
"{}".format(self.bed_filename))
# Checking the last entry (for BED corruption)
seek_index = self._get_seek_position(self._bim.iloc[-1, :].i)
bed_file.seek(seek_index)
geno = self._geno_values[
np.frombuffer(bed_file.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples]
if geno.shape[0] != self._nb_samples:
raise ValueError("invalid number of entries: corrupted BED?")
# Opening the file for the rest of the operations (reading 3 bytes)
self._bed = open(self.bed_filename, "rb")
self._bed.read(3) | [
"def",
"_read_bed",
"(",
"self",
")",
":",
"# Checking if BIM and BAM files were both read",
"if",
"(",
"self",
".",
"_bim",
"is",
"None",
")",
"or",
"(",
"self",
".",
"_fam",
"is",
"None",
")",
":",
"raise",
"RuntimeError",
"(",
"\"no BIM or FAM file were read\"",
")",
"# The number of bytes per marker",
"self",
".",
"_nb_bytes",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"self",
".",
"_nb_samples",
"/",
"4.0",
")",
")",
"# Checking the file is valid by looking at the first 3 bytes and the",
"# last entry (correct size)",
"with",
"open",
"(",
"self",
".",
"bed_filename",
",",
"\"rb\"",
")",
"as",
"bed_file",
":",
"# Checking that the first two bytes are OK",
"if",
"(",
"ord",
"(",
"bed_file",
".",
"read",
"(",
"1",
")",
")",
"!=",
"108",
")",
"or",
"(",
"ord",
"(",
"bed_file",
".",
"read",
"(",
"1",
")",
")",
"!=",
"27",
")",
":",
"raise",
"ValueError",
"(",
"\"not a valid BED file: \"",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"bed_filename",
")",
")",
"# Checking that the format is SNP-major",
"if",
"ord",
"(",
"bed_file",
".",
"read",
"(",
"1",
")",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"not in SNP-major format (please recode): \"",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"bed_filename",
")",
")",
"# Checking the last entry (for BED corruption)",
"seek_index",
"=",
"self",
".",
"_get_seek_position",
"(",
"self",
".",
"_bim",
".",
"iloc",
"[",
"-",
"1",
",",
":",
"]",
".",
"i",
")",
"bed_file",
".",
"seek",
"(",
"seek_index",
")",
"geno",
"=",
"self",
".",
"_geno_values",
"[",
"np",
".",
"frombuffer",
"(",
"bed_file",
".",
"read",
"(",
"self",
".",
"_nb_bytes",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"]",
".",
"flatten",
"(",
"order",
"=",
"\"C\"",
")",
"[",
":",
"self",
".",
"_nb_samples",
"]",
"if",
"geno",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"_nb_samples",
":",
"raise",
"ValueError",
"(",
"\"invalid number of entries: corrupted BED?\"",
")",
"# Opening the file for the rest of the operations (reading 3 bytes)",
"self",
".",
"_bed",
"=",
"open",
"(",
"self",
".",
"bed_filename",
",",
"\"rb\"",
")",
"self",
".",
"_bed",
".",
"read",
"(",
"3",
")"
] | Reads the BED file. | [
"Reads",
"the",
"BED",
"file",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L375-L408 | train |
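A runnable illustration of the three header bytes that _read_bed validates: 108 and 27 (0x6c, 0x1b) are the PLINK magic numbers, and a third byte of 1 flags SNP-major ordering.

header = bytes([108, 27, 1])  # what a valid SNP-major BED file starts with
assert header == b"\x6c\x1b\x01"
print(header[0] == 108 and header[1] == 27)  # magic-number check: True
print(header[2] == 1)                        # SNP-major check: True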
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink._write_bed_header | def _write_bed_header(self):
"""Writes the BED first 3 bytes."""
# Writing the first three bytes
final_byte = 1 if self._bed_format == "SNP-major" else 0
self._bed.write(bytearray((108, 27, final_byte))) | python | def _write_bed_header(self):
"""Writes the BED first 3 bytes."""
# Writing the first three bytes
final_byte = 1 if self._bed_format == "SNP-major" else 0
self._bed.write(bytearray((108, 27, final_byte))) | [
"def",
"_write_bed_header",
"(",
"self",
")",
":",
"# Writing the first three bytes",
"final_byte",
"=",
"1",
"if",
"self",
".",
"_bed_format",
"==",
"\"SNP-major\"",
"else",
"0",
"self",
".",
"_bed",
".",
"write",
"(",
"bytearray",
"(",
"(",
"108",
",",
"27",
",",
"final_byte",
")",
")",
")"
] | Writes the first 3 bytes of the BED file. | [
"Writes",
"the",
"BED",
"first",
"3",
"bytes",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L410-L414 | train |
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink.iter_geno_marker | def iter_geno_marker(self, markers, return_index=False):
"""Iterates over genotypes for a list of markers.
Args:
            markers (list): The list of markers to iterate over.
            return_index (bool): Whether to return the marker's index or not.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# If string, we change to list
if isinstance(markers, str):
markers = [markers]
# Iterating over all markers
if return_index:
for marker in markers:
geno, seek = self.get_geno_marker(marker, return_index=True)
yield marker, geno, seek
else:
for marker in markers:
yield marker, self.get_geno_marker(marker) | python | def iter_geno_marker(self, markers, return_index=False):
"""Iterates over genotypes for a list of markers.
Args:
            markers (list): The list of markers to iterate over.
            return_index (bool): Whether to return the marker's index or not.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# If string, we change to list
if isinstance(markers, str):
markers = [markers]
# Iterating over all markers
if return_index:
for marker in markers:
geno, seek = self.get_geno_marker(marker, return_index=True)
yield marker, geno, seek
else:
for marker in markers:
yield marker, self.get_geno_marker(marker) | [
"def",
"iter_geno_marker",
"(",
"self",
",",
"markers",
",",
"return_index",
"=",
"False",
")",
":",
"if",
"self",
".",
"_mode",
"!=",
"\"r\"",
":",
"raise",
"UnsupportedOperation",
"(",
"\"not available in 'w' mode\"",
")",
"# If string, we change to list",
"if",
"isinstance",
"(",
"markers",
",",
"str",
")",
":",
"markers",
"=",
"[",
"markers",
"]",
"# Iterating over all markers",
"if",
"return_index",
":",
"for",
"marker",
"in",
"markers",
":",
"geno",
",",
"seek",
"=",
"self",
".",
"get_geno_marker",
"(",
"marker",
",",
"return_index",
"=",
"True",
")",
"yield",
"marker",
",",
"geno",
",",
"seek",
"else",
":",
"for",
"marker",
"in",
"markers",
":",
"yield",
"marker",
",",
"self",
".",
"get_geno_marker",
"(",
"marker",
")"
] | Iterates over genotypes for a list of markers.
Args:
            markers (list): The list of markers to iterate over.
            return_index (bool): Whether to return the marker's index or not.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (additive format). | [
"Iterates",
"over",
"genotypes",
"for",
"a",
"list",
"of",
"markers",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L445-L471 | train |
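A minimal usage sketch, assuming an open read-mode PyPlink object bed whose BIM contains the (hypothetical) markers named below.

for marker, genotypes in bed.iter_geno_marker(["rs1", "rs2"]):
    print(marker, genotypes.mean())

# A bare string is accepted too, and return_index adds each marker's BIM index:
for marker, genotypes, index in bed.iter_geno_marker("rs1", return_index=True):
    print(marker, index)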
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink.get_geno_marker | def get_geno_marker(self, marker, return_index=False):
"""Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
            return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Check if the marker exists
if marker not in self._bim.index:
raise ValueError("{}: marker not in BIM".format(marker))
# Seeking to the correct position
seek_index = self._bim.loc[marker, "i"]
self.seek(seek_index)
if return_index:
return self._read_current_marker(), seek_index
return self._read_current_marker() | python | def get_geno_marker(self, marker, return_index=False):
"""Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
            return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Check if the marker exists
if marker not in self._bim.index:
raise ValueError("{}: marker not in BIM".format(marker))
# Seeking to the correct position
seek_index = self._bim.loc[marker, "i"]
self.seek(seek_index)
if return_index:
return self._read_current_marker(), seek_index
return self._read_current_marker() | [
"def",
"get_geno_marker",
"(",
"self",
",",
"marker",
",",
"return_index",
"=",
"False",
")",
":",
"if",
"self",
".",
"_mode",
"!=",
"\"r\"",
":",
"raise",
"UnsupportedOperation",
"(",
"\"not available in 'w' mode\"",
")",
"# Check if the marker exists",
"if",
"marker",
"not",
"in",
"self",
".",
"_bim",
".",
"index",
":",
"raise",
"ValueError",
"(",
"\"{}: marker not in BIM\"",
".",
"format",
"(",
"marker",
")",
")",
"# Seeking to the correct position",
"seek_index",
"=",
"self",
".",
"_bim",
".",
"loc",
"[",
"marker",
",",
"\"i\"",
"]",
"self",
".",
"seek",
"(",
"seek_index",
")",
"if",
"return_index",
":",
"return",
"self",
".",
"_read_current_marker",
"(",
")",
",",
"seek_index",
"return",
"self",
".",
"_read_current_marker",
"(",
")"
] | Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
            return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format). | [
"Gets",
"the",
"genotypes",
"for",
"a",
"given",
"marker",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L489-L513 | train |
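A minimal sketch of single-marker lookup and its error path, under the same assumptions as the sketches above.

genotypes = bed.get_geno_marker("rs1")
genotypes, index = bed.get_geno_marker("rs1", return_index=True)
try:
    bed.get_geno_marker("no_such_marker")
except ValueError as exc:
    print(exc)  # no_such_marker: marker not in BIM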
lemieuxl/pyplink | pyplink/pyplink.py | PyPlink.write_genotypes | def write_genotypes(self, genotypes):
"""Write genotypes to binary file.
Args:
genotypes (numpy.ndarray): The genotypes to write in the BED file.
"""
if self._mode != "w":
raise UnsupportedOperation("not available in 'r' mode")
# Initializing the number of samples if required
if self._nb_values is None:
self._nb_values = len(genotypes)
# Checking the expected number of samples
if self._nb_values != len(genotypes):
raise ValueError("{:,d} samples expected, got {:,d}".format(
self._nb_values,
len(genotypes),
))
# Writing to file
byte_array = [
g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in
self._grouper((_byte_recode[geno] for geno in genotypes), 4)
]
self._bed.write(bytearray(byte_array)) | python | def write_genotypes(self, genotypes):
"""Write genotypes to binary file.
Args:
genotypes (numpy.ndarray): The genotypes to write in the BED file.
"""
if self._mode != "w":
raise UnsupportedOperation("not available in 'r' mode")
# Initializing the number of samples if required
if self._nb_values is None:
self._nb_values = len(genotypes)
# Checking the expected number of samples
if self._nb_values != len(genotypes):
raise ValueError("{:,d} samples expected, got {:,d}".format(
self._nb_values,
len(genotypes),
))
# Writing to file
byte_array = [
g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in
self._grouper((_byte_recode[geno] for geno in genotypes), 4)
]
self._bed.write(bytearray(byte_array)) | [
"def",
"write_genotypes",
"(",
"self",
",",
"genotypes",
")",
":",
"if",
"self",
".",
"_mode",
"!=",
"\"w\"",
":",
"raise",
"UnsupportedOperation",
"(",
"\"not available in 'r' mode\"",
")",
"# Initializing the number of samples if required",
"if",
"self",
".",
"_nb_values",
"is",
"None",
":",
"self",
".",
"_nb_values",
"=",
"len",
"(",
"genotypes",
")",
"# Checking the expected number of samples",
"if",
"self",
".",
"_nb_values",
"!=",
"len",
"(",
"genotypes",
")",
":",
"raise",
"ValueError",
"(",
"\"{:,d} samples expected, got {:,d}\"",
".",
"format",
"(",
"self",
".",
"_nb_values",
",",
"len",
"(",
"genotypes",
")",
",",
")",
")",
"# Writing to file",
"byte_array",
"=",
"[",
"g",
"[",
"0",
"]",
"|",
"(",
"g",
"[",
"1",
"]",
"<<",
"2",
")",
"|",
"(",
"g",
"[",
"2",
"]",
"<<",
"4",
")",
"|",
"(",
"g",
"[",
"3",
"]",
"<<",
"6",
")",
"for",
"g",
"in",
"self",
".",
"_grouper",
"(",
"(",
"_byte_recode",
"[",
"geno",
"]",
"for",
"geno",
"in",
"genotypes",
")",
",",
"4",
")",
"]",
"self",
".",
"_bed",
".",
"write",
"(",
"bytearray",
"(",
"byte_array",
")",
")"
] | Write genotypes to binary file.
Args:
genotypes (numpy.ndarray): The genotypes to write in the BED file. | [
"Write",
"genotypes",
"to",
"binary",
"file",
"."
] | 31d47c86f589064bda98206314a2d0b20e7fd2f0 | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L531-L557 | train |
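A runnable sketch of the 2-bit packing inside write_genotypes. The _byte_recode table itself is not shown in the record; the mapping below is inferred from the decode comments in _read_bim and should be read as an assumption.

byte_recode = {2: 0, -1: 1, 1: 2, 0: 3}  # additive genotype -> 2-bit PLINK code

genotypes = [2, -1, 1, 0]  # one byte's worth (four samples)
g = [byte_recode[geno] for geno in genotypes]
packed = g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6)
print(bin(packed))  # 0b11100100, which decodes back to [2, -1, 1, 0]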
CI-WATER/gsshapy | gsshapy/orm/tim.py | TimeSeriesFile._read | def _read(self, directory, filename, session, path, name, extension, spatial=None, spatialReferenceID=None, replaceParamFile=None):
"""
Generic Time Series Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
timeSeries = []
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
record = {'time': sline[0],
'values': []}
for idx in range(1, len(sline)):
record['values'].append(sline[idx])
timeSeries.append(record)
self._createTimeSeriesObjects(timeSeries, filename) | python | def _read(self, directory, filename, session, path, name, extension, spatial=None, spatialReferenceID=None, replaceParamFile=None):
"""
Generic Time Series Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
timeSeries = []
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
record = {'time': sline[0],
'values': []}
for idx in range(1, len(sline)):
record['values'].append(sline[idx])
timeSeries.append(record)
self._createTimeSeriesObjects(timeSeries, filename) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
"=",
"None",
",",
"spatialReferenceID",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"# Assign file extension attribute to file object",
"self",
".",
"fileExtension",
"=",
"extension",
"timeSeries",
"=",
"[",
"]",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"record",
"=",
"{",
"'time'",
":",
"sline",
"[",
"0",
"]",
",",
"'values'",
":",
"[",
"]",
"}",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"sline",
")",
")",
":",
"record",
"[",
"'values'",
"]",
".",
"append",
"(",
"sline",
"[",
"idx",
"]",
")",
"timeSeries",
".",
"append",
"(",
"record",
")",
"self",
".",
"_createTimeSeriesObjects",
"(",
"timeSeries",
",",
"filename",
")"
] | Generic Time Series Read from File Method | [
"Generic",
"Time",
"Series",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L60-L82 | train |
CI-WATER/gsshapy | gsshapy/orm/tim.py | TimeSeriesFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Generic Time Series Write to File Method
"""
# Retrieve all time series
timeSeries = self.timeSeries
# Num TimeSeries
numTS = len(timeSeries)
# Transform into list of dictionaries for pivot tool
valList = []
for tsNum, ts in enumerate(timeSeries):
values = ts.values
for value in values:
valDict = {'time': value.simTime,
'tsNum': tsNum,
'value': value.value}
valList.append(valDict)
# Use pivot function (from lib) to pivot the values into
# a format that is easy to write.
result = pivot(valList, ('time',), ('tsNum',), 'value')
# Write lines
for line in result:
valString = ''
# Compile value string
for n in range(0, numTS):
val = '%.6f' % line[(n,)]
valString = '%s%s%s' % (
valString,
' ' * (13 - len(str(val))), # Fancy spacing trick
val)
openFile.write(' %.8f%s\n' % (line['time'], valString)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Generic Time Series Write to File Method
"""
# Retrieve all time series
timeSeries = self.timeSeries
# Num TimeSeries
numTS = len(timeSeries)
# Transform into list of dictionaries for pivot tool
valList = []
for tsNum, ts in enumerate(timeSeries):
values = ts.values
for value in values:
valDict = {'time': value.simTime,
'tsNum': tsNum,
'value': value.value}
valList.append(valDict)
# Use pivot function (from lib) to pivot the values into
# a format that is easy to write.
result = pivot(valList, ('time',), ('tsNum',), 'value')
# Write lines
for line in result:
valString = ''
# Compile value string
for n in range(0, numTS):
val = '%.6f' % line[(n,)]
valString = '%s%s%s' % (
valString,
' ' * (13 - len(str(val))), # Fancy spacing trick
val)
openFile.write(' %.8f%s\n' % (line['time'], valString)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve all time series",
"timeSeries",
"=",
"self",
".",
"timeSeries",
"# Num TimeSeries",
"numTS",
"=",
"len",
"(",
"timeSeries",
")",
"# Transform into list of dictionaries for pivot tool",
"valList",
"=",
"[",
"]",
"for",
"tsNum",
",",
"ts",
"in",
"enumerate",
"(",
"timeSeries",
")",
":",
"values",
"=",
"ts",
".",
"values",
"for",
"value",
"in",
"values",
":",
"valDict",
"=",
"{",
"'time'",
":",
"value",
".",
"simTime",
",",
"'tsNum'",
":",
"tsNum",
",",
"'value'",
":",
"value",
".",
"value",
"}",
"valList",
".",
"append",
"(",
"valDict",
")",
"# Use pivot function (from lib) to pivot the values into",
"# a format that is easy to write.",
"result",
"=",
"pivot",
"(",
"valList",
",",
"(",
"'time'",
",",
")",
",",
"(",
"'tsNum'",
",",
")",
",",
"'value'",
")",
"# Write lines",
"for",
"line",
"in",
"result",
":",
"valString",
"=",
"''",
"# Compile value string",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"numTS",
")",
":",
"val",
"=",
"'%.6f'",
"%",
"line",
"[",
"(",
"n",
",",
")",
"]",
"valString",
"=",
"'%s%s%s'",
"%",
"(",
"valString",
",",
"' '",
"*",
"(",
"13",
"-",
"len",
"(",
"str",
"(",
"val",
")",
")",
")",
",",
"# Fancy spacing trick",
"val",
")",
"openFile",
".",
"write",
"(",
"' %.8f%s\\n'",
"%",
"(",
"line",
"[",
"'time'",
"]",
",",
"valString",
")",
")"
] | Generic Time Series Write to File Method | [
"Generic",
"Time",
"Series",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L84-L121 | train |
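A behavioural sketch of the pivot step in _write, rewritten with plain dictionaries. gsshapy imports its own pivot() helper from its lib package; this is not that function, only the reshaping idea it relies on.

records = [
    {"time": 0.0, "tsNum": 0, "value": 1.5},
    {"time": 0.0, "tsNum": 1, "value": 2.5},
    {"time": 1.0, "tsNum": 0, "value": 1.6},
    {"time": 1.0, "tsNum": 1, "value": 2.6},
]
pivoted = {}
for rec in records:
    row = pivoted.setdefault(rec["time"], {"time": rec["time"]})
    row[(rec["tsNum"],)] = rec["value"]  # keyed by (tsNum,), as _write expects
for row in sorted(pivoted.values(), key=lambda r: r["time"]):
    print(row["time"], row[(0,)], row[(1,)])  # one output line per time step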
CI-WATER/gsshapy | gsshapy/orm/tim.py | TimeSeriesFile.as_dataframe | def as_dataframe(self):
"""
Return time series as pandas dataframe
"""
time_series = {}
for ts_index, ts in enumerate(self.timeSeries):
index = []
data = []
for value in ts.values:
index.append(value.simTime)
data.append(value.value)
time_series[ts_index] = pd.Series(data, index=index)
return pd.DataFrame(time_series) | python | def as_dataframe(self):
"""
Return time series as pandas dataframe
"""
time_series = {}
for ts_index, ts in enumerate(self.timeSeries):
index = []
data = []
for value in ts.values:
index.append(value.simTime)
data.append(value.value)
time_series[ts_index] = pd.Series(data, index=index)
return pd.DataFrame(time_series) | [
"def",
"as_dataframe",
"(",
"self",
")",
":",
"time_series",
"=",
"{",
"}",
"for",
"ts_index",
",",
"ts",
"in",
"enumerate",
"(",
"self",
".",
"timeSeries",
")",
":",
"index",
"=",
"[",
"]",
"data",
"=",
"[",
"]",
"for",
"value",
"in",
"ts",
".",
"values",
":",
"index",
".",
"append",
"(",
"value",
".",
"simTime",
")",
"data",
".",
"append",
"(",
"value",
".",
"value",
")",
"time_series",
"[",
"ts_index",
"]",
"=",
"pd",
".",
"Series",
"(",
"data",
",",
"index",
"=",
"index",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"time_series",
")"
] | Return time series as pandas dataframe | [
"Return",
"time",
"series",
"as",
"pandas",
"dataframe"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L123-L135 | train |
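A minimal usage sketch, assuming ts_file is a TimeSeriesFile that has already been read in; each column label is the integer position of a time series within the file.

df = ts_file.as_dataframe()
print(df.head())    # index = simulation time, one column per series
print(df[0].max())  # peak value of the first time series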
CI-WATER/gsshapy | gsshapy/orm/tim.py | TimeSeriesFile._createTimeSeriesObjects | def _createTimeSeriesObjects(self, timeSeries, filename):
"""
Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method
"""
try:
# Determine number of value columns
valColumns = len(timeSeries[0]['values'])
# Create List of GSSHAPY TimeSeries objects
series = []
for i in range(0, valColumns):
ts = TimeSeries()
ts.timeSeriesFile = self
series.append(ts)
for record in timeSeries:
for index, value in enumerate(record['values']):
# Create GSSHAPY TimeSeriesValue objects
tsVal = TimeSeriesValue(simTime=record['time'],
value=value)
# Associate with appropriate TimeSeries object via the index
tsVal.timeSeries = series[index]
except IndexError:
            log.warning(('%s was opened, but the contents of the file were empty. '
'This file will not be read into the database.') % filename)
except:
raise | python | def _createTimeSeriesObjects(self, timeSeries, filename):
"""
Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method
"""
try:
# Determine number of value columns
valColumns = len(timeSeries[0]['values'])
# Create List of GSSHAPY TimeSeries objects
series = []
for i in range(0, valColumns):
ts = TimeSeries()
ts.timeSeriesFile = self
series.append(ts)
for record in timeSeries:
for index, value in enumerate(record['values']):
# Create GSSHAPY TimeSeriesValue objects
tsVal = TimeSeriesValue(simTime=record['time'],
value=value)
# Associate with appropriate TimeSeries object via the index
tsVal.timeSeries = series[index]
except IndexError:
            log.warning(('%s was opened, but the contents of the file were empty. '
'This file will not be read into the database.') % filename)
except:
raise | [
"def",
"_createTimeSeriesObjects",
"(",
"self",
",",
"timeSeries",
",",
"filename",
")",
":",
"try",
":",
"# Determine number of value columns",
"valColumns",
"=",
"len",
"(",
"timeSeries",
"[",
"0",
"]",
"[",
"'values'",
"]",
")",
"# Create List of GSSHAPY TimeSeries objects",
"series",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"valColumns",
")",
":",
"ts",
"=",
"TimeSeries",
"(",
")",
"ts",
".",
"timeSeriesFile",
"=",
"self",
"series",
".",
"append",
"(",
"ts",
")",
"for",
"record",
"in",
"timeSeries",
":",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"record",
"[",
"'values'",
"]",
")",
":",
"# Create GSSHAPY TimeSeriesValue objects",
"tsVal",
"=",
"TimeSeriesValue",
"(",
"simTime",
"=",
"record",
"[",
"'time'",
"]",
",",
"value",
"=",
"value",
")",
"# Associate with appropriate TimeSeries object via the index",
"tsVal",
".",
"timeSeries",
"=",
"series",
"[",
"index",
"]",
"except",
"IndexError",
":",
"log",
".",
"warning",
"(",
"(",
"'%s was opened, but the contents of the file were empty.'",
"'This file will not be read into the database.'",
")",
"%",
"filename",
")",
"except",
":",
"raise"
] | Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method | [
"Create",
"GSSHAPY",
"TimeSeries",
"and",
"TimeSeriesValue",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/tim.py#L137-L164 | train |
vinci1it2000/schedula | schedula/utils/blue.py | Blueprint.extend | def extend(self, *blues, memo=None):
"""
Extends deferred operations calling each operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
"""
memo = {} if memo is None else memo
for blue in blues:
if isinstance(blue, Dispatcher):
blue = blue.blue(memo=memo)
for method, kwargs in blue.deferred:
getattr(self, method)(**kwargs)
return self | python | def extend(self, *blues, memo=None):
"""
Extends deferred operations calling each operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
"""
memo = {} if memo is None else memo
for blue in blues:
if isinstance(blue, Dispatcher):
blue = blue.blue(memo=memo)
for method, kwargs in blue.deferred:
getattr(self, method)(**kwargs)
return self | [
"def",
"extend",
"(",
"self",
",",
"*",
"blues",
",",
"memo",
"=",
"None",
")",
":",
"memo",
"=",
"{",
"}",
"if",
"memo",
"is",
"None",
"else",
"memo",
"for",
"blue",
"in",
"blues",
":",
"if",
"isinstance",
"(",
"blue",
",",
"Dispatcher",
")",
":",
"blue",
"=",
"blue",
".",
"blue",
"(",
"memo",
"=",
"memo",
")",
"for",
"method",
",",
"kwargs",
"in",
"blue",
".",
"deferred",
":",
"getattr",
"(",
"self",
",",
"method",
")",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self"
] | Extends deferred operations calling each operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...> | [
"Extends",
"deferred",
"operations",
"calling",
"each",
"operation",
"of",
"given",
"Blueprints",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/blue.py#L89-L123 | train |
CI-WATER/gsshapy | gsshapy/orm/loc.py | OutputLocationFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Generic Output Location Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
self.numLocations = sline[0]
else:
# Create GSSHAPY OutputLocation object
location = OutputLocation(linkOrCellI=sline[0],
nodeOrCellJ=sline[1])
# Associate OutputLocation with OutputLocationFile
location.outputLocationFile = self | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Generic Output Location Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
self.numLocations = sline[0]
else:
# Create GSSHAPY OutputLocation object
location = OutputLocation(linkOrCellI=sline[0],
nodeOrCellJ=sline[1])
# Associate OutputLocation with OutputLocationFile
location.outputLocationFile = self | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Assign file extension attribute to file object",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"sline",
")",
"==",
"1",
":",
"self",
".",
"numLocations",
"=",
"sline",
"[",
"0",
"]",
"else",
":",
"# Create GSSHAPY OutputLocation object",
"location",
"=",
"OutputLocation",
"(",
"linkOrCellI",
"=",
"sline",
"[",
"0",
"]",
",",
"nodeOrCellJ",
"=",
"sline",
"[",
"1",
"]",
")",
"# Associate OutputLocation with OutputLocationFile",
"location",
".",
"outputLocationFile",
"=",
"self"
] | Generic Output Location Read from File Method | [
"Generic",
"Output",
"Location",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/loc.py#L61-L81 | train |
CI-WATER/gsshapy | gsshapy/orm/loc.py | OutputLocationFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Generic Output Location Write to File Method
"""
# Retrieve output locations
locations = self.outputLocations
# Write lines
openFile.write('%s\n' % self.numLocations)
for location in locations:
openFile.write('%s %s\n' % (location.linkOrCellI,
location.nodeOrCellJ)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Generic Output Location Write to File Method
"""
# Retrieve output locations
locations = self.outputLocations
# Write lines
openFile.write('%s\n' % self.numLocations)
for location in locations:
openFile.write('%s %s\n' % (location.linkOrCellI,
location.nodeOrCellJ)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve output locations",
"locations",
"=",
"self",
".",
"outputLocations",
"# Write lines",
"openFile",
".",
"write",
"(",
"'%s\\n'",
"%",
"self",
".",
"numLocations",
")",
"for",
"location",
"in",
"locations",
":",
"openFile",
".",
"write",
"(",
"'%s %s\\n'",
"%",
"(",
"location",
".",
"linkOrCellI",
",",
"location",
".",
"nodeOrCellJ",
")",
")"
] | Generic Output Location Write to File Method | [
"Generic",
"Output",
"Location",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/loc.py#L83-L95 | train |
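A runnable illustration of the plain-text layout these two location methods exchange: a count line, then one "i j" pair per output location (io.StringIO stands in for the file).

import io

text = "2\n10 4\n12 7\n"
lines = [line.split() for line in io.StringIO(text)]
num_locations = lines[0][0]
pairs = [(i, j) for i, j in lines[1:]]
print(num_locations, pairs)  # 2 [('10', '4'), ('12', '7')]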
vinci1it2000/schedula | schedula/utils/base.py | Base.web | def web(self, depth=-1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True):
"""
Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:return:
A WebMap.
:rtype: ~schedula.utils.web.WebMap
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> webmap = dsp.web()
>>> print("Starting...\\n"); site = webmap.site().run(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected the
server is shutdown automatically.
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .web import WebMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
webmap = WebMap()
webmap.add_items(obj, workflow=False, depth=depth, **options)
if sites is not None:
import tempfile
directory = directory or tempfile.mkdtemp()
sites.add(webmap.site(directory, view=run))
return webmap | python | def web(self, depth=-1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True):
"""
Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:return:
A WebMap.
:rtype: ~schedula.utils.web.WebMap
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> webmap = dsp.web()
>>> print("Starting...\\n"); site = webmap.site().run(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected the
server is shutdown automatically.
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .web import WebMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
webmap = WebMap()
webmap.add_items(obj, workflow=False, depth=depth, **options)
if sites is not None:
import tempfile
directory = directory or tempfile.mkdtemp()
sites.add(webmap.site(directory, view=run))
return webmap | [
"def",
"web",
"(",
"self",
",",
"depth",
"=",
"-",
"1",
",",
"node_data",
"=",
"NONE",
",",
"node_function",
"=",
"NONE",
",",
"directory",
"=",
"None",
",",
"sites",
"=",
"None",
",",
"run",
"=",
"True",
")",
":",
"options",
"=",
"{",
"'node_data'",
":",
"node_data",
",",
"'node_function'",
":",
"node_function",
"}",
"options",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"options",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"NONE",
"}",
"from",
".",
"web",
"import",
"WebMap",
"from",
".",
"sol",
"import",
"Solution",
"obj",
"=",
"self",
".",
"dsp",
"if",
"isinstance",
"(",
"self",
",",
"Solution",
")",
"else",
"self",
"webmap",
"=",
"WebMap",
"(",
")",
"webmap",
".",
"add_items",
"(",
"obj",
",",
"workflow",
"=",
"False",
",",
"depth",
"=",
"depth",
",",
"*",
"*",
"options",
")",
"if",
"sites",
"is",
"not",
"None",
":",
"import",
"tempfile",
"directory",
"=",
"directory",
"or",
"tempfile",
".",
"mkdtemp",
"(",
")",
"sites",
".",
"add",
"(",
"webmap",
".",
"site",
"(",
"directory",
",",
"view",
"=",
"run",
")",
")",
"return",
"webmap"
] | Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:return:
A WebMap.
:rtype: ~schedula.utils.web.WebMap
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> webmap = dsp.web()
>>> print("Starting...\\n"); site = webmap.site().run(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected the
server is shutdown automatically. | [
"Creates",
"a",
"dispatcher",
"Flask",
"app",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/base.py#L27-L109 | train |
vinci1it2000/schedula | schedula/utils/base.py | Base.plot | def plot(self, workflow=None, view=True, depth=-1, name=NONE, comment=NONE,
format=NONE, engine=NONE, encoding=NONE, graph_attr=NONE,
node_attr=NONE, edge_attr=NONE, body=NONE, node_styles=NONE,
node_data=NONE, node_function=NONE, edge_data=NONE, max_lines=NONE,
max_width=NONE, directory=None, sites=None, index=False):
"""
Plots the Dispatcher with a graph in the DOT language with Graphviz.
:param workflow:
If True the latest solution will be plotted, otherwise the dmap.
:type workflow: bool, optional
:param view:
Open the rendered directed graph in the DOT language with the sys
default opener.
:type view: bool, optional
:param edge_data:
Edge attributes to view.
:type edge_data: tuple[str], optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param node_styles:
Default node styles according to graphviz node attributes.
:type node_styles: dict[str|Token, dict[str, str]]
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param name:
Graph name used in the source code.
:type name: str
:param comment:
Comment added to the first line of the source.
:type comment: str
:param directory:
(Sub)directory for source saving and rendering.
:type directory: str, optional
:param format:
Rendering output format ('pdf', 'png', ...).
:type format: str, optional
:param engine:
Layout command used ('dot', 'neato', ...).
:type engine: str, optional
:param encoding:
Encoding for saving the source.
:type encoding: str, optional
:param graph_attr:
Dict of (attribute, value) pairs for the graph.
:type graph_attr: dict, optional
:param node_attr:
Dict of (attribute, value) pairs set for all nodes.
:type node_attr: dict, optional
:param edge_attr:
Dict of (attribute, value) pairs set for all edges.
:type edge_attr: dict, optional
:param body:
Dict of (attribute, value) pairs to add to the graph body.
:type body: dict, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param index:
Add the site index as first page?
:type index: bool, optional
:param max_lines:
Maximum number of lines for rendering node attributes.
:type max_lines: int, optional
:param max_width:
Maximum number of characters in a line to render node attributes.
:type max_width: int, optional
:return:
A SiteMap.
:rtype: schedula.utils.drw.SiteMap
Example:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
>>> dsp.plot(view=False, graph_attr={'ratio': '1'})
SiteMap([(Dispatcher, SiteMap())])
"""
d = {
'name': name, 'comment': comment, 'format': format,
'engine': engine, 'encoding': encoding, 'graph_attr': graph_attr,
'node_attr': node_attr, 'edge_attr': edge_attr, 'body': body,
}
options = {
'digraph': {k: v for k, v in d.items() if v is not NONE} or NONE,
'node_styles': node_styles,
'node_data': node_data,
'node_function': node_function,
'edge_data': edge_data,
'max_lines': max_lines, # 5
'max_width': max_width, # 200
}
options = {k: v for k, v in options.items() if v is not NONE}
from .drw import SiteMap
from .sol import Solution
if workflow is None and isinstance(self, Solution):
workflow = True
else:
workflow = workflow or False
sitemap = SiteMap()
sitemap.add_items(self, workflow=workflow, depth=depth, **options)
if view:
import tempfile
directory = directory or tempfile.mkdtemp()
if sites is None:
sitemap.render(directory=directory, view=True, index=index)
else:
sites.add(sitemap.site(directory, view=True, index=index))
return sitemap | python | def plot(self, workflow=None, view=True, depth=-1, name=NONE, comment=NONE,
format=NONE, engine=NONE, encoding=NONE, graph_attr=NONE,
node_attr=NONE, edge_attr=NONE, body=NONE, node_styles=NONE,
node_data=NONE, node_function=NONE, edge_data=NONE, max_lines=NONE,
max_width=NONE, directory=None, sites=None, index=False):
"""
Plots the Dispatcher with a graph in the DOT language with Graphviz.
:param workflow:
If True the latest solution will be plotted, otherwise the dmap.
:type workflow: bool, optional
:param view:
Open the rendered directed graph in the DOT language with the sys
default opener.
:type view: bool, optional
:param edge_data:
Edge attributes to view.
:type edge_data: tuple[str], optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param node_styles:
Default node styles according to graphviz node attributes.
:type node_styles: dict[str|Token, dict[str, str]]
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param name:
Graph name used in the source code.
:type name: str
:param comment:
Comment added to the first line of the source.
:type comment: str
:param directory:
(Sub)directory for source saving and rendering.
:type directory: str, optional
:param format:
Rendering output format ('pdf', 'png', ...).
:type format: str, optional
:param engine:
Layout command used ('dot', 'neato', ...).
:type engine: str, optional
:param encoding:
Encoding for saving the source.
:type encoding: str, optional
:param graph_attr:
Dict of (attribute, value) pairs for the graph.
:type graph_attr: dict, optional
:param node_attr:
Dict of (attribute, value) pairs set for all nodes.
:type node_attr: dict, optional
:param edge_attr:
Dict of (attribute, value) pairs set for all edges.
:type edge_attr: dict, optional
:param body:
Dict of (attribute, value) pairs to add to the graph body.
:type body: dict, optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param index:
Add the site index as first page?
:type index: bool, optional
:param max_lines:
Maximum number of lines for rendering node attributes.
:type max_lines: int, optional
:param max_width:
Maximum number of characters in a line to render node attributes.
:type max_width: int, optional
:param view:
Open the main page of the site?
:type view: bool, optional
:return:
A SiteMap.
:rtype: schedula.utils.drw.SiteMap
Example:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
>>> dsp.plot(view=False, graph_attr={'ratio': '1'})
SiteMap([(Dispatcher, SiteMap())])
"""
d = {
'name': name, 'comment': comment, 'format': format,
'engine': engine, 'encoding': encoding, 'graph_attr': graph_attr,
'node_attr': node_attr, 'edge_attr': edge_attr, 'body': body,
}
options = {
'digraph': {k: v for k, v in d.items() if v is not NONE} or NONE,
'node_styles': node_styles,
'node_data': node_data,
'node_function': node_function,
'edge_data': edge_data,
'max_lines': max_lines, # 5
'max_width': max_width, # 200
}
options = {k: v for k, v in options.items() if v is not NONE}
from .drw import SiteMap
from .sol import Solution
if workflow is None and isinstance(self, Solution):
workflow = True
else:
workflow = workflow or False
sitemap = SiteMap()
sitemap.add_items(self, workflow=workflow, depth=depth, **options)
if view:
import tempfile
directory = directory or tempfile.mkdtemp()
if sites is None:
sitemap.render(directory=directory, view=True, index=index)
else:
sites.add(sitemap.site(directory, view=True, index=index))
return sitemap | [
"def",
"plot",
"(",
"self",
",",
"workflow",
"=",
"None",
",",
"view",
"=",
"True",
",",
"depth",
"=",
"-",
"1",
",",
"name",
"=",
"NONE",
",",
"comment",
"=",
"NONE",
",",
"format",
"=",
"NONE",
",",
"engine",
"=",
"NONE",
",",
"encoding",
"=",
"NONE",
",",
"graph_attr",
"=",
"NONE",
",",
"node_attr",
"=",
"NONE",
",",
"edge_attr",
"=",
"NONE",
",",
"body",
"=",
"NONE",
",",
"node_styles",
"=",
"NONE",
",",
"node_data",
"=",
"NONE",
",",
"node_function",
"=",
"NONE",
",",
"edge_data",
"=",
"NONE",
",",
"max_lines",
"=",
"NONE",
",",
"max_width",
"=",
"NONE",
",",
"directory",
"=",
"None",
",",
"sites",
"=",
"None",
",",
"index",
"=",
"False",
")",
":",
"d",
"=",
"{",
"'name'",
":",
"name",
",",
"'comment'",
":",
"comment",
",",
"'format'",
":",
"format",
",",
"'engine'",
":",
"engine",
",",
"'encoding'",
":",
"encoding",
",",
"'graph_attr'",
":",
"graph_attr",
",",
"'node_attr'",
":",
"node_attr",
",",
"'edge_attr'",
":",
"edge_attr",
",",
"'body'",
":",
"body",
",",
"}",
"options",
"=",
"{",
"'digraph'",
":",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"NONE",
"}",
"or",
"NONE",
",",
"'node_styles'",
":",
"node_styles",
",",
"'node_data'",
":",
"node_data",
",",
"'node_function'",
":",
"node_function",
",",
"'edge_data'",
":",
"edge_data",
",",
"'max_lines'",
":",
"max_lines",
",",
"# 5",
"'max_width'",
":",
"max_width",
",",
"# 200",
"}",
"options",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"options",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"NONE",
"}",
"from",
".",
"drw",
"import",
"SiteMap",
"from",
".",
"sol",
"import",
"Solution",
"if",
"workflow",
"is",
"None",
"and",
"isinstance",
"(",
"self",
",",
"Solution",
")",
":",
"workflow",
"=",
"True",
"else",
":",
"workflow",
"=",
"workflow",
"or",
"False",
"sitemap",
"=",
"SiteMap",
"(",
")",
"sitemap",
".",
"add_items",
"(",
"self",
",",
"workflow",
"=",
"workflow",
",",
"depth",
"=",
"depth",
",",
"*",
"*",
"options",
")",
"if",
"view",
":",
"import",
"tempfile",
"directory",
"=",
"directory",
"or",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"sites",
"is",
"None",
":",
"sitemap",
".",
"render",
"(",
"directory",
"=",
"directory",
",",
"view",
"=",
"True",
",",
"index",
"=",
"index",
")",
"else",
":",
"sites",
".",
"add",
"(",
"sitemap",
".",
"site",
"(",
"directory",
",",
"view",
"=",
"True",
",",
"index",
"=",
"index",
")",
")",
"return",
"sitemap"
] | Plots the Dispatcher with a graph in the DOT language with Graphviz.
:param workflow:
If True the latest solution will be plotted, otherwise the dmap.
:type workflow: bool, optional
:param view:
Open the rendered directed graph in the DOT language with the sys
default opener.
:type view: bool, optional
:param edge_data:
Edge attributes to view.
:type edge_data: tuple[str], optional
:param node_data:
Data node attributes to view.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to view.
:type node_function: tuple[str], optional
:param node_styles:
Default node styles according to graphviz node attributes.
:type node_styles: dict[str|Token, dict[str, str]]
:param depth:
Depth of sub-dispatch plots. If negative all levels are plotted.
:type depth: int, optional
:param name:
Graph name used in the source code.
:type name: str
:param comment:
Comment added to the first line of the source.
:type comment: str
:param directory:
(Sub)directory for source saving and rendering.
:type directory: str, optional
:param format:
Rendering output format ('pdf', 'png', ...).
:type format: str, optional
:param engine:
Layout command used ('dot', 'neato', ...).
:type engine: str, optional
:param encoding:
Encoding for saving the source.
:type encoding: str, optional
:param graph_attr:
Dict of (attribute, value) pairs for the graph.
:type graph_attr: dict, optional
:param node_attr:
Dict of (attribute, value) pairs set for all nodes.
:type node_attr: dict, optional
:param edge_attr:
Dict of (attribute, value) pairs set for all edges.
:type edge_attr: dict, optional
:param body:
Dict of (attribute, value) pairs to add to the graph body.
:type body: dict, optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param index:
Add the site index as first page?
:type index: bool, optional
:param max_lines:
Maximum number of lines for rendering node attributes.
:type max_lines: int, optional
:param max_width:
Maximum number of characters in a line to render node attributes.
:type max_width: int, optional
:param view:
Open the main page of the site?
:type view: bool, optional
:return:
A SiteMap.
:rtype: schedula.utils.drw.SiteMap
Example:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
>>> dsp.plot(view=False, graph_attr={'ratio': '1'})
SiteMap([(Dispatcher, SiteMap())]) | [
"Plots",
"the",
"Dispatcher",
"with",
"a",
"graph",
"in",
"the",
"DOT",
"language",
"with",
"Graphviz",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/base.py#L111-L265 | train |
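The record above documents `Base.plot` end to end; as a quick check of the call pattern, here is a minimal usage sketch built only from the public API the record itself shows. The dispatcher name and function below are illustrative, not taken from any record:

from schedula import Dispatcher

dsp = Dispatcher(name='demo')

def split(a):
    # One input fans out to two outputs, as in the record's doctest.
    return a + 1, a - 1

dsp.add_function('split', split, ['a'], ['b', 'c'])

# view=False skips rendering/opening a browser and simply returns the
# SiteMap; with workflow=None on a plain Dispatcher (not a Solution),
# the dmap rather than a solution workflow is plotted.
sitemap = dsp.plot(view=False, graph_attr={'ratio': '1'})
print(sitemap)  # e.g. SiteMap([(demo, SiteMap())])

Passing view=True would instead render into a temporary directory (or a supplied one) and open the result, per the tail of the function body.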
ambitioninc/rabbitmq-admin | rabbitmq_admin/base.py | Resource._api_get | def _api_get(self, url, **kwargs):
"""
A convenience wrapper for _get. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
return self._get(**kwargs) | python | def _api_get(self, url, **kwargs):
"""
A convenience wrapper for _get. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
return self._get(**kwargs) | [
"def",
"_api_get",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"+",
"url",
"kwargs",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"headers",
"=",
"deepcopy",
"(",
"self",
".",
"headers",
")",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"return",
"self",
".",
"_get",
"(",
"*",
"*",
"kwargs",
")"
] | A convenience wrapper for _get. Adds headers, auth and base url by
default | [
"A",
"convenience",
"wrapper",
"for",
"_get",
".",
"Adds",
"headers",
"auth",
"and",
"base",
"url",
"by",
"default"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L36-L47 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/base.py | Resource._api_put | def _api_put(self, url, **kwargs):
"""
A convenience wrapper for _put. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._put(**kwargs) | python | def _api_put(self, url, **kwargs):
"""
A convenience wrapper for _put. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._put(**kwargs) | [
"def",
"_api_put",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"+",
"url",
"kwargs",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"headers",
"=",
"deepcopy",
"(",
"self",
".",
"headers",
")",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"self",
".",
"_put",
"(",
"*",
"*",
"kwargs",
")"
] | A convenience wrapper for _put. Adds headers, auth and base url by
default | [
"A",
"convenience",
"wrapper",
"for",
"_put",
".",
"Adds",
"headers",
"auth",
"and",
"base",
"url",
"by",
"default"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L62-L73 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/base.py | Resource._api_post | def _api_post(self, url, **kwargs):
"""
A convenience wrapper for _post. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._post(**kwargs) | python | def _api_post(self, url, **kwargs):
"""
A convenience wrapper for _post. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._post(**kwargs) | [
"def",
"_api_post",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"+",
"url",
"kwargs",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"headers",
"=",
"deepcopy",
"(",
"self",
".",
"headers",
")",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"self",
".",
"_post",
"(",
"*",
"*",
"kwargs",
")"
] | A convenience wrapper for _post. Adds headers, auth and base url by
default | [
"A",
"convenience",
"wrapper",
"for",
"_post",
".",
"Adds",
"headers",
"auth",
"and",
"base",
"url",
"by",
"default"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L87-L98 | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/base.py | Resource._api_delete | def _api_delete(self, url, **kwargs):
"""
A convenience wrapper for _delete. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._delete(**kwargs) | python | def _api_delete(self, url, **kwargs):
"""
A convenience wrapper for _delete. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._delete(**kwargs) | [
"def",
"_api_delete",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"+",
"url",
"kwargs",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"headers",
"=",
"deepcopy",
"(",
"self",
".",
"headers",
")",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"self",
".",
"_delete",
"(",
"*",
"*",
"kwargs",
")"
] | A convenience wrapper for _delete. Adds headers, auth and base url by
default | [
"A",
"convenience",
"wrapper",
"for",
"_delete",
".",
"Adds",
"headers",
"auth",
"and",
"base",
"url",
"by",
"default"
] | ff65054115f19991da153f0e4f4e45e526545fea | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L112-L123 | train |
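The four `Resource._api_get/_put/_post/_delete` records above repeat one header-merging idiom, so a single condensed sketch covers them all. The constructor below is a guess — the records only imply that a `Resource` carries `url`, `auth`, and default `headers` — and the RabbitMQ endpoint and credentials are placeholders:

from copy import deepcopy
import requests

class MiniResource:
    # Hypothetical reduction of the wrapper pattern; not the library's class.
    def __init__(self, url, auth):
        self.url = url.rstrip('/')
        self.auth = auth
        self.headers = {'Content-Type': 'application/json'}

    def _merged_headers(self, kwargs):
        # Same precedence as the records: per-call headers override defaults.
        headers = deepcopy(self.headers)
        headers.update(kwargs.pop('headers', {}))
        return headers

    def api_get(self, path, **kwargs):
        headers = self._merged_headers(kwargs)
        return requests.get(self.url + path, auth=self.auth,
                            headers=headers, **kwargs).json()

# api = MiniResource('http://localhost:15672/api', ('guest', 'guest'))
# overview = api.api_get('/overview')

One behavioral nuance worth noting: `_api_get` returns the underlying `_get` result, while the put, post, and delete wrappers discard theirs.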
CI-WATER/gsshapy | gsshapy/base/rast.py | RasterObjectBase.getAsKmlGrid | def getAsKmlGrid(self, session, path=None, documentName=None, colorRamp=ColorRampEnum.COLOR_RAMP_HUE, alpha=1.0,
noDataValue=None):
"""
Retrieve the raster as a KML document with each cell of the raster represented as a vector polygon. The result
is a vector grid of raster cells. Cells with the no data value are excluded.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
"""
if type(self.raster) != type(None):
# Set Document Name
if documentName is None:
try:
documentName = self.filename
except AttributeError:
documentName = 'default'
# Set no data value to default
if noDataValue is None:
noDataValue = self.defaultNoDataValue
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
kmlString = converter.getAsKmlGrid(tableName=self.tableName,
rasterId=self.id,
rasterIdFieldName='id',
rasterFieldName=self.rasterColumnName,
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue,
discreet=self.discreet)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString | python | def getAsKmlGrid(self, session, path=None, documentName=None, colorRamp=ColorRampEnum.COLOR_RAMP_HUE, alpha=1.0,
noDataValue=None):
"""
Retrieve the raster as a KML document with each cell of the raster represented as a vector polygon. The result
is a vector grid of raster cells. Cells with the no data value are excluded.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
"""
if type(self.raster) != type(None):
# Set Document Name
if documentName is None:
try:
documentName = self.filename
except AttributeError:
documentName = 'default'
# Set no data value to default
if noDataValue is None:
noDataValue = self.defaultNoDataValue
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
kmlString = converter.getAsKmlGrid(tableName=self.tableName,
rasterId=self.id,
rasterIdFieldName='id',
rasterFieldName=self.rasterColumnName,
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue,
discreet=self.discreet)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString | [
"def",
"getAsKmlGrid",
"(",
"self",
",",
"session",
",",
"path",
"=",
"None",
",",
"documentName",
"=",
"None",
",",
"colorRamp",
"=",
"ColorRampEnum",
".",
"COLOR_RAMP_HUE",
",",
"alpha",
"=",
"1.0",
",",
"noDataValue",
"=",
"None",
")",
":",
"if",
"type",
"(",
"self",
".",
"raster",
")",
"!=",
"type",
"(",
"None",
")",
":",
"# Set Document Name",
"if",
"documentName",
"is",
"None",
":",
"try",
":",
"documentName",
"=",
"self",
".",
"filename",
"except",
"AttributeError",
":",
"documentName",
"=",
"'default'",
"# Set no data value to default",
"if",
"noDataValue",
"is",
"None",
":",
"noDataValue",
"=",
"self",
".",
"defaultNoDataValue",
"# Make sure the raster field is valid",
"converter",
"=",
"RasterConverter",
"(",
"sqlAlchemyEngineOrSession",
"=",
"session",
")",
"# Configure color ramp",
"if",
"isinstance",
"(",
"colorRamp",
",",
"dict",
")",
":",
"converter",
".",
"setCustomColorRamp",
"(",
"colorRamp",
"[",
"'colors'",
"]",
",",
"colorRamp",
"[",
"'interpolatedPoints'",
"]",
")",
"else",
":",
"converter",
".",
"setDefaultColorRamp",
"(",
"colorRamp",
")",
"kmlString",
"=",
"converter",
".",
"getAsKmlGrid",
"(",
"tableName",
"=",
"self",
".",
"tableName",
",",
"rasterId",
"=",
"self",
".",
"id",
",",
"rasterIdFieldName",
"=",
"'id'",
",",
"rasterFieldName",
"=",
"self",
".",
"rasterColumnName",
",",
"documentName",
"=",
"documentName",
",",
"alpha",
"=",
"alpha",
",",
"noDataValue",
"=",
"noDataValue",
",",
"discreet",
"=",
"self",
".",
"discreet",
")",
"if",
"path",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"kmlString",
")",
"return",
"kmlString"
] | Retrieve the raster as a KML document with each cell of the raster represented as a vector polygon. The result
is a vector grid of raster cells. Cells with the no data value are excluded.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string | [
"Retrieve",
"the",
"raster",
"as",
"a",
"KML",
"document",
"with",
"each",
"cell",
"of",
"the",
"raster",
"represented",
"as",
"a",
"vector",
"polygon",
".",
"The",
"result",
"is",
"a",
"vector",
"grid",
"of",
"raster",
"cells",
".",
"Cells",
"with",
"the",
"no",
"data",
"value",
"are",
"excluded",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/rast.py#L33-L91 | train |
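A hedged sketch of driving `getAsKmlGrid` from the record above. Everything outside the method call is an assumption: the database DSN is a placeholder, and `RasterMapFile` is only a guess at a concrete `RasterObjectBase` subclass exposed by gsshapy — substitute whichever raster model your schema uses. The custom-ramp dict follows the shape the docstring specifies:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from gsshapy.orm import RasterMapFile  # assumed import path

engine = create_engine('postgresql://user:pass@localhost/gsshapy_db')  # placeholder DSN
session = sessionmaker(bind=engine)()

raster = session.query(RasterMapFile).first()
ramp = {'colors': [(0, 0, 255), (0, 255, 0), (255, 0, 0)],
        'interpolatedPoints': 10}
kml = raster.getAsKmlGrid(session,
                          path='/tmp/grid.kml',   # also written to disk
                          documentName='Elevation',
                          colorRamp=ramp,
                          alpha=0.8)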
CI-WATER/gsshapy | gsshapy/base/rast.py | RasterObjectBase.getAsGrassAsciiGrid | def getAsGrassAsciiGrid(self, session):
"""
Retrieve the raster in the GRASS ASCII Grid format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
Returns:
str: GRASS ASCII string.
"""
if type(self.raster) != type(None):
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
return converter.getAsGrassAsciiRaster(tableName=self.tableName,
rasterIdFieldName='id',
rasterId=self.id,
rasterFieldName=self.rasterColumnName) | python | def getAsGrassAsciiGrid(self, session):
"""
Retrieve the raster in the GRASS ASCII Grid format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
Returns:
str: GRASS ASCII string.
"""
if type(self.raster) != type(None):
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
return converter.getAsGrassAsciiRaster(tableName=self.tableName,
rasterIdFieldName='id',
rasterId=self.id,
rasterFieldName=self.rasterColumnName) | [
"def",
"getAsGrassAsciiGrid",
"(",
"self",
",",
"session",
")",
":",
"if",
"type",
"(",
"self",
".",
"raster",
")",
"!=",
"type",
"(",
"None",
")",
":",
"# Make sure the raster field is valid",
"converter",
"=",
"RasterConverter",
"(",
"sqlAlchemyEngineOrSession",
"=",
"session",
")",
"return",
"converter",
".",
"getAsGrassAsciiRaster",
"(",
"tableName",
"=",
"self",
".",
"tableName",
",",
"rasterIdFieldName",
"=",
"'id'",
",",
"rasterId",
"=",
"self",
".",
"id",
",",
"rasterFieldName",
"=",
"self",
".",
"rasterColumnName",
")"
] | Retrieve the raster in the GRASS ASCII Grid format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
Returns:
str: GRASS ASCII string. | [
"Retrieve",
"the",
"raster",
"in",
"the",
"GRASS",
"ASCII",
"Grid",
"format",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/rast.py#L229-L246 | train |
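Continuing the same assumed session/raster setup, the GRASS ASCII export from the record above is a one-liner; unlike the KML variant it takes no `path` argument, so persisting the string is the caller's job:

# Reuses `session` and `raster` from the previous sketch.
ascii_grid = raster.getAsGrassAsciiGrid(session)
if ascii_grid is not None:  # the method falls through to None on an empty raster
    with open('/tmp/grid.asc', 'w') as f:
        f.write(ascii_grid)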
pedrotgn/pyactor | pyactor/context.py | shutdown | def shutdown(url=None):
'''
Stops the Host passed by parameter or all of them if none is
specified, stopping at the same time all its actors.
Should be called at the end of its usage, to finish correctly
all the connections and threads.
'''
if url is None:
for host in util.hosts.values():
host.shutdown()
global core_type
core_type = None
else:
host = util.hosts[url]
host.shutdown() | python | def shutdown(url=None):
'''
Stops the Host passed by parameter or all of them if none is
specified, stopping at the same time all its actors.
Should be called at the end of its usage, to finish correctly
all the connections and threads.
'''
if url is None:
for host in util.hosts.values():
host.shutdown()
global core_type
core_type = None
else:
host = util.hosts[url]
host.shutdown() | [
"def",
"shutdown",
"(",
"url",
"=",
"None",
")",
":",
"if",
"url",
"is",
"None",
":",
"for",
"host",
"in",
"util",
".",
"hosts",
".",
"values",
"(",
")",
":",
"host",
".",
"shutdown",
"(",
")",
"global",
"core_type",
"core_type",
"=",
"None",
"else",
":",
"host",
"=",
"util",
".",
"hosts",
"[",
"url",
"]",
"host",
".",
"shutdown",
"(",
")"
] | Stops the Host passed by parameter or all of them if none is
specified, stopping at the same time all its actors.
Should be called at the end of its usage, to finish correctly
all the connections and threads. | [
"Stops",
"the",
"Host",
"passed",
"by",
"parameter",
"or",
"all",
"of",
"them",
"if",
"none",
"is",
"specified",
"stopping",
"at",
"the",
"same",
"time",
"all",
"its",
"actors",
".",
"Should",
"be",
"called",
"at",
"the",
"end",
"of",
"its",
"usage",
"to",
"finish",
"correctly",
"all",
"the",
"connections",
"and",
"threads",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L483-L497 | train |
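To round off the `shutdown` record, a hedged lifecycle sketch. `set_context` and `create_host` are pyactor's documented entry points, but their exact defaults are not shown in the record, and the host URL below is a placeholder:

from pyactor.context import set_context, create_host, shutdown

set_context('thread')                       # pick the threaded core_type
host = create_host('local://host:6666/h')   # placeholder URL, registered in util.hosts
# ... spawn actors and do work here ...

shutdown('local://host:6666/h')  # stop this one host and its actors
# or simply shutdown() to stop every registered host and reset core_type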