Dataset schema (10k rows):

| Column | Type | Stats |
|---|---|---|
| Unnamed: 0 | int64 | 0 - 10k |
| repository_name | string | lengths 7 - 54 |
| func_path_in_repository | string | lengths 5 - 223 |
| func_name | string | lengths 1 - 134 |
| whole_func_string | string | lengths 100 - 30.3k |
| language | string | 1 class ("python") |
| func_code_string | string | lengths 100 - 30.3k |
| func_code_tokens | string | lengths 138 - 33.2k |
| func_documentation_string | string | lengths 1 - 15k |
| func_documentation_tokens | string | lengths 5 - 5.14k |
| split_name | string | 1 class ("train") |
| func_code_url | string | lengths 91 - 315 |

7,700 | Galarzaa90/tibia.py | tibiapy/world.py | World._parse_world_info

def _parse_world_info(self, world_info_table):
    """
    Parses the World Information table from Tibia.com and adds the found values to the object.

    Parameters
    ----------
    world_info_table: :class:`list`[:class:`bs4.Tag`]
    """
    world_info = {}
    for row in world_info_table:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        field, value = cols
        # Normalize labels, e.g. "Players Online:" -> "players_online".
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        value = value.replace("\xa0", " ")
        world_info[field] = value
    try:
        self.online_count = int(world_info.pop("players_online"))
    except KeyError:
        self.online_count = 0
    self.location = try_enum(WorldLocation, world_info.pop("location"))
    self.pvp_type = try_enum(PvpType, world_info.pop("pvp_type"))
    self.transfer_type = try_enum(TransferType, world_info.pop("transfer_type", None), TransferType.REGULAR)
    m = record_regexp.match(world_info.pop("online_record"))
    if m:
        self.record_count = int(m.group("count"))
        self.record_date = parse_tibia_datetime(m.group("date"))
    if "world_quest_titles" in world_info:
        self.world_quest_titles = [q.strip() for q in world_info.pop("world_quest_titles").split(",")]
    self.experimental = world_info.pop("game_world_type") != "Regular"
    self._parse_battleye_status(world_info.pop("battleye_status"))
    self.premium_only = "premium_type" in world_info
    month, year = world_info.pop("creation_date").split("/")
    month = int(month)
    year = int(year)
    # Two-digit years: 91-99 -> 199x, otherwise 20xx.
    if year > 90:
        year += 1900
    else:
        year += 2000
    self.creation_date = "%d-%02d" % (year, month)
    # Any leftover fields map directly onto attributes of the same name.
    for k, v in world_info.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            pass

python | train | https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/world.py#L306-L352
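
A minimal, standalone sketch of the label-normalization step this parser relies on (reproduced here for illustration; not part of tibia.py itself):

    # Non-breaking spaces and spaces become underscores, colons are dropped:
    field = "Players\xa0Online:"
    field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
    assert field == "players_online"
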
7,701 | PMEAL/OpenPNM | openpnm/materials/VoronoiFibers.py | DelaunayGeometry._rotate_and_chop

def _rotate_and_chop(self, verts, normal, axis=[0, 0, 1]):
    r"""
    Method to rotate a set of vertices (or coords) to align with an axis.
    Points must be coplanar and the normal must be given.
    Chops the axis coordinate to give the vertices back in 2D.
    Used to prepare verts for printing or calculating the convex hull, in
    order to arrange them in hull order for calculations and printing.
    """
    xaxis = [1, 0, 0]
    yaxis = [0, 1, 0]
    zaxis = [0, 0, 1]
    angle = tr.angle_between_vectors(normal, axis)
    if angle == 0.0 or angle == np.pi:
        # We are already aligned
        facet = verts
    else:
        M = tr.rotation_matrix(tr.angle_between_vectors(normal, axis),
                               tr.vector_product(normal, axis))
        try:
            facet = np.dot(verts, M[:3, :3].T)
        except ValueError:
            pass
    try:
        x = facet[:, 0]
        y = facet[:, 1]
        z = facet[:, 2]
    except IndexError:
        x = facet[0]
        y = facet[1]
        z = facet[2]
    # Work out span of points and set axes scales to cover this and be
    # equal in both dimensions
    if axis == xaxis:
        output = np.column_stack((y, z))
    elif axis == yaxis:
        output = np.column_stack((x, z))
    elif axis == zaxis:
        output = np.column_stack((x, y))
    else:
        output = facet
    return output

python | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/materials/VoronoiFibers.py#L955-L995
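
A hypothetical call, assuming a DelaunayGeometry instance `geom` and a coplanar facet lying in the x = 0 plane (numpy as np, as in the method above):

    import numpy as np

    verts = np.array([[0.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0],
                      [0.0, 1.0, 1.0],
                      [0.0, 0.0, 1.0]])
    # Rotate the facet normal [1, 0, 0] onto the z-axis, then drop z.
    flat = geom._rotate_and_chop(verts, normal=[1, 0, 0], axis=[0, 0, 1])
    # `flat` is an (N, 2) array, ready for a 2D convex-hull computation.
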
7,702 | hbldh/pybankid | bankid/certutils.py | split_certificate

def split_certificate(certificate_path, destination_folder, password=None):
    """Splits a PKCS12 certificate into Base64-encoded DER certificate and key.

    This method splits a potentially password-protected
    `PKCS12 <https://en.wikipedia.org/wiki/PKCS_12>`_ certificate
    (format ``.p12`` or ``.pfx``) into one certificate and one key part, both in
    `pem <https://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions>`_
    format.

    :returns: Tuple of certificate and key string data.
    :rtype: tuple
    """
    try:
        # Attempt Linux and Darwin call first.
        p = subprocess.Popen(
            ["openssl", "version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        sout, serr = p.communicate()
        openssl_executable_version = sout.decode().lower()
        if not (
            openssl_executable_version.startswith("openssl")
            or openssl_executable_version.startswith("libressl")
        ):
            raise BankIDError(
                "OpenSSL executable could not be found. "
                "Splitting cannot be performed."
            )
        openssl_executable = "openssl"
    except Exception:
        # Attempt to call on standard Git for Windows path.
        p = subprocess.Popen(
            ["C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe", "version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        sout, serr = p.communicate()
        if not sout.decode().lower().startswith("openssl"):
            raise BankIDError(
                "OpenSSL executable could not be found. "
                "Splitting cannot be performed."
            )
        openssl_executable = "C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe"
    if not os.path.exists(os.path.abspath(os.path.expanduser(destination_folder))):
        os.makedirs(os.path.abspath(os.path.expanduser(destination_folder)))
    # Paths to output files.
    out_cert_path = os.path.join(
        os.path.abspath(os.path.expanduser(destination_folder)), "certificate.pem"
    )
    out_key_path = os.path.join(
        os.path.abspath(os.path.expanduser(destination_folder)), "key.pem"
    )
    # Use openssl for converting to pem format.
    pipeline_1 = [
        openssl_executable,
        "pkcs12",
        "-in",
        "{0}".format(certificate_path),
        "-passin" if password is not None else "",
        "pass:{0}".format(password) if password is not None else "",
        "-out",
        "{0}".format(out_cert_path),
        "-clcerts",
        "-nokeys",
    ]
    p = subprocess.Popen(
        list(filter(None, pipeline_1)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    p.communicate()
    pipeline_2 = [
        openssl_executable,
        "pkcs12",
        "-in",
        "{0}".format(certificate_path),
        "-passin" if password is not None else "",
        "pass:{0}".format(password) if password is not None else "",
        "-out",
        "{0}".format(out_key_path),
        "-nocerts",
        "-nodes",
    ]
    p = subprocess.Popen(
        list(filter(None, pipeline_2)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    p.communicate()
    # Return path tuples.
    return out_cert_path, out_key_path

python | train | https://github.com/hbldh/pybankid/blob/1405f66e41f912cdda15e20aea08cdfa6b60480a/bankid/certutils.py#L62-L152
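
A hedged usage sketch (file names and the password are hypothetical):

    cert_path, key_path = split_certificate(
        "bankid_cert.p12", "~/bankid_pem/", password="changeit"
    )
    # cert_path -> .../bankid_pem/certificate.pem
    # key_path  -> .../bankid_pem/key.pem
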
7,703 | Shapeways/coyote_framework | coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py | WebElementWrapper.click

def click(self, force_click=False):
    """
    Clicks the element

    @type force_click: bool
    @param force_click: force a click on the element using javascript, skipping webdriver
    @rtype: WebElementWrapper
    @return: Returns itself
    """
    js_executor = self.driver_wrapper.js_executor

    def click_element():
        """Wrapper to call click"""
        return self.element.click()

    def force_click_element():
        """Javascript wrapper to force_click the element"""
        js_executor.execute_template('clickElementTemplate', {}, self.element)
        return True

    if force_click:
        self.execute_and_handle_webelement_exceptions(force_click_element, 'click element by javascript')
    else:
        self.execute_and_handle_webelement_exceptions(click_element, 'click')
    return self

python | train | https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L127-L157
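
A hypothetical usage, assuming `element` is a WebElementWrapper obtained from the framework's driver wrapper (the lookup call is illustrative):

    element = driver.find(...)           # hypothetical lookup returning a WebElementWrapper
    element.click()                      # normal WebDriver click
    element.click(force_click=True)      # JavaScript click, e.g. for obscured elements
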
7,704 | YosaiProject/yosai | yosai/core/serialize/marshalling.py | default_unmarshaller

def default_unmarshaller(instance, state):
    """
    Restore the state of an object.

    If the ``__setstate__()`` method exists on the instance, it is called with
    the state object as the argument. Otherwise, the instance's ``__dict__`` is
    replaced with ``state``.

    :param instance: an uninitialized instance
    :param state: the state object, as returned by :func:`default_marshaller`
    """
    if hasattr(instance, '__setstate__'):
        instance.__setstate__(state)
    else:
        try:
            instance.__dict__.update(state)
        except AttributeError:
            raise TypeError('{!r} has no __dict__ attribute and does not implement __setstate__()'
                            .format(instance.__class__.__name__))

python | train | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/serialize/marshalling.py#L26-L44
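
A self-contained example of the ``__dict__`` fallback path (the class here is purely illustrative):

    class Point:
        pass

    p = Point.__new__(Point)                  # uninitialized instance
    default_unmarshaller(p, {'x': 1, 'y': 2})
    assert (p.x, p.y) == (1, 2)
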
7,705 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py | GroupSizer

def GroupSizer(field_number, is_repeated, is_packed):
    """Returns a sizer for a group field."""
    # Groups are delimited by a start tag and an end tag, hence the * 2.
    tag_size = _TagSize(field_number) * 2
    assert not is_packed
    if is_repeated:
        def RepeatedFieldSize(value):
            result = tag_size * len(value)
            for element in value:
                result += element.ByteSize()
            return result
        return RepeatedFieldSize
    else:
        def FieldSize(value):
            return tag_size + value.ByteSize()
        return FieldSize

python | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L274-L289
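
A sketch of how a returned sizer behaves; `FakeMsg` is a stand-in exposing the ByteSize() interface that protobuf messages provide, and the call assumes the encoder module's context (where _TagSize is defined):

    class FakeMsg:
        def ByteSize(self):
            return 10

    sizer = GroupSizer(field_number=1, is_repeated=False, is_packed=False)
    # Field number 1 has a 1-byte tag, doubled for the start/end group tags:
    print(sizer(FakeMsg()))  # -> 12
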
7,706 | ternaris/marv | marv/cli.py | marvcli_undiscard

def marvcli_undiscard(datasets):
    """Undiscard DATASETS previously discarded."""
    create_app()
    setids = parse_setids(datasets, discarded=True)
    dataset = Dataset.__table__
    stmt = dataset.update()\
        .where(dataset.c.setid.in_(setids))\
        .values(discarded=False)
    db.session.execute(stmt)
    db.session.commit()

python | train | https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/marv/cli.py#L234-L244
7,707 | cga-harvard/Hypermap-Registry | hypermap/search_api/utils.py | gap_to_sorl

def gap_to_sorl(time_gap):
    """
    P1D to +1DAY

    :param time_gap: an ISO 8601 duration string, e.g. "P1D".
    :return: the duration in Solr's gap format.
    """
    quantity, unit = parse_ISO8601(time_gap)
    if unit[0] == "WEEKS":
        # Solr has no week unit, so expand weeks to days.
        return "+{0}DAYS".format(quantity * 7)
    else:
return "+{0}{1}".format(quantity, unit[0]) | python | def gap_to_sorl(time_gap):
"""
P1D to +1DAY
:param time_gap:
:return: solr's format duration.
"""
quantity, unit = parse_ISO8601(time_gap)
if unit[0] == "WEEKS":
return "+{0}DAYS".format(quantity * 7)
else:
return "+{0}{1}".format(quantity, unit[0]) | ['def', 'gap_to_sorl', '(', 'time_gap', ')', ':', 'quantity', ',', 'unit', '=', 'parse_ISO8601', '(', 'time_gap', ')', 'if', 'unit', '[', '0', ']', '==', '"WEEKS"', ':', 'return', '"+{0}DAYS"', '.', 'format', '(', 'quantity', '*', '7', ')', 'else', ':', 'return', '"+{0}{1}"', '.', 'format', '(', 'quantity', ',', 'unit', '[', '0', ']', ')'] | P1D to +1DAY
:param time_gap:
:return: solr's format duration. | ['P1D', 'to', '+', '1DAY', ':', 'param', 'time_gap', ':', ':', 'return', ':', 'solr', 's', 'format', 'duration', '.'] | train | https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/utils.py#L185-L195 |
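
A hypothetical call, assuming parse_ISO8601 returns a (quantity, unit) pair such as (2, ("WEEKS", ...)):

    gap_to_sorl("P2W")   # -> "+14DAYS", since weeks are expanded to days
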
7,708 | DataONEorg/d1_python | lib_common/src/d1_common/type_conversions.py | str_to_etree

def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize API XML doc to an ElementTree.

    Args:
      xml_str: bytes
        DataONE API XML doc
      encoding: str
        Decoder to use when converting the XML doc ``bytes`` to a Unicode str.

    Returns:
      ElementTree: Matching the API version of the XML doc.
    """
    parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    return xml.etree.ElementTree.fromstring(xml_str, parser=parser)

python | train | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/type_conversions.py#L506-L521
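
A minimal call (the XML snippet is illustrative, not a full DataONE document):

    root = str_to_etree(b'<node identifier="urn:node:EXAMPLE"/>')
    print(root.tag, root.attrib["identifier"])  # node urn:node:EXAMPLE
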
7,709 | klahnakoski/pyLibrary | mo_math/vendor/strangman/stats.py | zs

def zs(inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   lzs(inlist)
    """
    zscores = []
    for item in inlist:
        zscores.append(z(inlist, item))
    return zscores

python | train | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L690-L699
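
An example, assuming the companion z() helper in the same module computes (score - mean) / stdev over the list:

    scores = [2, 4, 4, 4, 5, 5, 7, 9]
    print(zs(scores))  # one z-score per input value; values at the mean map to 0.0
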
7,710 | kubernetes-client/python | kubernetes/client/apis/batch_v2alpha1_api.py | BatchV2alpha1Api.list_namespaced_cron_job

def list_namespaced_cron_job(self, namespace, **kwargs):
    """
    list or watch objects of kind CronJob

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V2alpha1CronJobList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
    else:
        (data) = self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
        return data

python | train | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/batch_v2alpha1_api.py#L617-L644
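
A hedged client-side sketch (requires a cluster with the batch/v2alpha1 API enabled and a kubernetes-client release that still ships BatchV2alpha1Api):

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV2alpha1Api()
    cron_jobs = api.list_namespaced_cron_job("default", watch=False)
    for cj in cron_jobs.items:
        print(cj.metadata.name)
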
7,711 | saltstack/salt | salt/modules/boto_apigateway.py | describe_api_integration_response

def describe_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
                                      region=None, key=None, keyid=None, profile=None):
    '''
    Get an integration response for a given method in a given API

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_api_integration_response restApiId resourcePath httpMethod statusCode
    '''
    try:
        resource = describe_api_resource(restApiId, resourcePath, region=region,
                                         key=key, keyid=keyid, profile=profile).get('resource')
        if resource:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            response = conn.get_integration_response(restApiId=restApiId, resourceId=resource['id'],
                                                     httpMethod=httpMethod, statusCode=statusCode)
            return {'response': _convert_datetime_str(response)}
        return {'error': 'no such resource'}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}

python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L1225-L1247
7,712 | hazelcast/hazelcast-python-client | hazelcast/proxy/transactional_list.py | TransactionalList.add

def add(self, item):
    """
    Transactional implementation of :func:`List.add(item) <hazelcast.proxy.list.List.add>`

    :param item: (object), the new item to be added.
    :return: (bool), ``true`` if the item is added successfully, ``false`` otherwise.
    """
    check_not_none(item, "item can't be none")
    return self._encode_invoke(transactional_list_add_codec, item=self._to_data(item))

python | train | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/transactional_list.py#L11-L19
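
A hedged sketch of reaching this method through the client's transaction API (the transaction accessor names are assumptions based on the 3.x-era client):

    import hazelcast

    client = hazelcast.HazelcastClient()
    transaction = client.new_transaction()
    transaction.begin()
    try:
        txn_list = transaction.get_list("my-list")   # TransactionalList proxy
        txn_list.add("item-1")                       # the add() shown above
        transaction.commit()
    except Exception:
        transaction.rollback()
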
7,713 | thomasvandoren/bugzscout-py | bugzscout/ext/cli.py | _from_args

def _from_args(args):
    """Factory method to create a new instance from command line args.

    :param args: instance of :class:`argparse.Namespace`
    """
    return bugzscout.BugzScout(args.url, args.user, args.project, args.area)

python | train | https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/bugzscout/ext/cli.py#L42-L47
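
An illustrative call with a hand-built namespace (the URL and field values are hypothetical):

    import argparse

    args = argparse.Namespace(url="https://example.fogbugz.com/scoutSubmit.asp",
                              user="errors@example.com",
                              project="MyProject",
                              area="Crashes")
    scout = _from_args(args)
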
7,714 | bovee/Aston | aston/trace/math_traces.py | movingaverage

def movingaverage(arr, window):
    """
    Calculates the moving average ("rolling mean") of an array
    of a certain window size.
    """
    # A uniform kernel of length `window`; convolving with it averages.
    m = np.ones(int(window)) / int(window)
    return scipy.ndimage.convolve1d(arr, m, axis=0, mode='reflect')

python | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L73-L79
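
Example (mode='reflect' pads the edges by mirroring, so the output keeps the input's length):

    import numpy as np

    arr = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    movingaverage(arr, 3)
    # -> array([1.333..., 2.0, 3.0, 4.0, 4.666...])
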
7,715 | casacore/python-casacore | casacore/measures/__init__.py | measures.riseset

def riseset(self, crd, ev="5deg"):
    """This will give the rise/set times of a source. It needs the
    position in the frame, and a time. If the latter is not set, the
    current time will be used.

    :param crd: a direction measure
    :param ev: the elevation limit as a quantity or string
    :returns: The returned value is a `dict` with a
              'solved' key, which is `False` if the source is always
              below or above the horizon. In that case the rise and set
              fields will all have a string value. The `dict` also returns
              a rise and set `dict`, with 'last' and 'utc' keys showing
              the rise and set times as epochs.
    """
    a = self.rise(crd, ev)
    if isinstance(a['rise'], str):
        return {"rise": {"last": a[0], "utc": a[0]},
                "set": {"last": a[1], "utc": a[1]},
                "solved": False}
    ofe = self.measure(self._framestack["epoch"], "utc")
    if not is_measure(ofe):
        ofe = self.epoch('utc', 'today')
    x = a.copy()
    for k in x:
        x[k] = self.measure(
            self.epoch("last",
                       a[k].totime(),
                       off=self.epoch("r_utc",
                                      (dq.quantity(ofe["m0"])
                                       + dq.quantity("0.5d")
                                       ))
                       ),
            "utc")
    return {"rise": {"last": self.epoch("last",
                                        a["rise"].totime()),
                     "utc": x["rise"]},
            "set": {"last": self.epoch("last",
                                       a["set"].totime()),
                    "utc": x["set"]},
            "solved": True
} | python | def riseset(self, crd, ev="5deg"):
"""This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs.
"""
a = self.rise(crd, ev)
if isinstance(a['rise'], str):
return {"rise": {"last": a[0], "utc": a[0]},
"set": {"last": a[1], "utc": a[1]},
"solved": False}
ofe = self.measure(self._framestack["epoch"], "utc")
if not is_measure(ofe):
ofe = self.epoch('utc', 'today')
x = a.copy()
for k in x:
x[k] = self.measure(
self.epoch("last",
a[k].totime(),
off=self.epoch("r_utc",
(dq.quantity(ofe["m0"])
+ dq.quantity("0.5d")
))
),
"utc")
return {"rise": {"last": self.epoch("last",
a["rise"].totime()),
"utc": x["rise"]},
"set": {"last": self.epoch("last",
a["set"].totime()),
"utc": x["set"]},
"solved": True
} | ['def', 'riseset', '(', 'self', ',', 'crd', ',', 'ev', '=', '"5deg"', ')', ':', 'a', '=', 'self', '.', 'rise', '(', 'crd', ',', 'ev', ')', 'if', 'isinstance', '(', 'a', '[', "'rise'", ']', ',', 'str', ')', ':', 'return', '{', '"rise"', ':', '{', '"last"', ':', 'a', '[', '0', ']', ',', '"utc"', ':', 'a', '[', '0', ']', '}', ',', '"set"', ':', '{', '"last"', ':', 'a', '[', '1', ']', ',', '"utc"', ':', 'a', '[', '1', ']', '}', ',', '"solved"', ':', 'False', '}', 'ofe', '=', 'self', '.', 'measure', '(', 'self', '.', '_framestack', '[', '"epoch"', ']', ',', '"utc"', ')', 'if', 'not', 'is_measure', '(', 'ofe', ')', ':', 'ofe', '=', 'self', '.', 'epoch', '(', "'utc'", ',', "'today'", ')', 'x', '=', 'a', '.', 'copy', '(', ')', 'for', 'k', 'in', 'x', ':', 'x', '[', 'k', ']', '=', 'self', '.', 'measure', '(', 'self', '.', 'epoch', '(', '"last"', ',', 'a', '[', 'k', ']', '.', 'totime', '(', ')', ',', 'off', '=', 'self', '.', 'epoch', '(', '"r_utc"', ',', '(', 'dq', '.', 'quantity', '(', 'ofe', '[', '"m0"', ']', ')', '+', 'dq', '.', 'quantity', '(', '"0.5d"', ')', ')', ')', ')', ',', '"utc"', ')', 'return', '{', '"rise"', ':', '{', '"last"', ':', 'self', '.', 'epoch', '(', '"last"', ',', 'a', '[', '"rise"', ']', '.', 'totime', '(', ')', ')', ',', '"utc"', ':', 'x', '[', '"rise"', ']', '}', ',', '"set"', ':', '{', '"last"', ':', 'self', '.', 'epoch', '(', '"last"', ',', 'a', '[', '"set"', ']', '.', 'totime', '(', ')', ')', ',', '"utc"', ':', 'x', '[', '"set"', ']', '}', ',', '"solved"', ':', 'True', '}'] | This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs. | ['This', 'will', 'give', 'the', 'rise', '/', 'set', 'times', 'of', 'a', 'source', '.', 'It', 'needs', 'the', 'position', 'in', 'the', 'frame', 'and', 'a', 'time', '.', 'If', 'the', 'latter', 'is', 'not', 'set', 'the', 'current', 'time', 'will', 'be', 'used', '.'] | train | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L807-L850 |
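A hedged usage sketch for riseset(); it assumes python-casacore is installed and that observatory(), do_frame(), and direction() behave as in the standard measures interface (the observatory name is an illustrative choice):

```python
from casacore.measures import measures

dm = measures()
dm.do_frame(dm.observatory('WSRT'))   # the frame needs a position
result = dm.riseset(dm.direction('SUN'), ev='5deg')
if result['solved']:
    print(result['rise']['utc'], result['set']['utc'])
else:
    print('source never crosses the 5 deg elevation limit')
```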
7,716 | tanghaibao/jcvi | jcvi/apps/ks.py | prepare | def prepare(args):
"""
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
"""
from jcvi.formats.fasta import Fasta
p = OptionParser(prepare.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
outfile = opts.outfile
if len(args) == 2:
pairsfile, cdsfile = args
pepfile = None
elif len(args) == 3:
pairsfile, cdsfile, pepfile = args
else:
sys.exit(not p.print_help())
f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, "w")
if pepfile:
assert outfile != "stdout", "Please specify outfile name."
f2 = Fasta(pepfile)
fw2 = must_open(outfile + ".pep", "w")
for row in fp:
if row[0] == '#':
continue
a, b = row.split()[:2]
if a == b:
logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
continue
if a not in f:
a = find_first_isoform(a, f)
assert a, a
if b not in f:
b = find_first_isoform(b, f)
assert b, b
acds = f[a]
bcds = f[b]
SeqIO.write((acds, bcds), fw, "fasta")
if pepfile:
apep = f2[a]
bpep = f2[b]
SeqIO.write((apep, bpep), fw2, "fasta")
fw.close()
if pepfile:
fw2.close() | python | def prepare(args):
"""
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
"""
from jcvi.formats.fasta import Fasta
p = OptionParser(prepare.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
outfile = opts.outfile
if len(args) == 2:
pairsfile, cdsfile = args
pepfile = None
elif len(args) == 3:
pairsfile, cdsfile, pepfile = args
else:
sys.exit(not p.print_help())
f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, "w")
if pepfile:
assert outfile != "stdout", "Please specify outfile name."
f2 = Fasta(pepfile)
fw2 = must_open(outfile + ".pep", "w")
for row in fp:
if row[0] == '#':
continue
a, b = row.split()[:2]
if a == b:
logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
continue
if a not in f:
a = find_first_isoform(a, f)
assert a, a
if b not in f:
b = find_first_isoform(b, f)
assert b, b
acds = f[a]
bcds = f[b]
SeqIO.write((acds, bcds), fw, "fasta")
if pepfile:
apep = f2[a]
bpep = f2[b]
SeqIO.write((apep, bpep), fw2, "fasta")
fw.close()
if pepfile:
fw2.close() | ['def', 'prepare', '(', 'args', ')', ':', 'from', 'jcvi', '.', 'formats', '.', 'fasta', 'import', 'Fasta', 'p', '=', 'OptionParser', '(', 'prepare', '.', '__doc__', ')', 'p', '.', 'set_outfile', '(', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'outfile', '=', 'opts', '.', 'outfile', 'if', 'len', '(', 'args', ')', '==', '2', ':', 'pairsfile', ',', 'cdsfile', '=', 'args', 'pepfile', '=', 'None', 'elif', 'len', '(', 'args', ')', '==', '3', ':', 'pairsfile', ',', 'cdsfile', ',', 'pepfile', '=', 'args', 'else', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'f', '=', 'Fasta', '(', 'cdsfile', ')', 'fp', '=', 'open', '(', 'pairsfile', ')', 'fw', '=', 'must_open', '(', 'outfile', ',', '"w"', ')', 'if', 'pepfile', ':', 'assert', 'outfile', '!=', '"stdout"', ',', '"Please specify outfile name."', 'f2', '=', 'Fasta', '(', 'pepfile', ')', 'fw2', '=', 'must_open', '(', 'outfile', '+', '".pep"', ',', '"w"', ')', 'for', 'row', 'in', 'fp', ':', 'if', 'row', '[', '0', ']', '==', "'#'", ':', 'continue', 'a', ',', 'b', '=', 'row', '.', 'split', '(', ')', '[', ':', '2', ']', 'if', 'a', '==', 'b', ':', 'logging', '.', 'debug', '(', '"Self pairs found: {0} - {1}. Ignored"', '.', 'format', '(', 'a', ',', 'b', ')', ')', 'continue', 'if', 'a', 'not', 'in', 'f', ':', 'a', '=', 'find_first_isoform', '(', 'a', ',', 'f', ')', 'assert', 'a', ',', 'a', 'if', 'b', 'not', 'in', 'f', ':', 'b', '=', 'find_first_isoform', '(', 'b', ',', 'f', ')', 'assert', 'b', ',', 'b', 'acds', '=', 'f', '[', 'a', ']', 'bcds', '=', 'f', '[', 'b', ']', 'SeqIO', '.', 'write', '(', '(', 'acds', ',', 'bcds', ')', ',', 'fw', ',', '"fasta"', ')', 'if', 'pepfile', ':', 'apep', '=', 'f2', '[', 'a', ']', 'bpep', '=', 'f2', '[', 'b', ']', 'SeqIO', '.', 'write', '(', '(', 'apep', ',', 'bpep', ')', ',', 'fw2', ',', '"fasta"', ')', 'fw', '.', 'close', '(', ')', 'if', 'pepfile', ':', 'fw2', '.', 'close', '(', ')'] | %prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair. | ['%prog', 'prepare', 'pairsfile', 'cdsfile', '[', 'pepfile', ']', '-', 'o', 'paired', '.', 'cds', '.', 'fasta'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/ks.py#L412-L467 |
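Since the docstring's %prog line marks prepare() as a command-line action, here is a hedged sketch of calling it programmatically; the file names are hypothetical, and the -o flag comes from the set_outfile() option:

```python
from jcvi.apps.ks import prepare

# pairs.txt: first two columns name each gene pair; cds.fasta holds their sequences
prepare(["pairs.txt", "cds.fasta", "-o", "paired.cds.fasta"])
```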
7,717 | openego/ding0 | ding0/core/network/__init__.py | GridDing0.find_path | def find_path(self, node_source, node_target, type='nodes'):
"""Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
        they are closed, there are 2 possible paths. The result is a path which has the min. count of hops
        but might have a longer total path length than the second one.
        See networkx's shortest_path() function for details on how the path is calculated.
"""
if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
path = nx.shortest_path(self._graph, node_source, node_target)
else:
raise Exception('At least one of the nodes is not a member of graph.')
if type == 'nodes':
return path
elif type == 'edges':
return [_ for _ in self._graph.edges(nbunch=path, data=True)
if (_[0] in path and _[1] in path)]
else:
raise ValueError('Please specify type as nodes or edges') | python | def find_path(self, node_source, node_target, type='nodes'):
"""Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
        they are closed, there are 2 possible paths. The result is a path which has the min. count of hops
        but might have a longer total path length than the second one.
        See networkx's shortest_path() function for details on how the path is calculated.
"""
if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
path = nx.shortest_path(self._graph, node_source, node_target)
else:
raise Exception('At least one of the nodes is not a member of graph.')
if type == 'nodes':
return path
elif type == 'edges':
return [_ for _ in self._graph.edges(nbunch=path, data=True)
if (_[0] in path and _[1] in path)]
else:
raise ValueError('Please specify type as nodes or edges') | ['def', 'find_path', '(', 'self', ',', 'node_source', ',', 'node_target', ',', 'type', '=', "'nodes'", ')', ':', 'if', '(', 'node_source', 'in', 'self', '.', '_graph', '.', 'nodes', '(', ')', ')', 'and', '(', 'node_target', 'in', 'self', '.', '_graph', '.', 'nodes', '(', ')', ')', ':', 'path', '=', 'nx', '.', 'shortest_path', '(', 'self', '.', '_graph', ',', 'node_source', ',', 'node_target', ')', 'else', ':', 'raise', 'Exception', '(', "'At least one of the nodes is not a member of graph.'", ')', 'if', 'type', '==', "'nodes'", ':', 'return', 'path', 'elif', 'type', '==', "'edges'", ':', 'return', '[', '_', 'for', '_', 'in', 'self', '.', '_graph', '.', 'edges', '(', 'nbunch', '=', 'path', ',', 'data', '=', 'True', ')', 'if', '(', '_', '[', '0', ']', 'in', 'path', 'and', '_', '[', '1', ']', 'in', 'path', ')', ']', 'else', ':', 'raise', 'ValueError', '(', "'Please specify type as nodes or edges'", ')'] | Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
        they are closed, there are 2 possible paths. The result is a path which has the min. count of hops
        but might have a longer total path length than the second one.
        See networkx's shortest_path() function for details on how the path is calculated. | ['Determines', 'shortest', 'path'] | train | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/__init__.py#L319-L359 |
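A hedged sketch of calling find_path() on a populated grid; grid, station_a, and station_b are hypothetical stand-ins for a GridDing0 instance and two nodes already present in its graph:

```python
# nodes along the fewest-hop route
node_path = grid.find_path(station_a, station_b, type='nodes')

# the same route as (source, target, attributes) edge tuples
edge_path = grid.find_path(station_a, station_b, type='edges')
```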
7,718 | bslatkin/dpxdt | dpxdt/server/auth.py | manage_admins | def manage_admins():
"""Page for viewing and managing build admins."""
build = g.build
# Do not show cached data
db.session.add(build)
db.session.refresh(build)
add_form = forms.AddAdminForm()
if add_form.validate_on_submit():
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, add_form.email_address.data)
invitation_user = models.User.query.get(invitation_user_id)
if not invitation_user:
invitation_user = models.User(
id=invitation_user_id,
email_address=add_form.email_address.data)
db.session.add(invitation_user)
db.session.add(build)
db.session.add(invitation_user)
db.session.refresh(build, lockmode='update')
build.owners.append(invitation_user)
save_admin_log(build, invited_new_admin=True,
message=invitation_user.email_address)
db.session.commit()
logging.info('Added user=%r as owner to build_id=%r',
invitation_user.id, build.id)
return redirect(url_for('manage_admins', build_id=build.id))
add_form.build_id.data = build.id
revoke_form_list = []
for user in build.owners:
form = forms.RemoveAdminForm()
form.user_id.data = user.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((user, form))
return render_template(
'view_admins.html',
build=build,
add_form=add_form,
revoke_form_list=revoke_form_list) | python | def manage_admins():
"""Page for viewing and managing build admins."""
build = g.build
# Do not show cached data
db.session.add(build)
db.session.refresh(build)
add_form = forms.AddAdminForm()
if add_form.validate_on_submit():
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, add_form.email_address.data)
invitation_user = models.User.query.get(invitation_user_id)
if not invitation_user:
invitation_user = models.User(
id=invitation_user_id,
email_address=add_form.email_address.data)
db.session.add(invitation_user)
db.session.add(build)
db.session.add(invitation_user)
db.session.refresh(build, lockmode='update')
build.owners.append(invitation_user)
save_admin_log(build, invited_new_admin=True,
message=invitation_user.email_address)
db.session.commit()
logging.info('Added user=%r as owner to build_id=%r',
invitation_user.id, build.id)
return redirect(url_for('manage_admins', build_id=build.id))
add_form.build_id.data = build.id
revoke_form_list = []
for user in build.owners:
form = forms.RemoveAdminForm()
form.user_id.data = user.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((user, form))
return render_template(
'view_admins.html',
build=build,
add_form=add_form,
revoke_form_list=revoke_form_list) | ['def', 'manage_admins', '(', ')', ':', 'build', '=', 'g', '.', 'build', '# Do not show cached data', 'db', '.', 'session', '.', 'add', '(', 'build', ')', 'db', '.', 'session', '.', 'refresh', '(', 'build', ')', 'add_form', '=', 'forms', '.', 'AddAdminForm', '(', ')', 'if', 'add_form', '.', 'validate_on_submit', '(', ')', ':', 'invitation_user_id', '=', "'%s:%s'", '%', '(', 'models', '.', 'User', '.', 'EMAIL_INVITATION', ',', 'add_form', '.', 'email_address', '.', 'data', ')', 'invitation_user', '=', 'models', '.', 'User', '.', 'query', '.', 'get', '(', 'invitation_user_id', ')', 'if', 'not', 'invitation_user', ':', 'invitation_user', '=', 'models', '.', 'User', '(', 'id', '=', 'invitation_user_id', ',', 'email_address', '=', 'add_form', '.', 'email_address', '.', 'data', ')', 'db', '.', 'session', '.', 'add', '(', 'invitation_user', ')', 'db', '.', 'session', '.', 'add', '(', 'build', ')', 'db', '.', 'session', '.', 'add', '(', 'invitation_user', ')', 'db', '.', 'session', '.', 'refresh', '(', 'build', ',', 'lockmode', '=', "'update'", ')', 'build', '.', 'owners', '.', 'append', '(', 'invitation_user', ')', 'save_admin_log', '(', 'build', ',', 'invited_new_admin', '=', 'True', ',', 'message', '=', 'invitation_user', '.', 'email_address', ')', 'db', '.', 'session', '.', 'commit', '(', ')', 'logging', '.', 'info', '(', "'Added user=%r as owner to build_id=%r'", ',', 'invitation_user', '.', 'id', ',', 'build', '.', 'id', ')', 'return', 'redirect', '(', 'url_for', '(', "'manage_admins'", ',', 'build_id', '=', 'build', '.', 'id', ')', ')', 'add_form', '.', 'build_id', '.', 'data', '=', 'build', '.', 'id', 'revoke_form_list', '=', '[', ']', 'for', 'user', 'in', 'build', '.', 'owners', ':', 'form', '=', 'forms', '.', 'RemoveAdminForm', '(', ')', 'form', '.', 'user_id', '.', 'data', '=', 'user', '.', 'id', 'form', '.', 'build_id', '.', 'data', '=', 'build', '.', 'id', 'form', '.', 'revoke', '.', 'data', '=', 'True', 'revoke_form_list', '.', 'append', '(', '(', 'user', ',', 'form', ')', ')', 'return', 'render_template', '(', "'view_admins.html'", ',', 'build', '=', 'build', ',', 'add_form', '=', 'add_form', ',', 'revoke_form_list', '=', 'revoke_form_list', ')'] | Page for viewing and managing build admins. | ['Page', 'for', 'viewing', 'and', 'managing', 'build', 'admins', '.'] | train | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L469-L518 |
7,719 | zebpalmer/WeatherAlerts | weatheralerts/geo.py | SameCodes._load_same_codes | def _load_same_codes(self, refresh=False):
"""Loads the Same Codes into this object"""
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes() | python | def _load_same_codes(self, refresh=False):
"""Loads the Same Codes into this object"""
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes() | ['def', '_load_same_codes', '(', 'self', ',', 'refresh', '=', 'False', ')', ':', 'if', 'refresh', 'is', 'True', ':', 'self', '.', '_get_same_codes', '(', ')', 'else', ':', 'self', '.', '_cached_same_codes', '(', ')'] | Loads the Same Codes into this object | ['Loads', 'the', 'Same', 'Codes', 'into', 'this', 'object'] | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L106-L111 |
7,720 | vtkiorg/vtki | vtki/renderer.py | Renderer.remove_actor | def remove_actor(self, actor, reset_camera=False):
"""
Removes an actor from the Renderer.
Parameters
----------
actor : vtk.vtkActor
            Actor that has previously been added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed.
"""
name = None
if isinstance(actor, str):
name = actor
keys = list(self._actors.keys())
names = []
for k in keys:
if k.startswith('{}-'.format(name)):
names.append(k)
if len(names) > 0:
self.remove_actor(names, reset_camera=reset_camera)
try:
actor = self._actors[name]
except KeyError:
# If actor of that name is not present then return success
return False
if isinstance(actor, collections.Iterable):
success = False
for a in actor:
rv = self.remove_actor(a, reset_camera=reset_camera)
if rv or success:
success = True
return success
if actor is None:
return False
# First remove this actor's mapper from _scalar_bar_mappers
_remove_mapper_from_plotter(self.parent, actor, False)
self.RemoveActor(actor)
if name is None:
for k, v in self._actors.items():
if v == actor:
name = k
self._actors.pop(name, None)
self.update_bounds_axes()
if reset_camera:
self.reset_camera()
elif not self.camera_set and reset_camera is None:
self.reset_camera()
else:
self.parent._render()
return True | python | def remove_actor(self, actor, reset_camera=False):
"""
Removes an actor from the Renderer.
Parameters
----------
actor : vtk.vtkActor
            Actor that has previously been added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed.
"""
name = None
if isinstance(actor, str):
name = actor
keys = list(self._actors.keys())
names = []
for k in keys:
if k.startswith('{}-'.format(name)):
names.append(k)
if len(names) > 0:
self.remove_actor(names, reset_camera=reset_camera)
try:
actor = self._actors[name]
except KeyError:
# If actor of that name is not present then return success
return False
if isinstance(actor, collections.Iterable):
success = False
for a in actor:
rv = self.remove_actor(a, reset_camera=reset_camera)
if rv or success:
success = True
return success
if actor is None:
return False
# First remove this actor's mapper from _scalar_bar_mappers
_remove_mapper_from_plotter(self.parent, actor, False)
self.RemoveActor(actor)
if name is None:
for k, v in self._actors.items():
if v == actor:
name = k
self._actors.pop(name, None)
self.update_bounds_axes()
if reset_camera:
self.reset_camera()
elif not self.camera_set and reset_camera is None:
self.reset_camera()
else:
self.parent._render()
return True | ['def', 'remove_actor', '(', 'self', ',', 'actor', ',', 'reset_camera', '=', 'False', ')', ':', 'name', '=', 'None', 'if', 'isinstance', '(', 'actor', ',', 'str', ')', ':', 'name', '=', 'actor', 'keys', '=', 'list', '(', 'self', '.', '_actors', '.', 'keys', '(', ')', ')', 'names', '=', '[', ']', 'for', 'k', 'in', 'keys', ':', 'if', 'k', '.', 'startswith', '(', "'{}-'", '.', 'format', '(', 'name', ')', ')', ':', 'names', '.', 'append', '(', 'k', ')', 'if', 'len', '(', 'names', ')', '>', '0', ':', 'self', '.', 'remove_actor', '(', 'names', ',', 'reset_camera', '=', 'reset_camera', ')', 'try', ':', 'actor', '=', 'self', '.', '_actors', '[', 'name', ']', 'except', 'KeyError', ':', '# If actor of that name is not present then return success', 'return', 'False', 'if', 'isinstance', '(', 'actor', ',', 'collections', '.', 'Iterable', ')', ':', 'success', '=', 'False', 'for', 'a', 'in', 'actor', ':', 'rv', '=', 'self', '.', 'remove_actor', '(', 'a', ',', 'reset_camera', '=', 'reset_camera', ')', 'if', 'rv', 'or', 'success', ':', 'success', '=', 'True', 'return', 'success', 'if', 'actor', 'is', 'None', ':', 'return', 'False', "# First remove this actor's mapper from _scalar_bar_mappers", '_remove_mapper_from_plotter', '(', 'self', '.', 'parent', ',', 'actor', ',', 'False', ')', 'self', '.', 'RemoveActor', '(', 'actor', ')', 'if', 'name', 'is', 'None', ':', 'for', 'k', ',', 'v', 'in', 'self', '.', '_actors', '.', 'items', '(', ')', ':', 'if', 'v', '==', 'actor', ':', 'name', '=', 'k', 'self', '.', '_actors', '.', 'pop', '(', 'name', ',', 'None', ')', 'self', '.', 'update_bounds_axes', '(', ')', 'if', 'reset_camera', ':', 'self', '.', 'reset_camera', '(', ')', 'elif', 'not', 'self', '.', 'camera_set', 'and', 'reset_camera', 'is', 'None', ':', 'self', '.', 'reset_camera', '(', ')', 'else', ':', 'self', '.', 'parent', '.', '_render', '(', ')', 'return', 'True'] | Removes an actor from the Renderer.
Parameters
----------
actor : vtk.vtkActor
            Actor that has previously been added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed. | ['Removes', 'an', 'actor', 'from', 'the', 'Renderer', '.'] | train | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L544-L603 |
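A hedged sketch of remove_actor(); it assumes vtki's Plotter exposes its active renderer as .renderer, and uses Sphere() as a stand-in mesh. Note from the code above that a name string works as well as the actor object:

```python
import vtki

plotter = vtki.Plotter()
actor = plotter.add_mesh(vtki.Sphere(), name='ball')
plotter.renderer.remove_actor(actor, reset_camera=True)  # or remove_actor('ball')
```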
7,721 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/scrappers/zonerpress_cz/zonerpress_api.py | has_neigh | def has_neigh(tag_name, params=None, content=None, left=True):
"""
This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
        params (dict): Tag has to have neighbour with these parameters.
        content (str): Tag has to have neighbour with this content.
        left (bool, default True): Tag has to have neighbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement.
"""
def has_neigh_closure(element):
if not element.parent \
or not (element.isTag() and not element.isEndTag()):
return False
# filter only visible tags/neighbours
childs = element.parent.childs
childs = filter(
lambda x: (x.isTag() and not x.isEndTag()) \
or x.getContent().strip() or x is element,
childs
)
if len(childs) <= 1:
return False
ioe = childs.index(element)
if left and ioe > 0:
return is_equal_tag(childs[ioe - 1], tag_name, params, content)
if not left and ioe + 1 < len(childs):
return is_equal_tag(childs[ioe + 1], tag_name, params, content)
return False
return has_neigh_closure | python | def has_neigh(tag_name, params=None, content=None, left=True):
"""
This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
        params (dict): Tag has to have neighbour with these parameters.
        content (str): Tag has to have neighbour with this content.
        left (bool, default True): Tag has to have neighbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement.
"""
def has_neigh_closure(element):
if not element.parent \
or not (element.isTag() and not element.isEndTag()):
return False
# filter only visible tags/neighbours
childs = element.parent.childs
childs = filter(
lambda x: (x.isTag() and not x.isEndTag()) \
or x.getContent().strip() or x is element,
childs
)
if len(childs) <= 1:
return False
ioe = childs.index(element)
if left and ioe > 0:
return is_equal_tag(childs[ioe - 1], tag_name, params, content)
if not left and ioe + 1 < len(childs):
return is_equal_tag(childs[ioe + 1], tag_name, params, content)
return False
return has_neigh_closure | ['def', 'has_neigh', '(', 'tag_name', ',', 'params', '=', 'None', ',', 'content', '=', 'None', ',', 'left', '=', 'True', ')', ':', 'def', 'has_neigh_closure', '(', 'element', ')', ':', 'if', 'not', 'element', '.', 'parent', 'or', 'not', '(', 'element', '.', 'isTag', '(', ')', 'and', 'not', 'element', '.', 'isEndTag', '(', ')', ')', ':', 'return', 'False', '# filter only visible tags/neighbours', 'childs', '=', 'element', '.', 'parent', '.', 'childs', 'childs', '=', 'filter', '(', 'lambda', 'x', ':', '(', 'x', '.', 'isTag', '(', ')', 'and', 'not', 'x', '.', 'isEndTag', '(', ')', ')', 'or', 'x', '.', 'getContent', '(', ')', '.', 'strip', '(', ')', 'or', 'x', 'is', 'element', ',', 'childs', ')', 'if', 'len', '(', 'childs', ')', '<=', '1', ':', 'return', 'False', 'ioe', '=', 'childs', '.', 'index', '(', 'element', ')', 'if', 'left', 'and', 'ioe', '>', '0', ':', 'return', 'is_equal_tag', '(', 'childs', '[', 'ioe', '-', '1', ']', ',', 'tag_name', ',', 'params', ',', 'content', ')', 'if', 'not', 'left', 'and', 'ioe', '+', '1', '<', 'len', '(', 'childs', ')', ':', 'return', 'is_equal_tag', '(', 'childs', '[', 'ioe', '+', '1', ']', ',', 'tag_name', ',', 'params', ',', 'content', ')', 'return', 'False', 'return', 'has_neigh_closure'] | This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
        params (dict): Tag has to have neighbour with these parameters.
        content (str): Tag has to have neighbour with this content.
        left (bool, default True): Tag has to have neighbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement. | ['This', 'function', 'generates', 'functions', 'which', 'matches', 'all', 'tags', 'with', 'neighbours', 'defined', 'by', 'parameters', '.'] | train | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/zonerpress_cz/zonerpress_api.py#L115-L158 |
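A hedged sketch of the pattern the Note describes: the returned closure is handed to a dhtmlparser-style .find() as its fn matcher (the dom object and tag choices are hypothetical):

```python
# matches every <td> whose visible left neighbour is <strong>Price:</strong>
matcher = has_neigh("strong", content="Price:", left=True)
price_cells = dom.find("td", fn=matcher)
```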
7,722 | jxtech/wechatpy | wechatpy/pay/api/withhold.py | WeChatWithhold.query_order | def query_order(self, transaction_id=None, out_trade_no=None):
"""
        Query order API.
        :param transaction_id: Choose one of the two. The WeChat order number; used preferentially when provided.
        :param out_trade_no: Choose one of the two. The merchant order number, internal to the merchant's system; required when transaction_id is not provided.
        :return: the returned result information
"""
if not transaction_id and not out_trade_no:
raise ValueError("transaction_id and out_trade_no must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"transaction_id": transaction_id,
"out_trade_no": out_trade_no,
}
return self._post("pay/paporderquery", data=data) | python | def query_order(self, transaction_id=None, out_trade_no=None):
"""
        Query order API.
        :param transaction_id: Choose one of the two. The WeChat order number; used preferentially when provided.
        :param out_trade_no: Choose one of the two. The merchant order number, internal to the merchant's system; required when transaction_id is not provided.
        :return: the returned result information
"""
if not transaction_id and not out_trade_no:
raise ValueError("transaction_id and out_trade_no must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"transaction_id": transaction_id,
"out_trade_no": out_trade_no,
}
return self._post("pay/paporderquery", data=data) | ['def', 'query_order', '(', 'self', ',', 'transaction_id', '=', 'None', ',', 'out_trade_no', '=', 'None', ')', ':', 'if', 'not', 'transaction_id', 'and', 'not', 'out_trade_no', ':', 'raise', 'ValueError', '(', '"transaction_id and out_trade_no must be a choice."', ')', 'data', '=', '{', '"appid"', ':', 'self', '.', 'appid', ',', '"mch_id"', ':', 'self', '.', 'mch_id', ',', '"transaction_id"', ':', 'transaction_id', ',', '"out_trade_no"', ':', 'out_trade_no', ',', '}', 'return', 'self', '.', '_post', '(', '"pay/paporderquery"', ',', 'data', '=', 'data', ')'] | 查询订单 api
:param transaction_id: 二选一 微信订单号 微信的订单号,优先使用
:param out_trade_no: 二选一 商户订单号 商户系统内部的订单号,当没提供transaction_id时需要传这个。
:return: 返回的结果信息 | ['查询订单', 'api'] | train | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/withhold.py#L158-L174 |
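A hedged sketch; it assumes a wechatpy WeChatPay client exposes this withhold API as an attribute (the attribute name and all credentials are illustrative assumptions):

```python
from wechatpy.pay import WeChatPay

client = WeChatPay(appid='wx0123456789abcdef',  # placeholder credentials
                   api_key='...',
                   mch_id='1230000109')
result = client.withhold.query_order(out_trade_no='ORDER-20190101-0001')
```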
7,723 | aiogram/aiogram | aiogram/utils/executor.py | Executor.set_webhook | def set_webhook(self, webhook_path: Optional[str] = None, request_handler: Any = WebhookRequestHandler,
route_name: str = DEFAULT_ROUTE_NAME, web_app: Optional[Application] = None):
"""
Set webhook for bot
:param webhook_path: Optional[str] (default: None)
:param request_handler: Any (default: WebhookRequestHandler)
:param route_name: str Name of webhook handler route (default: 'webhook_handler')
:param web_app: Optional[Application] (default: None)
:return:
"""
self._prepare_webhook(webhook_path, request_handler, route_name, web_app)
self.loop.run_until_complete(self._startup_webhook()) | python | def set_webhook(self, webhook_path: Optional[str] = None, request_handler: Any = WebhookRequestHandler,
route_name: str = DEFAULT_ROUTE_NAME, web_app: Optional[Application] = None):
"""
Set webhook for bot
:param webhook_path: Optional[str] (default: None)
:param request_handler: Any (default: WebhookRequestHandler)
:param route_name: str Name of webhook handler route (default: 'webhook_handler')
:param web_app: Optional[Application] (default: None)
:return:
"""
self._prepare_webhook(webhook_path, request_handler, route_name, web_app)
self.loop.run_until_complete(self._startup_webhook()) | ['def', 'set_webhook', '(', 'self', ',', 'webhook_path', ':', 'Optional', '[', 'str', ']', '=', 'None', ',', 'request_handler', ':', 'Any', '=', 'WebhookRequestHandler', ',', 'route_name', ':', 'str', '=', 'DEFAULT_ROUTE_NAME', ',', 'web_app', ':', 'Optional', '[', 'Application', ']', '=', 'None', ')', ':', 'self', '.', '_prepare_webhook', '(', 'webhook_path', ',', 'request_handler', ',', 'route_name', ',', 'web_app', ')', 'self', '.', 'loop', '.', 'run_until_complete', '(', 'self', '.', '_startup_webhook', '(', ')', ')'] | Set webhook for bot
:param webhook_path: Optional[str] (default: None)
:param request_handler: Any (default: WebhookRequestHandler)
:param route_name: str Name of webhook handler route (default: 'webhook_handler')
:param web_app: Optional[Application] (default: None)
:return: | ['Set', 'webhook', 'for', 'bot'] | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/executor.py#L263-L275 |
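A hedged sketch of driving set_webhook() through an Executor; the bot token and path are placeholders:

```python
from aiogram import Bot, Dispatcher
from aiogram.utils.executor import Executor

bot = Bot(token='123456:ABC-PLACEHOLDER')
dp = Dispatcher(bot)

runner = Executor(dp)
runner.set_webhook(webhook_path='/webhook')  # prepares the web app and runs webhook startup on the loop
```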
7,724 | xflr6/gsheets | gsheets/models.py | SpreadSheet.findall | def findall(self, title=None):
"""Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
"""
if title is None:
return list(self._sheets)
if title not in self._titles:
return []
return list(self._titles[title]) | python | def findall(self, title=None):
"""Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
"""
if title is None:
return list(self._sheets)
if title not in self._titles:
return []
return list(self._titles[title]) | ['def', 'findall', '(', 'self', ',', 'title', '=', 'None', ')', ':', 'if', 'title', 'is', 'None', ':', 'return', 'list', '(', 'self', '.', '_sheets', ')', 'if', 'title', 'not', 'in', 'self', '.', '_titles', ':', 'return', '[', ']', 'return', 'list', '(', 'self', '.', '_titles', '[', 'title', ']', ')'] | Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty) | ['Return', 'a', 'list', 'of', 'worksheets', 'with', 'the', 'given', 'title', '.'] | train | https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/models.py#L122-L134 |
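A short usage sketch with the gsheets package; from_files() is its documented entry point, and the spreadsheet key and worksheet title are placeholders:

```python
from gsheets import Sheets

sheets = Sheets.from_files('client_secrets.json', 'storage.json')
spreadsheet = sheets['1BxiMV-PLACEHOLDER-KEY']  # placeholder spreadsheet id

every_worksheet = spreadsheet.findall()      # all contained sheets
summaries = spreadsheet.findall('Summary')   # only sheets titled 'Summary'
```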
7,725 | lwcook/horsetail-matching | horsetailmatching/surrogates.py | PolySurrogate.predict | def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y | python | def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y | ['def', 'predict', '(', 'self', ',', 'u', ')', ':', 'y', ',', 'ysub', '=', '0', ',', 'np', '.', 'zeros', '(', 'self', '.', 'N_poly', ')', 'for', 'ip', 'in', 'range', '(', 'self', '.', 'N_poly', ')', ':', 'inds', '=', 'tuple', '(', 'self', '.', 'index_polys', '[', 'ip', ']', ')', 'ysub', '[', 'ip', ']', '=', 'self', '.', 'coeffs', '[', 'inds', ']', '*', 'eval_poly', '(', 'u', ',', 'inds', ',', 'self', '.', 'J_list', ')', 'y', '+=', 'ysub', '[', 'ip', ']', 'self', '.', 'response_components', '=', 'ysub', 'return', 'y'] | Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1]) | ['Predicts', 'the', 'output', 'value', 'at', 'u', 'from', 'the', 'fitted', 'polynomial', 'expansion', '.', 'Therefore', 'the', 'method', 'train', '()', 'must', 'be', 'called', 'first', '.'] | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L72-L98 |
7,726 | DomainTools/python_api | domaintools/api.py | delimited | def delimited(items, character='|'):
"""Returns a character delimited version of the provided list as a Python string"""
    return character.join(items) if type(items) in (list, tuple, set) else items | python | def delimited(items, character='|'):
"""Returns a character delimited version of the provided list as a Python string"""
    return character.join(items) if type(items) in (list, tuple, set) else items | ['def', 'delimited', '(', 'items', ',', 'character', '=', "'|'", ')', ':', 'return', 'character', '.', 'join', '(', 'items', ')', 'if', 'type', '(', 'items', ')', 'in', '(', 'list', ',', 'tuple', ',', 'set', ')', 'else', 'items'] | Returns a character delimited version of the provided list as a Python string | ['Returns', 'a', 'character', 'delimited', 'version', 'of', 'the', 'provided', 'list', 'as', 'a', 'Python', 'string'] | train | https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/api.py#L10-L12 |
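A quick sketch of the helper's two paths, joining a collection with the delimiter or passing any other value through unchanged (outputs assume the body as shown above):

```python
delimited(['ns1', 'ns2', 'ns3'])                 # -> 'ns1|ns2|ns3'
delimited(['ns1', 'ns2', 'ns3'], character=',')  # -> 'ns1,ns2,ns3'
delimited('already-a-string')                    # -> 'already-a-string'
```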
7,727 | jmurty/xml4h | xml4h/nodes.py | Node.write | def write(self, writer=None, encoding='utf-8', indent=0, newline='',
omit_declaration=False, node_depth=0, quote_char='"'):
"""
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
"""
xml4h.write_node(self,
writer=writer, encoding=encoding, indent=indent,
newline=newline, omit_declaration=omit_declaration,
node_depth=node_depth, quote_char=quote_char) | python | def write(self, writer=None, encoding='utf-8', indent=0, newline='',
omit_declaration=False, node_depth=0, quote_char='"'):
"""
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
"""
xml4h.write_node(self,
writer=writer, encoding=encoding, indent=indent,
newline=newline, omit_declaration=omit_declaration,
node_depth=node_depth, quote_char=quote_char) | ['def', 'write', '(', 'self', ',', 'writer', '=', 'None', ',', 'encoding', '=', "'utf-8'", ',', 'indent', '=', '0', ',', 'newline', '=', "''", ',', 'omit_declaration', '=', 'False', ',', 'node_depth', '=', '0', ',', 'quote_char', '=', '\'"\'', ')', ':', 'xml4h', '.', 'write_node', '(', 'self', ',', 'writer', '=', 'writer', ',', 'encoding', '=', 'encoding', ',', 'indent', '=', 'indent', ',', 'newline', '=', 'newline', ',', 'omit_declaration', '=', 'omit_declaration', ',', 'node_depth', '=', 'node_depth', ',', 'quote_char', '=', 'quote_char', ')'] | Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node. | ['Serialize', 'this', 'node', 'and', 'its', 'descendants', 'to', 'text', 'writing', 'the', 'output', 'to', 'a', 'given', '*', 'writer', '*', 'or', 'to', 'stdout', '.'] | train | https://github.com/jmurty/xml4h/blob/adbb45e27a01a869a505aee7bc16bad2f517b511/xml4h/nodes.py#L450-L492 |
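A hedged sketch of write() on a parsed document; xml4h.parse() is the library's loader and the file name is a placeholder:

```python
import xml4h

doc = xml4h.parse('example.xml')        # placeholder path
doc.root.write(indent=4, newline=True)  # pretty-printed XML to stdout
```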
7,728 | log2timeline/dfvfs | dfvfs/lib/cpio.py | CPIOArchiveFile.Open | def Open(self, file_object):
"""Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
"""
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if len(signature_data) > 2:
if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
self.file_format = 'bin-big-endian'
elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
self.file_format = 'bin-little-endian'
elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
self.file_format = 'odc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
self.file_format = 'newc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
self.file_format = 'crc'
if self.file_format is None:
raise IOError('Unsupported CPIO format.')
self._file_object = file_object
self._file_size = file_object.get_size()
self._ReadFileEntries(self._file_object) | python | def Open(self, file_object):
"""Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
"""
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if len(signature_data) > 2:
if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
self.file_format = 'bin-big-endian'
elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
self.file_format = 'bin-little-endian'
elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
self.file_format = 'odc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
self.file_format = 'newc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
self.file_format = 'crc'
if self.file_format is None:
raise IOError('Unsupported CPIO format.')
self._file_object = file_object
self._file_size = file_object.get_size()
self._ReadFileEntries(self._file_object) | ['def', 'Open', '(', 'self', ',', 'file_object', ')', ':', 'file_object', '.', 'seek', '(', '0', ',', 'os', '.', 'SEEK_SET', ')', 'signature_data', '=', 'file_object', '.', 'read', '(', '6', ')', 'self', '.', 'file_format', '=', 'None', 'if', 'len', '(', 'signature_data', ')', '>', '2', ':', 'if', 'signature_data', '[', ':', '2', ']', '==', 'self', '.', '_CPIO_SIGNATURE_BINARY_BIG_ENDIAN', ':', 'self', '.', 'file_format', '=', "'bin-big-endian'", 'elif', 'signature_data', '[', ':', '2', ']', '==', 'self', '.', '_CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN', ':', 'self', '.', 'file_format', '=', "'bin-little-endian'", 'elif', 'signature_data', '==', 'self', '.', '_CPIO_SIGNATURE_PORTABLE_ASCII', ':', 'self', '.', 'file_format', '=', "'odc'", 'elif', 'signature_data', '==', 'self', '.', '_CPIO_SIGNATURE_NEW_ASCII', ':', 'self', '.', 'file_format', '=', "'newc'", 'elif', 'signature_data', '==', 'self', '.', '_CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM', ':', 'self', '.', 'file_format', '=', "'crc'", 'if', 'self', '.', 'file_format', 'is', 'None', ':', 'raise', 'IOError', '(', "'Unsupported CPIO format.'", ')', 'self', '.', '_file_object', '=', 'file_object', 'self', '.', '_file_size', '=', 'file_object', '.', 'get_size', '(', ')', 'self', '.', '_ReadFileEntries', '(', 'self', '.', '_file_object', ')'] | Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported. | ['Opens', 'the', 'CPIO', 'archive', 'file', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/cpio.py#L293-L325 |
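A hedged sketch of Open(): it needs a dfvfs file-like object (note the get_size() call), not a plain Python file, so one is obtained through the dfvfs resolver (the archive path is a placeholder):

```python
from dfvfs.lib import definitions
from dfvfs.lib.cpio import CPIOArchiveFile
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver

path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/initrd.cpio')  # placeholder
file_object = resolver.Resolver.OpenFileObject(path_spec)

archive = CPIOArchiveFile()
archive.Open(file_object)
print(archive.file_format)  # e.g. 'newc'
```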
7,729 | noahbenson/neuropythy | neuropythy/io/core.py | forget_exporter | def forget_exporter(name):
'''
forget_exporter(name) yields True if an exporter of type name was successfully forgotten from
the neuropythy exporters list and false otherwise. This function must be called before an
exporter can be replaced.
'''
global exporters
name = name.lower()
if name in exporters:
exporters = exporters.discard(name)
delattr(save, name)
return True
else:
return False | python | def forget_exporter(name):
'''
forget_exporter(name) yields True if an exporter of type name was successfully forgotten from
the neuropythy exporters list and false otherwise. This function must be called before an
exporter can be replaced.
'''
global exporters
name = name.lower()
if name in exporters:
exporters = exporters.discard(name)
delattr(save, name)
return True
else:
return False | ['def', 'forget_exporter', '(', 'name', ')', ':', 'global', 'exporters', 'name', '=', 'name', '.', 'lower', '(', ')', 'if', 'name', 'in', 'exporters', ':', 'exporters', '=', 'exporters', '.', 'discard', '(', 'name', ')', 'delattr', '(', 'save', ',', 'name', ')', 'return', 'True', 'else', ':', 'return', 'False'] | forget_exporter(name) yields True if an exporter of type name was successfully forgotten from
the neuropythy exporters list and false otherwise. This function must be called before an
exporter can be replaced. | ['forget_exporter', '(', 'name', ')', 'yields', 'True', 'if', 'an', 'exporter', 'of', 'type', 'name', 'was', 'successfully', 'forgotten', 'from', 'the', 'neuropythy', 'exporters', 'list', 'and', 'false', 'otherwise', '.', 'This', 'function', 'must', 'be', 'called', 'before', 'an', 'exporter', 'can', 'be', 'replaced', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/io/core.py#L229-L242 |
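A short sketch; 'mycustomformat' is a hypothetical name standing in for an exporter registered earlier:

```python
from neuropythy.io.core import forget_exporter

# must succeed before the same name can be re-registered with a new exporter
if forget_exporter('mycustomformat'):  # hypothetical exporter name
    print('exporter forgotten; safe to re-register')
```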
7,730 | openstates/billy | billy/web/public/views/legislators.py | legislator_inactive | def legislator_inactive(request, abbr, legislator):
'''
Context:
- vote_preview_row_template
- old_roles
- abbr
- metadata
- legislator
- sources
- sponsored_bills
- legislator_votes
- has_votes
- nav_active
Templates:
- billy/web/public/legislator.html
- billy/web/public/vote_preview_row.html
'''
sponsored_bills = legislator.sponsored_bills(
limit=6, sort=[('action_dates.first', pymongo.DESCENDING)])
legislator_votes = list(legislator.votes_6_sorted())
has_votes = bool(legislator_votes)
return render(
request, templatename('legislator'),
dict(vote_preview_row_template=templatename('vote_preview_row'),
old_roles=legislator.old_roles_manager,
abbr=abbr,
metadata=legislator.metadata,
legislator=legislator,
sources=legislator['sources'],
sponsored_bills=list(sponsored_bills),
legislator_votes=legislator_votes,
has_votes=has_votes,
nav_active='legislators')) | python | def legislator_inactive(request, abbr, legislator):
'''
Context:
- vote_preview_row_template
- old_roles
- abbr
- metadata
- legislator
- sources
- sponsored_bills
- legislator_votes
- has_votes
- nav_active
Templates:
- billy/web/public/legislator.html
- billy/web/public/vote_preview_row.html
'''
sponsored_bills = legislator.sponsored_bills(
limit=6, sort=[('action_dates.first', pymongo.DESCENDING)])
legislator_votes = list(legislator.votes_6_sorted())
has_votes = bool(legislator_votes)
return render(
request, templatename('legislator'),
dict(vote_preview_row_template=templatename('vote_preview_row'),
old_roles=legislator.old_roles_manager,
abbr=abbr,
metadata=legislator.metadata,
legislator=legislator,
sources=legislator['sources'],
sponsored_bills=list(sponsored_bills),
legislator_votes=legislator_votes,
has_votes=has_votes,
nav_active='legislators')) | ['def', 'legislator_inactive', '(', 'request', ',', 'abbr', ',', 'legislator', ')', ':', 'sponsored_bills', '=', 'legislator', '.', 'sponsored_bills', '(', 'limit', '=', '6', ',', 'sort', '=', '[', '(', "'action_dates.first'", ',', 'pymongo', '.', 'DESCENDING', ')', ']', ')', 'legislator_votes', '=', 'list', '(', 'legislator', '.', 'votes_6_sorted', '(', ')', ')', 'has_votes', '=', 'bool', '(', 'legislator_votes', ')', 'return', 'render', '(', 'request', ',', 'templatename', '(', "'legislator'", ')', ',', 'dict', '(', 'vote_preview_row_template', '=', 'templatename', '(', "'vote_preview_row'", ')', ',', 'old_roles', '=', 'legislator', '.', 'old_roles_manager', ',', 'abbr', '=', 'abbr', ',', 'metadata', '=', 'legislator', '.', 'metadata', ',', 'legislator', '=', 'legislator', ',', 'sources', '=', 'legislator', '[', "'sources'", ']', ',', 'sponsored_bills', '=', 'list', '(', 'sponsored_bills', ')', ',', 'legislator_votes', '=', 'legislator_votes', ',', 'has_votes', '=', 'has_votes', ',', 'nav_active', '=', "'legislators'", ')', ')'] | Context:
- vote_preview_row_template
- old_roles
- abbr
- metadata
- legislator
- sources
- sponsored_bills
- legislator_votes
- has_votes
- nav_active
Templates:
- billy/web/public/legislator.html
- billy/web/public/vote_preview_row.html | ['Context', ':', '-', 'vote_preview_row_template', '-', 'old_roles', '-', 'abbr', '-', 'metadata', '-', 'legislator', '-', 'sources', '-', 'sponsored_bills', '-', 'legislator_votes', '-', 'has_votes', '-', 'nav_active'] | train | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/legislators.py#L176-L211 |
7,731 | google/dotty | efilter/transforms/solve.py | solve_let | def solve_let(expr, vars):
"""Solves a let-form by calling RHS with nested scope."""
lhs_value = solve(expr.lhs, vars).value
if not isinstance(lhs_value, structured.IStructured):
raise errors.EfilterTypeError(
root=expr.lhs, query=expr.original,
message="The LHS of 'let' must evaluate to an IStructured. Got %r."
% (lhs_value,))
return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) | python | def solve_let(expr, vars):
"""Solves a let-form by calling RHS with nested scope."""
lhs_value = solve(expr.lhs, vars).value
if not isinstance(lhs_value, structured.IStructured):
raise errors.EfilterTypeError(
root=expr.lhs, query=expr.original,
message="The LHS of 'let' must evaluate to an IStructured. Got %r."
% (lhs_value,))
return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) | ['def', 'solve_let', '(', 'expr', ',', 'vars', ')', ':', 'lhs_value', '=', 'solve', '(', 'expr', '.', 'lhs', ',', 'vars', ')', '.', 'value', 'if', 'not', 'isinstance', '(', 'lhs_value', ',', 'structured', '.', 'IStructured', ')', ':', 'raise', 'errors', '.', 'EfilterTypeError', '(', 'root', '=', 'expr', '.', 'lhs', ',', 'query', '=', 'expr', '.', 'original', ',', 'message', '=', '"The LHS of \'let\' must evaluate to an IStructured. Got %r."', '%', '(', 'lhs_value', ',', ')', ')', 'return', 'solve', '(', 'expr', '.', 'rhs', ',', '__nest_scope', '(', 'expr', '.', 'lhs', ',', 'vars', ',', 'lhs_value', ')', ')'] | Solves a let-form by calling RHS with nested scope. | ['Solves', 'a', 'let', '-', 'form', 'by', 'calling', 'RHS', 'with', 'nested', 'scope', '.'] | train | https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L450-L459 |
7,732 | gwastro/pycbc | pycbc/results/legacy_grb.py | initialize_page | def initialize_page(title, style, script, header=None):
"""
A function that returns a markup.py page object with the required html
header.
"""
page = markup.page(mode="strict_html")
page._escape = False
page.init(title=title, css=style, script=script, header=header)
return page | python | def initialize_page(title, style, script, header=None):
"""
A function that returns a markup.py page object with the required html
header.
"""
page = markup.page(mode="strict_html")
page._escape = False
page.init(title=title, css=style, script=script, header=header)
return page | ['def', 'initialize_page', '(', 'title', ',', 'style', ',', 'script', ',', 'header', '=', 'None', ')', ':', 'page', '=', 'markup', '.', 'page', '(', 'mode', '=', '"strict_html"', ')', 'page', '.', '_escape', '=', 'False', 'page', '.', 'init', '(', 'title', '=', 'title', ',', 'css', '=', 'style', ',', 'script', '=', 'script', ',', 'header', '=', 'header', ')', 'return', 'page'] | A function that returns a markup.py page object with the required html
header. | ['A', 'function', 'that', 'returns', 'a', 'markup', '.', 'py', 'page', 'object', 'with', 'the', 'required', 'html', 'header', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/legacy_grb.py#L31-L41 |
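A hedged usage sketch for initialize_page (the import path is inferred from the record's repository layout; the title and file names below are invented, not taken from the record):

from pycbc.results.legacy_grb import initialize_page

page = initialize_page(title="GRB followup",           # page <title>
                       style="static/style.css",       # css= passed to page.init
                       script=["static/sorttable.js"])
page.p("hello")        # markup.py pages grow content via tag methods like p()
print(str(page))       # render the accumulated HTML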
7,733 | ionelmc/python-cogen | cogen/core/queue.py | Queue.task_done | def task_done(self, **kws):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
unfinished = self.unfinished_tasks - 1
op = None
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
op = QDone(self, **kws)
self.unfinished_tasks = unfinished
return op | python | def task_done(self, **kws):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
unfinished = self.unfinished_tasks - 1
op = None
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
op = QDone(self, **kws)
self.unfinished_tasks = unfinished
return op | ['def', 'task_done', '(', 'self', ',', '*', '*', 'kws', ')', ':', 'unfinished', '=', 'self', '.', 'unfinished_tasks', '-', '1', 'op', '=', 'None', 'if', 'unfinished', '<=', '0', ':', 'if', 'unfinished', '<', '0', ':', 'raise', 'ValueError', '(', "'task_done() called too many times'", ')', 'op', '=', 'QDone', '(', 'self', ',', '*', '*', 'kws', ')', 'self', '.', 'unfinished_tasks', '=', 'unfinished', 'return', 'op'] | Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue. | ['Indicate', 'that', 'a', 'formerly', 'enqueued', 'task', 'is', 'complete', '.', 'Used', 'by', 'Queue', 'consumer', 'threads', '.', 'For', 'each', 'get', '()', 'used', 'to', 'fetch', 'a', 'task', 'a', 'subsequent', 'call', 'to', 'task_done', '()', 'tells', 'the', 'queue', 'that', 'the', 'processing', 'on', 'the', 'task', 'is', 'complete', '.', 'If', 'a', 'join', '()', 'is', 'currently', 'blocking', 'it', 'will', 'resume', 'when', 'all', 'items', 'have', 'been', 'processed', '(', 'meaning', 'that', 'a', 'task_done', '()', 'call', 'was', 'received', 'for', 'every', 'item', 'that', 'had', 'been', 'put', '()', 'into', 'the', 'queue', ')', '.', 'Raises', 'a', 'ValueError', 'if', 'called', 'more', 'times', 'than', 'there', 'were', 'items', 'placed', 'in', 'the', 'queue', '.'] | train | https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/queue.py#L194-L215 |
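cogen's coroutine Queue mirrors the stdlib queue contract; this standalone sketch shows the same get()/task_done()/join() bookkeeping with Python's standard library, since exercising cogen itself would require its coroutine scheduler:

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        try:
            if item is None:      # sentinel: stop consuming
                break
            print("processed", item)
        finally:
            q.task_done()         # exactly one task_done() per get()

threading.Thread(target=worker, daemon=True).start()
for i in range(3):
    q.put(i)
q.put(None)
q.join()                          # unblocks once every put() is accounted for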
7,734 | antocuni/pdb | pdb.py | Pdb.do_display | def do_display(self, arg):
"""
display expression
Add expression to the display list; expressions in this list
are evaluated at each step, and printed every time its value
changes.
WARNING: since the expression is evaluated multiple times, pay
attention not to put expressions with side-effects in the
display list.
"""
try:
value = self._getval_or_undefined(arg)
except:
return
self._get_display_list()[arg] = value | python | def do_display(self, arg):
"""
display expression
Add expression to the display list; expressions in this list
are evaluated at each step, and printed every time its value
changes.
WARNING: since the expression is evaluated multiple times, pay
attention not to put expressions with side-effects in the
display list.
"""
try:
value = self._getval_or_undefined(arg)
except:
return
self._get_display_list()[arg] = value | ['def', 'do_display', '(', 'self', ',', 'arg', ')', ':', 'try', ':', 'value', '=', 'self', '.', '_getval_or_undefined', '(', 'arg', ')', 'except', ':', 'return', 'self', '.', '_get_display_list', '(', ')', '[', 'arg', ']', '=', 'value'] | display expression
Add expression to the display list; expressions in this list
are evaluated at each step, and printed every time its value
changes.
WARNING: since the expression is evaluated multiple times, pay
attention not to put expressions with side-effects in the
display list. | ['display', 'expression'] | train | https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L834-L850 |
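A minimal interactive sketch for trying the display command (pdb++ replaces the stdlib prompt when installed; the commands in the comments are typed at the debugger prompt, not executed by the script):

import pdb

def demo():
    x = 0
    pdb.set_trace()      # at the prompt type: display x
    for i in range(3):
        x += i           # stepping past this reprints x whenever it changes
    return x

demo()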
7,735 | cloud-custodian/cloud-custodian | tools/c7n_gcp/c7n_gcp/mu.py | LogSubscriber.remove | def remove(self, func):
"""Remove any provisioned log sink if auto created"""
if not self.data['name'].startswith(self.prefix):
return
parent = self.get_parent(self.get_log())
_, sink_path, _ = self.get_sink()
client = self.session.client(
'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
try:
client.execute_command(
'delete', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise | python | def remove(self, func):
"""Remove any provisioned log sink if auto created"""
if not self.data['name'].startswith(self.prefix):
return
parent = self.get_parent(self.get_log())
_, sink_path, _ = self.get_sink()
client = self.session.client(
'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
try:
client.execute_command(
'delete', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise | ['def', 'remove', '(', 'self', ',', 'func', ')', ':', 'if', 'not', 'self', '.', 'data', '[', "'name'", ']', '.', 'startswith', '(', 'self', '.', 'prefix', ')', ':', 'return', 'parent', '=', 'self', '.', 'get_parent', '(', 'self', '.', 'get_log', '(', ')', ')', '_', ',', 'sink_path', ',', '_', '=', 'self', '.', 'get_sink', '(', ')', 'client', '=', 'self', '.', 'session', '.', 'client', '(', "'logging'", ',', "'v2'", ',', "'%s.sinks'", '%', '(', 'parent', '.', 'split', '(', "'/'", ',', '1', ')', '[', '0', ']', ')', ')', 'try', ':', 'client', '.', 'execute_command', '(', "'delete'", ',', '{', "'sinkName'", ':', 'sink_path', '}', ')', 'except', 'HttpError', 'as', 'e', ':', 'if', 'e', '.', 'resp', '.', 'status', '!=', '404', ':', 'raise'] | Remove any provisioned log sink if auto created | ['Remove', 'any', 'provisioned', 'log', 'sink', 'if', 'auto', 'created'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/mu.py#L686-L699 |
7,736 | honzamach/pynspect | pynspect/traversers.py | BaseFilteringTreeTraverser.evaluate_binop_comparison | def evaluate_binop_comparison(self, operation, left, right, **kwargs):
"""
Evaluate given comparison binary operation with given operands.
"""
if not operation in self.binops_comparison:
raise ValueError("Invalid comparison binary operation '{}'".format(operation))
if left is None or right is None:
return None
if not isinstance(left, (list, ListIP)):
left = [left]
if not isinstance(right, (list, ListIP)):
right = [right]
if not left or not right:
return None
if operation in ['OP_IS']:
res = self.binops_comparison[operation](left, right)
if res:
return True
elif operation in ['OP_IN']:
for iteml in left:
res = self.binops_comparison[operation](iteml, right)
if res:
return True
else:
for iteml in left:
if iteml is None:
continue
for itemr in right:
if itemr is None:
continue
res = self.binops_comparison[operation](iteml, itemr)
if res:
return True
return False | python | def evaluate_binop_comparison(self, operation, left, right, **kwargs):
"""
Evaluate given comparison binary operation with given operands.
"""
if not operation in self.binops_comparison:
raise ValueError("Invalid comparison binary operation '{}'".format(operation))
if left is None or right is None:
return None
if not isinstance(left, (list, ListIP)):
left = [left]
if not isinstance(right, (list, ListIP)):
right = [right]
if not left or not right:
return None
if operation in ['OP_IS']:
res = self.binops_comparison[operation](left, right)
if res:
return True
elif operation in ['OP_IN']:
for iteml in left:
res = self.binops_comparison[operation](iteml, right)
if res:
return True
else:
for iteml in left:
if iteml is None:
continue
for itemr in right:
if itemr is None:
continue
res = self.binops_comparison[operation](iteml, itemr)
if res:
return True
return False | ['def', 'evaluate_binop_comparison', '(', 'self', ',', 'operation', ',', 'left', ',', 'right', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'operation', 'in', 'self', '.', 'binops_comparison', ':', 'raise', 'ValueError', '(', '"Invalid comparison binary operation \'{}\'"', '.', 'format', '(', 'operation', ')', ')', 'if', 'left', 'is', 'None', 'or', 'right', 'is', 'None', ':', 'return', 'None', 'if', 'not', 'isinstance', '(', 'left', ',', '(', 'list', ',', 'ListIP', ')', ')', ':', 'left', '=', '[', 'left', ']', 'if', 'not', 'isinstance', '(', 'right', ',', '(', 'list', ',', 'ListIP', ')', ')', ':', 'right', '=', '[', 'right', ']', 'if', 'not', 'left', 'or', 'not', 'right', ':', 'return', 'None', 'if', 'operation', 'in', '[', "'OP_IS'", ']', ':', 'res', '=', 'self', '.', 'binops_comparison', '[', 'operation', ']', '(', 'left', ',', 'right', ')', 'if', 'res', ':', 'return', 'True', 'elif', 'operation', 'in', '[', "'OP_IN'", ']', ':', 'for', 'iteml', 'in', 'left', ':', 'res', '=', 'self', '.', 'binops_comparison', '[', 'operation', ']', '(', 'iteml', ',', 'right', ')', 'if', 'res', ':', 'return', 'True', 'else', ':', 'for', 'iteml', 'in', 'left', ':', 'if', 'iteml', 'is', 'None', ':', 'continue', 'for', 'itemr', 'in', 'right', ':', 'if', 'itemr', 'is', 'None', ':', 'continue', 'res', '=', 'self', '.', 'binops_comparison', '[', 'operation', ']', '(', 'iteml', ',', 'itemr', ')', 'if', 'res', ':', 'return', 'True', 'return', 'False'] | Evaluate given comparison binary operation with given operands. | ['Evaluate', 'given', 'comparison', 'binary', 'operation', 'with', 'given', 'operands', '.'] | train | https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/traversers.py#L651-L684 |
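A standalone sketch (not pynspect itself) of the any-match semantics this method applies for the plain comparison operators: the result is True if the comparison holds for any left/right pair, and None when an operand is missing or empty:

import operator

def any_match(op, left, right):
    # None operands and empty lists yield None, mirroring the record
    if left is None or right is None:
        return None
    left = left if isinstance(left, list) else [left]
    right = right if isinstance(right, list) else [right]
    if not left or not right:
        return None
    return any(op(l, r) for l in left for r in right
               if l is not None and r is not None)

print(any_match(operator.eq, [1, 2], [2, 3]))  # True: the pair (2, 2) matches
print(any_match(operator.gt, 5, [7, 9]))       # False: no pair satisfies >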
7,737 | cyrus-/cypy | cypy/cg.py | CG.append_once | def append_once(cls, code, **kwargs):
"""One-off code generation using append.
If keyword args are provided, the generator is initialized using
:meth:`with_id_processor`.
"""
if kwargs:
g = cls.with_id_processor()
g._append_context(kwargs)
else:
g = cls()
g.append(code)
return g.code | python | def append_once(cls, code, **kwargs):
"""One-off code generation using append.
If keyword args are provided, the generator is initialized using
:meth:`with_id_processor`.
"""
if kwargs:
g = cls.with_id_processor()
g._append_context(kwargs)
else:
g = cls()
g.append(code)
return g.code | ['def', 'append_once', '(', 'cls', ',', 'code', ',', '*', '*', 'kwargs', ')', ':', 'if', 'kwargs', ':', 'g', '=', 'cls', '.', 'with_id_processor', '(', ')', 'g', '.', '_append_context', '(', 'kwargs', ')', 'else', ':', 'g', '=', 'cls', '(', ')', 'g', '.', 'append', '(', 'code', ')', 'return', 'g', '.', 'code'] | One-off code generation using append.
If keyword args are provided, the generator is initialized using
:meth:`with_id_processor`. | ['One', '-', 'off', 'code', 'generation', 'using', 'append', '.', 'If', 'keyword', 'args', 'are', 'provided', 'initialized', 'using', ':', 'meth', ':', 'with_id_processor', '.'] | train | https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L89-L101 |
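A hedged call sketch for CG.append_once; only the signature shown in the record is assumed, and the import path is inferred from the repository layout:

from cypy.cg import CG

# without kwargs this is plain accumulation: the generated code is returned
print(CG.append_once("int x = 0;\n"))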
7,738 | GNS3/gns3-server | gns3server/controller/__init__.py | Controller.get_loaded_project | def get_loaded_project(self, project_id):
"""
Returns a project or raises a 404 error.
If the project has not finished loading, waits for it
"""
project = self.get_project(project_id)
yield from project.wait_loaded()
return project | python | def get_loaded_project(self, project_id):
"""
Returns a project or raises a 404 error.
If the project has not finished loading, waits for it
"""
project = self.get_project(project_id)
yield from project.wait_loaded()
return project | ['def', 'get_loaded_project', '(', 'self', ',', 'project_id', ')', ':', 'project', '=', 'self', '.', 'get_project', '(', 'project_id', ')', 'yield', 'from', 'project', '.', 'wait_loaded', '(', ')', 'return', 'project'] | Returns a project or raise a 404 error.
If project is not finished to load wait for it | ['Returns', 'a', 'project', 'or', 'raise', 'a', '404', 'error', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/__init__.py#L543-L551 |
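A sketch of consuming this coroutine in gns3server's generator-based asyncio style (Controller construction is elided and the project id below is made up):

import asyncio

@asyncio.coroutine
def show_project(controller, project_id):
    # blocks until the project has finished loading, then returns it
    project = yield from controller.get_loaded_project(project_id)
    print(project.name)

# loop.run_until_complete(show_project(controller, "a1b2c3")) once a
# Controller instance and event loop exist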
7,739 | dhermes/bezier | src/bezier/_surface_intersection.py | _geometric_intersect | def _geometric_intersect(nodes1, degree1, nodes2, degree2, verify):
r"""Find all intersections among edges of two surfaces.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Uses :func:`generic_intersect` with the
:attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy.
Args:
nodes1 (numpy.ndarray): The nodes defining the first surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree1 (int): The degree of the surface given by ``nodes1``.
nodes2 (numpy.ndarray): The nodes defining the second surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree2 (int): The degree of the surface given by ``nodes2``.
verify (Optional[bool]): Indicates if duplicate intersections
should be checked.
Returns:
Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of
* List of "edge info" lists. Each list represents a curved polygon
and contains 3-tuples of edge index, start and end (see the
output of :func:`ends_to_curve`).
* "Contained" boolean. If not :data:`None`, indicates
that one of the surfaces is contained in the other.
* The nodes of three edges of the first surface being intersected
followed by the nodes of the three edges of the second.
"""
all_intersections = _geometric_intersection.all_intersections
return generic_intersect(
nodes1, degree1, nodes2, degree2, verify, all_intersections
) | python | def _geometric_intersect(nodes1, degree1, nodes2, degree2, verify):
r"""Find all intersections among edges of two surfaces.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Uses :func:`generic_intersect` with the
:attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy.
Args:
nodes1 (numpy.ndarray): The nodes defining the first surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree1 (int): The degree of the surface given by ``nodes1``.
nodes2 (numpy.ndarray): The nodes defining the second surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree2 (int): The degree of the surface given by ``nodes2``.
verify (Optional[bool]): Indicates if duplicate intersections
should be checked.
Returns:
Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of
* List of "edge info" lists. Each list represents a curved polygon
and contains 3-tuples of edge index, start and end (see the
output of :func:`ends_to_curve`).
* "Contained" boolean. If not :data:`None`, indicates
that one of the surfaces is contained in the other.
* The nodes of three edges of the first surface being intersected
followed by the nodes of the three edges of the second.
"""
all_intersections = _geometric_intersection.all_intersections
return generic_intersect(
nodes1, degree1, nodes2, degree2, verify, all_intersections
) | ['def', '_geometric_intersect', '(', 'nodes1', ',', 'degree1', ',', 'nodes2', ',', 'degree2', ',', 'verify', ')', ':', 'all_intersections', '=', '_geometric_intersection', '.', 'all_intersections', 'return', 'generic_intersect', '(', 'nodes1', ',', 'degree1', ',', 'nodes2', ',', 'degree2', ',', 'verify', ',', 'all_intersections', ')'] | r"""Find all intersections among edges of two surfaces.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Uses :func:`generic_intersect` with the
:attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy.
Args:
nodes1 (numpy.ndarray): The nodes defining the first surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree1 (int): The degree of the surface given by ``nodes1``.
nodes2 (numpy.ndarray): The nodes defining the second surface in
the intersection (assumed in :math:`\mathbf{R}^2`).
degree2 (int): The degree of the surface given by ``nodes2``.
verify (Optional[bool]): Indicates if duplicate intersections
should be checked.
Returns:
Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of
* List of "edge info" lists. Each list represents a curved polygon
and contains 3-tuples of edge index, start and end (see the
output of :func:`ends_to_curve`).
* "Contained" boolean. If not :data:`None`, indicates
that one of the surfaces is contained in the other.
* The nodes of three edges of the first surface being intersected
followed by the nodes of the three edges of the second. | ['r', 'Find', 'all', 'intersections', 'among', 'edges', 'of', 'two', 'surfaces', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_intersection.py#L813-L848 |
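This private helper backs the public Surface.intersect(); below is a hedged sketch of that entry point. The 2-by-N Fortran-ordered node layout follows this module's convention, but verify it against the installed bezier version:

import numpy as np
import bezier

nodes1 = np.asfortranarray([[0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]])
nodes2 = np.asfortranarray([[0.25, 1.25, 0.25],
                            [0.25, 0.25, 1.25]])
surface1 = bezier.Surface(nodes1, degree=1)
surface2 = bezier.Surface(nodes2, degree=1)
print(surface1.intersect(surface2))   # a list of CurvedPolygon objects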
7,740 | nikcub/floyd | floyd/util/unicode.py | to_utf8 | def to_utf8(value):
"""Returns a string encoded using UTF-8.
This function comes from `Tornado`_.
:param value:
A unicode or string to be encoded.
:returns:
The encoded string.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value | python | def to_utf8(value):
"""Returns a string encoded using UTF-8.
This function comes from `Tornado`_.
:param value:
A unicode or string to be encoded.
:returns:
The encoded string.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value | ['def', 'to_utf8', '(', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'unicode', ')', ':', 'return', 'value', '.', 'encode', '(', "'utf-8'", ')', 'assert', 'isinstance', '(', 'value', ',', 'str', ')', 'return', 'value'] | Returns a string encoded using UTF-8.
This function comes from `Tornado`_.
:param value:
A unicode or string to be encoded.
:returns:
The encoded string. | ['Returns', 'a', 'string', 'encoded', 'using', 'UTF', '-', '8', '.'] | train | https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/util/unicode.py#L42-L56 |
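The `unicode` check dates to_utf8 to Python 2; a sketch of its behaviour there (`unicode` does not exist on Python 3):

assert to_utf8(u"caf\xe9") == "caf\xc3\xa9"             # unicode -> UTF-8 bytes
assert to_utf8("already encoded") == "already encoded"  # str passes through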
7,741 | tjvr/skip | skip/__init__.py | Interpreter.push_script | def push_script(self, scriptable, script, callback=None):
"""Run the script and add it to the list of threads."""
if script in self.threads:
self.threads[script].finish()
thread = Thread(self.run_script(scriptable, script),
scriptable, callback)
self.new_threads[script] = thread
return thread | python | def push_script(self, scriptable, script, callback=None):
"""Run the script and add it to the list of threads."""
if script in self.threads:
self.threads[script].finish()
thread = Thread(self.run_script(scriptable, script),
scriptable, callback)
self.new_threads[script] = thread
return thread | ['def', 'push_script', '(', 'self', ',', 'scriptable', ',', 'script', ',', 'callback', '=', 'None', ')', ':', 'if', 'script', 'in', 'self', '.', 'threads', ':', 'self', '.', 'threads', '[', 'script', ']', '.', 'finish', '(', ')', 'thread', '=', 'Thread', '(', 'self', '.', 'run_script', '(', 'scriptable', ',', 'script', ')', ',', 'scriptable', ',', 'callback', ')', 'self', '.', 'new_threads', '[', 'script', ']', '=', 'thread', 'return', 'thread'] | Run the script and add it to the list of threads. | ['Run', 'the', 'script', 'and', 'add', 'it', 'to', 'the', 'list', 'of', 'threads', '.'] | train | https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L134-L141 |
7,742 | splunk/splunk-sdk-python | examples/job.py | Program.run | def run(self, argv):
"""Dispatch the given command."""
command = argv[0]
handlers = {
'cancel': self.cancel,
'create': self.create,
'events': self.events,
'finalize': self.finalize,
'list': self.list,
'pause': self.pause,
'preview': self.preview,
'results': self.results,
'searchlog': self.searchlog,
'summary': self.summary,
'perf': self.perf,
'timeline': self.timeline,
'touch': self.touch,
'unpause': self.unpause,
}
handler = handlers.get(command, None)
if handler is None:
error("Unrecognized command: %s" % command, 2)
handler(argv[1:]) | python | def run(self, argv):
"""Dispatch the given command."""
command = argv[0]
handlers = {
'cancel': self.cancel,
'create': self.create,
'events': self.events,
'finalize': self.finalize,
'list': self.list,
'pause': self.pause,
'preview': self.preview,
'results': self.results,
'searchlog': self.searchlog,
'summary': self.summary,
'perf': self.perf,
'timeline': self.timeline,
'touch': self.touch,
'unpause': self.unpause,
}
handler = handlers.get(command, None)
if handler is None:
error("Unrecognized command: %s" % command, 2)
handler(argv[1:]) | ['def', 'run', '(', 'self', ',', 'argv', ')', ':', 'command', '=', 'argv', '[', '0', ']', 'handlers', '=', '{', "'cancel'", ':', 'self', '.', 'cancel', ',', "'create'", ':', 'self', '.', 'create', ',', "'events'", ':', 'self', '.', 'events', ',', "'finalize'", ':', 'self', '.', 'finalize', ',', "'list'", ':', 'self', '.', 'list', ',', "'pause'", ':', 'self', '.', 'pause', ',', "'preview'", ':', 'self', '.', 'preview', ',', "'results'", ':', 'self', '.', 'results', ',', "'searchlog'", ':', 'self', '.', 'searchlog', ',', "'summary'", ':', 'self', '.', 'summary', ',', "'perf'", ':', 'self', '.', 'perf', ',', "'timeline'", ':', 'self', '.', 'timeline', ',', "'touch'", ':', 'self', '.', 'touch', ',', "'unpause'", ':', 'self', '.', 'unpause', ',', '}', 'handler', '=', 'handlers', '.', 'get', '(', 'command', ',', 'None', ')', 'if', 'handler', 'is', 'None', ':', 'error', '(', '"Unrecognized command: %s"', '%', 'command', ',', '2', ')', 'handler', '(', 'argv', '[', '1', ':', ']', ')'] | Dispatch the given command. | ['Dispatch', 'the', 'given', 'command', '.'] | train | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/job.py#L209-L231 |
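The same dispatch-table pattern in miniature, standalone rather than the Splunk SDK (the handler bodies here are invented):

def cancel(args):
    print("cancel", args)

def create(args):
    print("create", args)

def run(argv):
    handlers = {"cancel": cancel, "create": create}
    handler = handlers.get(argv[0])
    if handler is None:
        raise SystemExit("Unrecognized command: %s" % argv[0])
    handler(argv[1:])

run(["create", "search index=_internal"])   # -> create ['search index=_internal']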
7,743 | FutunnOpen/futuquant | futuquant/common/pbjson.py | dict2pb | def dict2pb(cls, adict, strict=False):
"""
Takes a class representing the ProtoBuf Message and fills it with data from
the dict.
"""
obj = cls()
for field in obj.DESCRIPTOR.fields:
if not field.label == field.LABEL_REQUIRED:
continue
if not field.has_default_value:
continue
if not field.name in adict:
raise ConvertException('Field "%s" missing from descriptor dictionary.'
% field.name)
field_names = set([field.name for field in obj.DESCRIPTOR.fields])
if strict:
for key in adict.keys():
if key not in field_names:
raise ConvertException(
'Key "%s" can not be mapped to field in %s class.'
% (key, type(obj)))
for field in obj.DESCRIPTOR.fields:
if not field.name in adict:
continue
msg_type = field.message_type
if field.label == FD.LABEL_REPEATED:
if field.type == FD.TYPE_MESSAGE:
for sub_dict in adict[field.name]:
item = getattr(obj, field.name).add()
item.CopyFrom(dict2pb(msg_type._concrete_class, sub_dict))
else:
# fix for python3: map() usage changed (it now returns an iterator)
list(map(getattr(obj, field.name).append, adict[field.name]))
else:
if field.type == FD.TYPE_MESSAGE:
value = dict2pb(msg_type._concrete_class, adict[field.name])
getattr(obj, field.name).CopyFrom(value)
elif field.type in [FD.TYPE_UINT64, FD.TYPE_INT64, FD.TYPE_SINT64]:
setattr(obj, field.name, int(adict[field.name]))
else:
setattr(obj, field.name, adict[field.name])
return obj | python | def dict2pb(cls, adict, strict=False):
"""
Takes a class representing the ProtoBuf Message and fills it with data from
the dict.
"""
obj = cls()
for field in obj.DESCRIPTOR.fields:
if not field.label == field.LABEL_REQUIRED:
continue
if not field.has_default_value:
continue
if not field.name in adict:
raise ConvertException('Field "%s" missing from descriptor dictionary.'
% field.name)
field_names = set([field.name for field in obj.DESCRIPTOR.fields])
if strict:
for key in adict.keys():
if key not in field_names:
raise ConvertException(
'Key "%s" can not be mapped to field in %s class.'
% (key, type(obj)))
for field in obj.DESCRIPTOR.fields:
if not field.name in adict:
continue
msg_type = field.message_type
if field.label == FD.LABEL_REPEATED:
if field.type == FD.TYPE_MESSAGE:
for sub_dict in adict[field.name]:
item = getattr(obj, field.name).add()
item.CopyFrom(dict2pb(msg_type._concrete_class, sub_dict))
else:
# fix for python3: map() usage changed (it now returns an iterator)
list(map(getattr(obj, field.name).append, adict[field.name]))
else:
if field.type == FD.TYPE_MESSAGE:
value = dict2pb(msg_type._concrete_class, adict[field.name])
getattr(obj, field.name).CopyFrom(value)
elif field.type in [FD.TYPE_UINT64, FD.TYPE_INT64, FD.TYPE_SINT64]:
setattr(obj, field.name, int(adict[field.name]))
else:
setattr(obj, field.name, adict[field.name])
return obj | ['def', 'dict2pb', '(', 'cls', ',', 'adict', ',', 'strict', '=', 'False', ')', ':', 'obj', '=', 'cls', '(', ')', 'for', 'field', 'in', 'obj', '.', 'DESCRIPTOR', '.', 'fields', ':', 'if', 'not', 'field', '.', 'label', '==', 'field', '.', 'LABEL_REQUIRED', ':', 'continue', 'if', 'not', 'field', '.', 'has_default_value', ':', 'continue', 'if', 'not', 'field', '.', 'name', 'in', 'adict', ':', 'raise', 'ConvertException', '(', '\'Field "%s" missing from descriptor dictionary.\'', '%', 'field', '.', 'name', ')', 'field_names', '=', 'set', '(', '[', 'field', '.', 'name', 'for', 'field', 'in', 'obj', '.', 'DESCRIPTOR', '.', 'fields', ']', ')', 'if', 'strict', ':', 'for', 'key', 'in', 'adict', '.', 'keys', '(', ')', ':', 'if', 'key', 'not', 'in', 'field_names', ':', 'raise', 'ConvertException', '(', '\'Key "%s" can not be mapped to field in %s class.\'', '%', '(', 'key', ',', 'type', '(', 'obj', ')', ')', ')', 'for', 'field', 'in', 'obj', '.', 'DESCRIPTOR', '.', 'fields', ':', 'if', 'not', 'field', '.', 'name', 'in', 'adict', ':', 'continue', 'msg_type', '=', 'field', '.', 'message_type', 'if', 'field', '.', 'label', '==', 'FD', '.', 'LABEL_REPEATED', ':', 'if', 'field', '.', 'type', '==', 'FD', '.', 'TYPE_MESSAGE', ':', 'for', 'sub_dict', 'in', 'adict', '[', 'field', '.', 'name', ']', ':', 'item', '=', 'getattr', '(', 'obj', ',', 'field', '.', 'name', ')', '.', 'add', '(', ')', 'item', '.', 'CopyFrom', '(', 'dict2pb', '(', 'msg_type', '.', '_concrete_class', ',', 'sub_dict', ')', ')', 'else', ':', '# fix python3 map用法变更', 'list', '(', 'map', '(', 'getattr', '(', 'obj', ',', 'field', '.', 'name', ')', '.', 'append', ',', 'adict', '[', 'field', '.', 'name', ']', ')', ')', 'else', ':', 'if', 'field', '.', 'type', '==', 'FD', '.', 'TYPE_MESSAGE', ':', 'value', '=', 'dict2pb', '(', 'msg_type', '.', '_concrete_class', ',', 'adict', '[', 'field', '.', 'name', ']', ')', 'getattr', '(', 'obj', ',', 'field', '.', 'name', ')', '.', 'CopyFrom', '(', 'value', ')', 'elif', 'field', '.', 'type', 'in', '[', 'FD', '.', 'TYPE_UINT64', ',', 'FD', '.', 'TYPE_INT64', ',', 'FD', '.', 'TYPE_SINT64', ']', ':', 'setattr', '(', 'obj', ',', 'field', '.', 'name', ',', 'int', '(', 'adict', '[', 'field', '.', 'name', ']', ')', ')', 'else', ':', 'setattr', '(', 'obj', ',', 'field', '.', 'name', ',', 'adict', '[', 'field', '.', 'name', ']', ')', 'return', 'obj'] | Takes a class representing the ProtoBuf Message and fills it with data from
the dict. | ['Takes', 'a', 'class', 'representing', 'the', 'ProtoBuf', 'Message', 'and', 'fills', 'it', 'with', 'data', 'from', 'the', 'dict', '.'] | train | https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/pbjson.py#L40-L81 |
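Hypothetical use of dict2pb, assuming a compiled protobuf message class Person with a string `name` field and a repeated `emails` field (no .proto is shown in the record):

# Person would come from generated code, e.g. `from person_pb2 import Person`
msg = dict2pb(Person, {"name": "Ada", "emails": ["ada@example.com"]})
print(msg.name)          # "Ada"
print(list(msg.emails))  # ["ada@example.com"]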
7,744 | kylejusticemagnuson/pyti | pyti/directional_indicators.py | calculate_up_moves | def calculate_up_moves(high_data):
"""
Up Move.
Formula:
UPMOVE = Ht - Ht-1
"""
up_moves = [high_data[idx] - high_data[idx-1] for idx in range(1, len(high_data))]
return [np.nan] + up_moves | python | def calculate_up_moves(high_data):
"""
Up Move.
Formula:
UPMOVE = Ht - Ht-1
"""
up_moves = [high_data[idx] - high_data[idx-1] for idx in range(1, len(high_data))]
return [np.nan] + up_moves | ['def', 'calculate_up_moves', '(', 'high_data', ')', ':', 'up_moves', '=', '[', 'high_data', '[', 'idx', ']', '-', 'high_data', '[', 'idx', '-', '1', ']', 'for', 'idx', 'in', 'range', '(', '1', ',', 'len', '(', 'high_data', ')', ')', ']', 'return', '[', 'np', '.', 'nan', ']', '+', 'up_moves'] | Up Move.
Formula:
UPMOVE = Ht - Ht-1 | ['Up', 'Move', '.'] | train | https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/directional_indicators.py#L13-L21 |
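A worked example of the UPMOVE formula (prices invented; the first slot is NaN because the opening bar has no predecessor):

highs = [10.0, 10.5, 10.2, 11.0]
print(calculate_up_moves(highs))
# -> [nan, 0.5, -0.3, 0.8] up to floating-point rounding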
7,745 | ihgazni2/edict | edict/edict.py | dict2tlist | def dict2tlist(this_dict,**kwargs):
'''
#sequence will be lost
d = {'a':'b','c':'d'}
dict2tlist(d)
'''
if('check' in kwargs):
check = kwargs['check']
else:
check = 1
if(check):
if(isinstance(this_dict,dict)):
pass
else:
return(None)
else:
pass
if('deepcopy' in kwargs):
deepcopy = kwargs['deepcopy']
else:
deepcopy = 1
tuple_list = []
if(deepcopy):
new = copy.deepcopy(this_dict)
else:
new = this_dict
i = 0
for key in this_dict:
value = this_dict[key]
tuple_list.append((key,value))
return(tuple_list) | python | def dict2tlist(this_dict,**kwargs):
'''
#sequence will be lost
d = {'a':'b','c':'d'}
dict2tlist(d)
'''
if('check' in kwargs):
check = kwargs['check']
else:
check = 1
if(check):
if(isinstance(this_dict,dict)):
pass
else:
return(None)
else:
pass
if('deepcopy' in kwargs):
deepcopy = kwargs['deepcopy']
else:
deepcopy = 1
tuple_list = []
if(deepcopy):
new = copy.deepcopy(this_dict)
else:
new = this_dict
i = 0
for key in this_dict:
value = this_dict[key]
tuple_list.append((key,value))
return(tuple_list) | ['def', 'dict2tlist', '(', 'this_dict', ',', '*', '*', 'kwargs', ')', ':', 'if', '(', "'check'", 'in', 'kwargs', ')', ':', 'check', '=', 'kwargs', '[', "'check'", ']', 'else', ':', 'check', '=', '1', 'if', '(', 'check', ')', ':', 'if', '(', 'isinstance', '(', 'this_dict', ',', 'dict', ')', ')', ':', 'pass', 'else', ':', 'return', '(', 'None', ')', 'else', ':', 'pass', 'if', '(', "'deepcopy'", 'in', 'kwargs', ')', ':', 'deepcopy', '=', 'kwargs', '[', "'deepcopy'", ']', 'else', ':', 'deepcopy', '=', '1', 'tuple_list', '=', '[', ']', 'if', '(', 'deepcopy', ')', ':', 'new', '=', 'copy', '.', 'deepcopy', '(', 'this_dict', ')', 'else', ':', 'new', '=', 'this_dict', 'i', '=', '0', 'for', 'key', 'in', 'this_dict', ':', 'value', '=', 'this_dict', '[', 'key', ']', 'tuple_list', '.', 'append', '(', '(', 'key', ',', 'value', ')', ')', 'return', '(', 'tuple_list', ')'] | #sequence will be lost
d = {'a':'b','c':'d'}
dict2tlist(d) | ['#sequence', 'will', 'be', 'losted', 'd', '=', '{', 'a', ':', 'b', 'c', ':', 'd', '}', 'dict2tlist', '(', 'd', ')'] | train | https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L744-L774 |
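A quick behavioural sketch of dict2tlist, following the record's own docstring example:

print(dict2tlist({'a': 'b', 'c': 'd'}))   # [('a', 'b'), ('c', 'd')]
print(dict2tlist("not a dict"))           # None: the default check rejects non-dicts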
7,746 | ThreatConnect-Inc/tcex | tcex/tcex_resources.py | Resource.associations | def associations(self, association_resource):
"""Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations.
"""
resource = self.copy()
resource._request_entity = association_resource.api_entity
resource._request_uri = '{}/{}'.format(
resource._request_uri, association_resource.request_uri
)
return resource | python | def associations(self, association_resource):
"""Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations.
"""
resource = self.copy()
resource._request_entity = association_resource.api_entity
resource._request_uri = '{}/{}'.format(
resource._request_uri, association_resource.request_uri
)
return resource | ['def', 'associations', '(', 'self', ',', 'association_resource', ')', ':', 'resource', '=', 'self', '.', 'copy', '(', ')', 'resource', '.', '_request_entity', '=', 'association_resource', '.', 'api_entity', 'resource', '.', '_request_uri', '=', "'{}/{}'", '.', 'format', '(', 'resource', '.', '_request_uri', ',', 'association_resource', '.', 'request_uri', ')', 'return', 'resource'] | Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations. | ['Retrieve', 'Association', 'for', 'this', 'resource', 'of', 'the', 'type', 'in', 'association_resource', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L397-L431 |
7,747 | ReFirmLabs/binwalk | src/binwalk/core/module.py | Module.result | def result(self, r=None, **kwargs):
'''
Validates a result, stores it in self.results and prints it.
Accepts the same kwargs as the binwalk.core.module.Result class.
@r - An existing instance of binwalk.core.module.Result.
Returns an instance of binwalk.core.module.Result.
'''
if r is None:
r = Result(**kwargs)
# Add the name of the current module to the result
r.module = self.__class__.__name__
# Any module that is reporting results, valid or not, should be marked
# as enabled
if not self.enabled:
self.enabled = True
self.validate(r)
self._plugins_result(r)
# Update the progress status automatically if it is not being done
# manually by the module
if r.offset and r.file and self.AUTO_UPDATE_STATUS:
self.status.total = r.file.length
self.status.completed = r.offset
self.status.fp = r.file
for dependency in self.dependencies:
try:
getattr(self, dependency.attribute).callback(r)
except AttributeError:
continue
if r.valid:
self.results.append(r)
if r.display:
display_args = self._build_display_args(r)
if display_args:
self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
self.config.display.result(*display_args)
return r | python | def result(self, r=None, **kwargs):
'''
Validates a result, stores it in self.results and prints it.
Accepts the same kwargs as the binwalk.core.module.Result class.
@r - An existing instance of binwalk.core.module.Result.
Returns an instance of binwalk.core.module.Result.
'''
if r is None:
r = Result(**kwargs)
# Add the name of the current module to the result
r.module = self.__class__.__name__
# Any module that is reporting results, valid or not, should be marked
# as enabled
if not self.enabled:
self.enabled = True
self.validate(r)
self._plugins_result(r)
# Update the progress status automatically if it is not being done
# manually by the module
if r.offset and r.file and self.AUTO_UPDATE_STATUS:
self.status.total = r.file.length
self.status.completed = r.offset
self.status.fp = r.file
for dependency in self.dependencies:
try:
getattr(self, dependency.attribute).callback(r)
except AttributeError:
continue
if r.valid:
self.results.append(r)
if r.display:
display_args = self._build_display_args(r)
if display_args:
self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
self.config.display.result(*display_args)
return r | ['def', 'result', '(', 'self', ',', 'r', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'r', 'is', 'None', ':', 'r', '=', 'Result', '(', '*', '*', 'kwargs', ')', '# Add the name of the current module to the result', 'r', '.', 'module', '=', 'self', '.', '__class__', '.', '__name__', '# Any module that is reporting results, valid or not, should be marked', '# as enabled', 'if', 'not', 'self', '.', 'enabled', ':', 'self', '.', 'enabled', '=', 'True', 'self', '.', 'validate', '(', 'r', ')', 'self', '.', '_plugins_result', '(', 'r', ')', '# Update the progress status automatically if it is not being done', '# manually by the module', 'if', 'r', '.', 'offset', 'and', 'r', '.', 'file', 'and', 'self', '.', 'AUTO_UPDATE_STATUS', ':', 'self', '.', 'status', '.', 'total', '=', 'r', '.', 'file', '.', 'length', 'self', '.', 'status', '.', 'completed', '=', 'r', '.', 'offset', 'self', '.', 'status', '.', 'fp', '=', 'r', '.', 'file', 'for', 'dependency', 'in', 'self', '.', 'dependencies', ':', 'try', ':', 'getattr', '(', 'self', ',', 'dependency', '.', 'attribute', ')', '.', 'callback', '(', 'r', ')', 'except', 'AttributeError', ':', 'continue', 'if', 'r', '.', 'valid', ':', 'self', '.', 'results', '.', 'append', '(', 'r', ')', 'if', 'r', '.', 'display', ':', 'display_args', '=', 'self', '.', '_build_display_args', '(', 'r', ')', 'if', 'display_args', ':', 'self', '.', 'config', '.', 'display', '.', 'format_strings', '(', 'self', '.', 'HEADER_FORMAT', ',', 'self', '.', 'RESULT_FORMAT', ')', 'self', '.', 'config', '.', 'display', '.', 'result', '(', '*', 'display_args', ')', 'return', 'r'] | Validates a result, stores it in self.results and prints it.
Accepts the same kwargs as the binwalk.core.module.Result class.
@r - An existing instance of binwalk.core.module.Result.
Returns an instance of binwalk.core.module.Result. | ['Validates', 'a', 'result', 'stores', 'it', 'in', 'self', '.', 'results', 'and', 'prints', 'it', '.', 'Accepts', 'the', 'same', 'kwargs', 'as', 'the', 'binwalk', '.', 'core', '.', 'module', '.', 'Result', 'class', '.'] | train | https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/module.py#L458-L503 |
7,748 | eandersson/amqpstorm | amqpstorm/basic.py | Basic._consume_add_and_get_tag | def _consume_add_and_get_tag(self, consume_rpc_result):
"""Add the tag to the channel and return it.
:param dict consume_rpc_result:
:rtype: str
"""
consumer_tag = consume_rpc_result['consumer_tag']
self._channel.add_consumer_tag(consumer_tag)
return consumer_tag | python | def _consume_add_and_get_tag(self, consume_rpc_result):
"""Add the tag to the channel and return it.
:param dict consume_rpc_result:
:rtype: str
"""
consumer_tag = consume_rpc_result['consumer_tag']
self._channel.add_consumer_tag(consumer_tag)
return consumer_tag | ['def', '_consume_add_and_get_tag', '(', 'self', ',', 'consume_rpc_result', ')', ':', 'consumer_tag', '=', 'consume_rpc_result', '[', "'consumer_tag'", ']', 'self', '.', '_channel', '.', 'add_consumer_tag', '(', 'consumer_tag', ')', 'return', 'consumer_tag'] | Add the tag to the channel and return it.
:param dict consume_rpc_result:
:rtype: str | ['Add', 'the', 'tag', 'to', 'the', 'channel', 'and', 'return', 'it', '.'] | train | https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L268-L277 |
7,749 | wummel/linkchecker | linkcheck/bookmarks/opera.py | find_bookmark_file | def find_bookmark_file ():
"""Return the bookmark file of the Opera profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found.
"""
try:
dirname = get_profile_dir()
if os.path.isdir(dirname):
for name in OperaBookmarkFiles:
fname = os.path.join(dirname, name)
if os.path.isfile(fname):
return fname
except Exception:
pass
return u"" | python | def find_bookmark_file ():
"""Return the bookmark file of the Opera profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found.
"""
try:
dirname = get_profile_dir()
if os.path.isdir(dirname):
for name in OperaBookmarkFiles:
fname = os.path.join(dirname, name)
if os.path.isfile(fname):
return fname
except Exception:
pass
return u"" | ['def', 'find_bookmark_file', '(', ')', ':', 'try', ':', 'dirname', '=', 'get_profile_dir', '(', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'dirname', ')', ':', 'for', 'name', 'in', 'OperaBookmarkFiles', ':', 'fname', '=', 'os', '.', 'path', '.', 'join', '(', 'dirname', ',', 'name', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'fname', ')', ':', 'return', 'fname', 'except', 'Exception', ':', 'pass', 'return', 'u""'] | Return the bookmark file of the Opera profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found. | ['Return', 'the', 'bookmark', 'file', 'of', 'the', 'Opera', 'profile', '.', 'Returns', 'absolute', 'filename', 'if', 'found', 'or', 'empty', 'string', 'if', 'no', 'bookmark', 'file', 'could', 'be', 'found', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/bookmarks/opera.py#L40-L54 |
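Straightforward use of find_bookmark_file; an empty string means no Opera bookmark file was located:

fname = find_bookmark_file()
if fname:
    print("Opera bookmarks:", fname)
else:
    print("no Opera bookmark file found")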
7,750 | dmlc/gluon-nlp | scripts/sentiment_analysis/finetune_lm.py | train | def train():
"""Training process"""
start_pipeline_time = time.time()
# Training/Testing
best_valid_acc = 0
stop_early = 0
for epoch in range(args.epochs):
# Epoch training stats
start_epoch_time = time.time()
epoch_L = 0.0
epoch_sent_num = 0
epoch_wc = 0
# Log interval training stats
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0.0
for i, ((data, valid_length), label) in enumerate(train_dataloader):
data = mx.nd.transpose(data.as_in_context(context))
label = label.as_in_context(context)
valid_length = valid_length.as_in_context(context).astype(np.float32)
wc = valid_length.sum().asscalar()
log_interval_wc += wc
epoch_wc += wc
log_interval_sent_num += data.shape[1]
epoch_sent_num += data.shape[1]
with autograd.record():
output = net(data, valid_length)
L = loss(output, label).mean()
L.backward()
# Clip gradient
if args.clip is not None:
grads = [p.grad(context) for p in net.collect_params().values()]
gluon.utils.clip_global_norm(grads, args.clip)
# Update parameter
trainer.step(1)
log_interval_L += L.asscalar()
epoch_L += L.asscalar()
if (i + 1) % args.log_interval == 0:
print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
epoch, i + 1, len(train_dataloader),
log_interval_L / log_interval_sent_num,
log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
# Clear log interval training stats
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0
end_epoch_time = time.time()
valid_avg_L, valid_acc = evaluate(valid_dataloader)
test_avg_L, test_acc = evaluate(test_dataloader)
print('[Epoch %d] train avg loss %g, '
'valid acc %.4f, valid avg loss %g, '
'test acc %.4f, test avg loss %g, throughput %gK wps' % (
epoch, epoch_L / epoch_sent_num,
valid_acc, valid_avg_L, test_acc, test_avg_L,
epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
if valid_acc < best_valid_acc:
print('No Improvement.')
stop_early += 1
if stop_early == 3:
break
else:
# Reset stop_early when validation accuracy reaches a new high
print('Observed Improvement.')
stop_early = 0
net.save_parameters(args.save_prefix + '_{:04d}.params'.format(epoch))
best_valid_acc = valid_acc
net.load_parameters(glob.glob(args.save_prefix+'_*.params')[-1], context)
valid_avg_L, valid_acc = evaluate(valid_dataloader)
test_avg_L, test_acc = evaluate(test_dataloader)
print('Best validation loss %g, validation acc %.4f'%(valid_avg_L, valid_acc))
print('Best test loss %g, test acc %.4f'%(test_avg_L, test_acc))
print('Total time cost %.2fs'%(time.time()-start_pipeline_time)) | python | def train():
"""Training process"""
start_pipeline_time = time.time()
# Training/Testing
best_valid_acc = 0
stop_early = 0
for epoch in range(args.epochs):
# Epoch training stats
start_epoch_time = time.time()
epoch_L = 0.0
epoch_sent_num = 0
epoch_wc = 0
# Log interval training stats
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0.0
for i, ((data, valid_length), label) in enumerate(train_dataloader):
data = mx.nd.transpose(data.as_in_context(context))
label = label.as_in_context(context)
valid_length = valid_length.as_in_context(context).astype(np.float32)
wc = valid_length.sum().asscalar()
log_interval_wc += wc
epoch_wc += wc
log_interval_sent_num += data.shape[1]
epoch_sent_num += data.shape[1]
with autograd.record():
output = net(data, valid_length)
L = loss(output, label).mean()
L.backward()
# Clip gradient
if args.clip is not None:
grads = [p.grad(context) for p in net.collect_params().values()]
gluon.utils.clip_global_norm(grads, args.clip)
# Update parameter
trainer.step(1)
log_interval_L += L.asscalar()
epoch_L += L.asscalar()
if (i + 1) % args.log_interval == 0:
print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
epoch, i + 1, len(train_dataloader),
log_interval_L / log_interval_sent_num,
log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
# Clear log interval training stats
start_log_interval_time = time.time()
log_interval_wc = 0
log_interval_sent_num = 0
log_interval_L = 0
end_epoch_time = time.time()
valid_avg_L, valid_acc = evaluate(valid_dataloader)
test_avg_L, test_acc = evaluate(test_dataloader)
print('[Epoch %d] train avg loss %g, '
'valid acc %.4f, valid avg loss %g, '
'test acc %.4f, test avg loss %g, throughput %gK wps' % (
epoch, epoch_L / epoch_sent_num,
valid_acc, valid_avg_L, test_acc, test_avg_L,
epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
if valid_acc < best_valid_acc:
print('No Improvement.')
stop_early += 1
if stop_early == 3:
break
else:
# Reset stop_early when validation accuracy reaches a new high
print('Observed Improvement.')
stop_early = 0
net.save_parameters(args.save_prefix + '_{:04d}.params'.format(epoch))
best_valid_acc = valid_acc
net.load_parameters(glob.glob(args.save_prefix+'_*.params')[-1], context)
valid_avg_L, valid_acc = evaluate(valid_dataloader)
test_avg_L, test_acc = evaluate(test_dataloader)
print('Best validation loss %g, validation acc %.4f'%(valid_avg_L, valid_acc))
print('Best test loss %g, test acc %.4f'%(test_avg_L, test_acc))
print('Total time cost %.2fs'%(time.time()-start_pipeline_time)) | ['def', 'train', '(', ')', ':', 'start_pipeline_time', '=', 'time', '.', 'time', '(', ')', '# Training/Testing', 'best_valid_acc', '=', '0', 'stop_early', '=', '0', 'for', 'epoch', 'in', 'range', '(', 'args', '.', 'epochs', ')', ':', '# Epoch training stats', 'start_epoch_time', '=', 'time', '.', 'time', '(', ')', 'epoch_L', '=', '0.0', 'epoch_sent_num', '=', '0', 'epoch_wc', '=', '0', '# Log interval training stats', 'start_log_interval_time', '=', 'time', '.', 'time', '(', ')', 'log_interval_wc', '=', '0', 'log_interval_sent_num', '=', '0', 'log_interval_L', '=', '0.0', 'for', 'i', ',', '(', '(', 'data', ',', 'valid_length', ')', ',', 'label', ')', 'in', 'enumerate', '(', 'train_dataloader', ')', ':', 'data', '=', 'mx', '.', 'nd', '.', 'transpose', '(', 'data', '.', 'as_in_context', '(', 'context', ')', ')', 'label', '=', 'label', '.', 'as_in_context', '(', 'context', ')', 'valid_length', '=', 'valid_length', '.', 'as_in_context', '(', 'context', ')', '.', 'astype', '(', 'np', '.', 'float32', ')', 'wc', '=', 'valid_length', '.', 'sum', '(', ')', '.', 'asscalar', '(', ')', 'log_interval_wc', '+=', 'wc', 'epoch_wc', '+=', 'wc', 'log_interval_sent_num', '+=', 'data', '.', 'shape', '[', '1', ']', 'epoch_sent_num', '+=', 'data', '.', 'shape', '[', '1', ']', 'with', 'autograd', '.', 'record', '(', ')', ':', 'output', '=', 'net', '(', 'data', ',', 'valid_length', ')', 'L', '=', 'loss', '(', 'output', ',', 'label', ')', '.', 'mean', '(', ')', 'L', '.', 'backward', '(', ')', '# Clip gradient', 'if', 'args', '.', 'clip', 'is', 'not', 'None', ':', 'grads', '=', '[', 'p', '.', 'grad', '(', 'context', ')', 'for', 'p', 'in', 'net', '.', 'collect_params', '(', ')', '.', 'values', '(', ')', ']', 'gluon', '.', 'utils', '.', 'clip_global_norm', '(', 'grads', ',', 'args', '.', 'clip', ')', '# Update parameter', 'trainer', '.', 'step', '(', '1', ')', 'log_interval_L', '+=', 'L', '.', 'asscalar', '(', ')', 'epoch_L', '+=', 'L', '.', 'asscalar', '(', ')', 'if', '(', 'i', '+', '1', ')', '%', 'args', '.', 'log_interval', '==', '0', ':', 'print', '(', "'[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps'", '%', '(', 'epoch', ',', 'i', '+', '1', ',', 'len', '(', 'train_dataloader', ')', ',', 'log_interval_L', '/', 'log_interval_sent_num', ',', 'log_interval_wc', '/', '1000', '/', '(', 'time', '.', 'time', '(', ')', '-', 'start_log_interval_time', ')', ')', ')', '# Clear log interval training stats', 'start_log_interval_time', '=', 'time', '.', 'time', '(', ')', 'log_interval_wc', '=', '0', 'log_interval_sent_num', '=', '0', 'log_interval_L', '=', '0', 'end_epoch_time', '=', 'time', '.', 'time', '(', ')', 'valid_avg_L', ',', 'valid_acc', '=', 'evaluate', '(', 'valid_dataloader', ')', 'test_avg_L', ',', 'test_acc', '=', 'evaluate', '(', 'test_dataloader', ')', 'print', '(', "'[Epoch %d] train avg loss %g, '", "'valid acc %.4f, valid avg loss %g, '", "'test acc %.4f, test avg loss %g, throughput %gK wps'", '%', '(', 'epoch', ',', 'epoch_L', '/', 'epoch_sent_num', ',', 'valid_acc', ',', 'valid_avg_L', ',', 'test_acc', ',', 'test_avg_L', ',', 'epoch_wc', '/', '1000', '/', '(', 'end_epoch_time', '-', 'start_epoch_time', ')', ')', ')', 'if', 'valid_acc', '<', 'best_valid_acc', ':', 'print', '(', "'No Improvement.'", ')', 'stop_early', '+=', '1', 'if', 'stop_early', '==', '3', ':', 'break', 'else', ':', '# Reset stop_early if the validation loss finds a new low value', 'print', '(', "'Observed Improvement.'", ')', 'stop_early', '=', '0', 'net', '.', 
'save_parameters', '(', 'args', '.', 'save_prefix', '+', "'_{:04d}.params'", '.', 'format', '(', 'epoch', ')', ')', 'best_valid_acc', '=', 'valid_acc', 'net', '.', 'load_parameters', '(', 'glob', '.', 'glob', '(', 'args', '.', 'save_prefix', '+', "'_*.params'", ')', '[', '-', '1', ']', ',', 'context', ')', 'valid_avg_L', ',', 'valid_acc', '=', 'evaluate', '(', 'valid_dataloader', ')', 'test_avg_L', ',', 'test_acc', '=', 'evaluate', '(', 'test_dataloader', ')', 'print', '(', "'Best validation loss %g, validation acc %.4f'", '%', '(', 'valid_avg_L', ',', 'valid_acc', ')', ')', 'print', '(', "'Best test loss %g, test acc %.4f'", '%', '(', 'test_avg_L', ',', 'test_acc', ')', ')', 'print', '(', "'Total time cost %.2fs'", '%', '(', 'time', '.', 'time', '(', ')', '-', 'start_pipeline_time', ')', ')'] | Training process | ['Training', 'process'] | train | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/sentiment_analysis/finetune_lm.py#L263-L340 |
7,751 | ionelmc/python-fields | src/fields/__init__.py | class_sealer | def class_sealer(fields, defaults,
base=__base__, make_init_func=make_init_func,
initializer=True, comparable=True, printable=True, convertible=False, pass_kwargs=False):
"""
This sealer makes a normal container class. It's mutable and supports arguments with default values.
"""
baseclass_name = 'FieldsBase_for__{0}'.format('__'.join(fields))
if pass_kwargs:
options = dict(
header_end=', **__fields_kwargs__):\n',
super_call_end=', **__fields_kwargs__)\n',
super_call_pass_allargs=False,
)
else:
options = {}
if initializer:
global_namespace, local_namespace = make_init_func(fields, defaults, baseclass_name, **options)
class FieldsBase(base):
if initializer:
__init__ = local_namespace['__init__']
if comparable:
def __eq__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) == tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def __lt__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) < tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) <= tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) > tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) >= tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __hash__(self):
return hash(tuple(getattr(self, a) for a in fields))
if printable:
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
", ".join("{0}={1}".format(attr, repr(getattr(self, attr))) for attr in fields)
)
if convertible:
@property
def as_dict(self):
return dict((attr, getattr(self, attr)) for attr in fields)
@property
def as_tuple(self):
return tuple(getattr(self, attr) for attr in fields)
if initializer:
global_namespace[baseclass_name] = FieldsBase
return FieldsBase | python | def class_sealer(fields, defaults,
base=__base__, make_init_func=make_init_func,
initializer=True, comparable=True, printable=True, convertible=False, pass_kwargs=False):
"""
This sealer makes a normal container class. It's mutable and supports arguments with default values.
"""
baseclass_name = 'FieldsBase_for__{0}'.format('__'.join(fields))
if pass_kwargs:
options = dict(
header_end=', **__fields_kwargs__):\n',
super_call_end=', **__fields_kwargs__)\n',
super_call_pass_allargs=False,
)
else:
options = {}
if initializer:
global_namespace, local_namespace = make_init_func(fields, defaults, baseclass_name, **options)
class FieldsBase(base):
if initializer:
__init__ = local_namespace['__init__']
if comparable:
def __eq__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) == tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def __lt__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) < tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) <= tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) > tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, self.__class__):
return tuple(getattr(self, a) for a in fields) >= tuple(getattr(other, a) for a in fields)
else:
return NotImplemented
def __hash__(self):
return hash(tuple(getattr(self, a) for a in fields))
if printable:
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
", ".join("{0}={1}".format(attr, repr(getattr(self, attr))) for attr in fields)
)
if convertible:
@property
def as_dict(self):
return dict((attr, getattr(self, attr)) for attr in fields)
@property
def as_tuple(self):
return tuple(getattr(self, attr) for attr in fields)
if initializer:
global_namespace[baseclass_name] = FieldsBase
return FieldsBase | ['def', 'class_sealer', '(', 'fields', ',', 'defaults', ',', 'base', '=', '__base__', ',', 'make_init_func', '=', 'make_init_func', ',', 'initializer', '=', 'True', ',', 'comparable', '=', 'True', ',', 'printable', '=', 'True', ',', 'convertible', '=', 'False', ',', 'pass_kwargs', '=', 'False', ')', ':', 'baseclass_name', '=', "'FieldsBase_for__{0}'", '.', 'format', '(', "'__'", '.', 'join', '(', 'fields', ')', ')', 'if', 'pass_kwargs', ':', 'options', '=', 'dict', '(', 'header_end', '=', "', **__fields_kwargs__):\\n'", ',', 'super_call_end', '=', "', **__fields_kwargs__)\\n'", ',', 'super_call_pass_allargs', '=', 'False', ',', ')', 'else', ':', 'options', '=', '{', '}', 'if', 'initializer', ':', 'global_namespace', ',', 'local_namespace', '=', 'make_init_func', '(', 'fields', ',', 'defaults', ',', 'baseclass_name', ',', '*', '*', 'options', ')', 'class', 'FieldsBase', '(', 'base', ')', ':', 'if', 'initializer', ':', '__init__', '=', 'local_namespace', '[', "'__init__'", ']', 'if', 'comparable', ':', 'def', '__eq__', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'self', '.', '__class__', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', '==', 'tuple', '(', 'getattr', '(', 'other', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', 'else', ':', 'return', 'NotImplemented', 'def', '__ne__', '(', 'self', ',', 'other', ')', ':', 'result', '=', 'self', '.', '__eq__', '(', 'other', ')', 'if', 'result', 'is', 'NotImplemented', ':', 'return', 'NotImplemented', 'else', ':', 'return', 'not', 'result', 'def', '__lt__', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'self', '.', '__class__', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', '<', 'tuple', '(', 'getattr', '(', 'other', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', 'else', ':', 'return', 'NotImplemented', 'def', '__le__', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'self', '.', '__class__', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', '<=', 'tuple', '(', 'getattr', '(', 'other', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', 'else', ':', 'return', 'NotImplemented', 'def', '__gt__', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'self', '.', '__class__', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', '>', 'tuple', '(', 'getattr', '(', 'other', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', 'else', ':', 'return', 'NotImplemented', 'def', '__ge__', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'self', '.', '__class__', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', '>=', 'tuple', '(', 'getattr', '(', 'other', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', 'else', ':', 'return', 'NotImplemented', 'def', '__hash__', '(', 'self', ')', ':', 'return', 'hash', '(', 'tuple', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'fields', ')', ')', 'if', 'printable', ':', 'def', '__repr__', '(', 'self', ')', ':', 'return', '"{0}({1})"', '.', 'format', '(', 'self', '.', '__class__', '.', '__name__', ',', '", "', '.', 'join', '(', '"{0}={1}"', '.', 'format', '(', 'attr', ',', 'repr', '(', 'getattr', '(', 'self', ',', 'attr', ')', ')', ')', 'for', 'attr', 'in', 'fields', ')', ')', 'if', 
'convertible', ':', '@', 'property', 'def', 'as_dict', '(', 'self', ')', ':', 'return', 'dict', '(', '(', 'attr', ',', 'getattr', '(', 'self', ',', 'attr', ')', ')', 'for', 'attr', 'in', 'fields', ')', '@', 'property', 'def', 'as_tuple', '(', 'self', ')', ':', 'return', 'tuple', '(', 'getattr', '(', 'self', ',', 'attr', ')', 'for', 'attr', 'in', 'fields', ')', 'if', 'initializer', ':', 'global_namespace', '[', 'baseclass_name', ']', '=', 'FieldsBase', 'return', 'FieldsBase'] | This sealer makes a normal container class. It's mutable and supports arguments with default values. | ['This', 'sealer', 'makes', 'a', 'normal', 'container', 'class', '.', 'It', 's', 'mutable', 'and', 'supports', 'arguments', 'with', 'default', 'values', '.'] | train | https://github.com/ionelmc/python-fields/blob/91e13560173abcc42cc1a95cbe2956b307126416/src/fields/__init__.py#L120-L201 |
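A hedged usage sketch for the container classes this sealer produces; the `Fields` factory import and the bracket syntax for defaults follow the python-fields README and are assumptions beyond this row:

from fields import Fields

class Pair(Fields.a.b[2]):  # field b defaults to 2 (assumed bracket syntax)
    pass

p = Pair(1)
print(p)                            # Pair(a=1, b=2) -- the printable branch's __repr__
print(p == Pair(1, 2))              # True -- __eq__ compares per-field tuples
print(hash(p) == hash(Pair(1, 2)))  # True -- __hash__ hashes the same tuple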
7,752 | openstack/networking-cisco | networking_cisco/ml2_drivers/nexus/nexus_restapi_client.py | CiscoNexusRestapiClient.send_request | def send_request(self, method, action, body=None,
headers=None, ipaddr=None):
"""Perform the HTTP request.
The response is in either JSON format or plain text. A GET method will
invoke a JSON response while a PUT/POST/DELETE returns a message from
the server in plain text format.
Exception is raised when server replies with an INTERNAL SERVER ERROR
status code (500) i.e. an error has occurred on the server or SERVICE
UNAVAILABLE (503) i.e. server is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param ipaddr: IP address of the switch for the HTTP request.
:returns: JSON or plain text in HTTP response
"""
action = ''.join([self.scheme, '://%s/', action])
if netaddr.valid_ipv6(ipaddr):
# Enclose IPv6 address in [] in the URL
action = action % ("[%s]" % ipaddr)
else:
# IPv4 address
action = action % ipaddr
config = action + " : " + body if body else action
# if cookie needed and one not previously created
if self.request_cookie:
cookie, verify = self._get_cookie(ipaddr, config)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
else:
if ipaddr not in self.credentials:
raise cexc.NexusCredentialNotFound(switch_ip=ipaddr)
else:
headers = {'Content-Type': 'application/json'}
security_data = self.credentials[ipaddr]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
self.session.auth = (security_data[0], security_data[1])
if self.status != requests.codes.OK:
return {}
for attempt in range(self.max_retries + 1):
try:
LOG.debug("[Nexus %(ipaddr)s attempt %(id)s]: Connecting.." %
{"ipaddr": ipaddr, "id": attempt})
response = self.session.request(
method,
action,
data=body,
headers=headers,
verify=verify,
timeout=self.timeout)
if (self.request_cookie and
response.status_code in CREDENTIAL_EXPIRED):
# if need new cookie
cookie, verify = self._get_cookie(
ipaddr, config, refresh=True)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
continue
except Exception as e:
LOG.error(
"Exception raised %(err)s for Rest/NXAPI %(cfg)s",
{'err': str(e), 'cfg': config})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=e)
else:
break
status_string = requests.status_codes._codes[response.status_code][0]
if response.status_code in self.accepted_codes:
LOG.debug(
"Good status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
# 'text/json' used with nxapi else application/json with restapi
output = {}
if ('application/json' in response.headers['content-type'] or
'text/json' in response.headers['content-type']):
try:
output = response.json()
except Exception as e:
LOG.exception(
"Unexpected error encountered extracting "
"json body from response.")
if 'ins_api' in output:
# do special nxapi response handling
try:
cli_resp = output['ins_api']['outputs']['output']
except Exception:
cli_resp = []
# Check results for each command
for cli in cli_resp:
try:
status = int((cli['code']))
except ValueError:
status = 'bad_status %s' % cli['code']
if status not in self.accepted_codes:
excpt = "ins_api CLI failure occurred "
"with cli return code %s" % str(status)
raise cexc.NexusConfigFailed(
nexus_host=ipaddr, config=config,
exc=excpt)
return output
else:
LOG.error(
"Bad status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
LOG.error("Response text: %(txt)s",
{'txt': response.text})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=response.text) | python | def send_request(self, method, action, body=None,
headers=None, ipaddr=None):
"""Perform the HTTP request.
The response is in either JSON format or plain text. A GET method will
invoke a JSON response while a PUT/POST/DELETE returns a message from
the server in plain text format.
Exception is raised when server replies with an INTERNAL SERVER ERROR
status code (500) i.e. an error has occurred on the server or SERVICE
UNAVAILABLE (503) i.e. server is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param ipaddr: IP address of the switch for the HTTP request.
:returns: JSON or plain text in HTTP response
"""
action = ''.join([self.scheme, '://%s/', action])
if netaddr.valid_ipv6(ipaddr):
# Enclose IPv6 address in [] in the URL
action = action % ("[%s]" % ipaddr)
else:
# IPv4 address
action = action % ipaddr
config = action + " : " + body if body else action
# if cookie needed and one not previously created
if self.request_cookie:
cookie, verify = self._get_cookie(ipaddr, config)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
else:
if ipaddr not in self.credentials:
raise cexc.NexusCredentialNotFound(switch_ip=ipaddr)
else:
headers = {'Content-Type': 'application/json'}
security_data = self.credentials[ipaddr]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
self.session.auth = (security_data[0], security_data[1])
if self.status != requests.codes.OK:
return {}
for attempt in range(self.max_retries + 1):
try:
LOG.debug("[Nexus %(ipaddr)s attempt %(id)s]: Connecting.." %
{"ipaddr": ipaddr, "id": attempt})
response = self.session.request(
method,
action,
data=body,
headers=headers,
verify=verify,
timeout=self.timeout)
if (self.request_cookie and
response.status_code in CREDENTIAL_EXPIRED):
# if need new cookie
cookie, verify = self._get_cookie(
ipaddr, config, refresh=True)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
continue
except Exception as e:
LOG.error(
"Exception raised %(err)s for Rest/NXAPI %(cfg)s",
{'err': str(e), 'cfg': config})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=e)
else:
break
status_string = requests.status_codes._codes[response.status_code][0]
if response.status_code in self.accepted_codes:
LOG.debug(
"Good status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
# 'text/json' used with nxapi else application/json with restapi
output = {}
if ('application/json' in response.headers['content-type'] or
'text/json' in response.headers['content-type']):
try:
output = response.json()
except Exception as e:
LOG.exception(
"Unexpected error encountered extracting "
"json body from response.")
if 'ins_api' in output:
# do special nxapi response handling
try:
cli_resp = output['ins_api']['outputs']['output']
except Exception:
cli_resp = []
# Check results for each command
for cli in cli_resp:
try:
status = int((cli['code']))
except ValueError:
status = 'bad_status %s' % cli['code']
if status not in self.accepted_codes:
excpt = "ins_api CLI failure occurred "
"with cli return code %s" % str(status)
raise cexc.NexusConfigFailed(
nexus_host=ipaddr, config=config,
exc=excpt)
return output
else:
LOG.error(
"Bad status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
LOG.error("Response text: %(txt)s",
{'txt': response.text})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=response.text) | ['def', 'send_request', '(', 'self', ',', 'method', ',', 'action', ',', 'body', '=', 'None', ',', 'headers', '=', 'None', ',', 'ipaddr', '=', 'None', ')', ':', 'action', '=', "''", '.', 'join', '(', '[', 'self', '.', 'scheme', ',', "'://%s/'", ',', 'action', ']', ')', 'if', 'netaddr', '.', 'valid_ipv6', '(', 'ipaddr', ')', ':', '# Enclose IPv6 address in [] in the URL', 'action', '=', 'action', '%', '(', '"[%s]"', '%', 'ipaddr', ')', 'else', ':', '# IPv4 address', 'action', '=', 'action', '%', 'ipaddr', 'config', '=', 'action', '+', '" : "', '+', 'body', 'if', 'body', 'else', 'action', '# if cookie needed and one not previously created', 'if', 'self', '.', 'request_cookie', ':', 'cookie', ',', 'verify', '=', 'self', '.', '_get_cookie', '(', 'ipaddr', ',', 'config', ')', 'headers', '=', '{', '"Content-type"', ':', '"application/json"', ',', '"Accept"', ':', '"text/plain"', ',', '"Cookie"', ':', 'cookie', '}', 'else', ':', 'if', 'ipaddr', 'not', 'in', 'self', '.', 'credentials', ':', 'raise', 'cexc', '.', 'NexusCredentialNotFound', '(', 'switch_ip', '=', 'ipaddr', ')', 'else', ':', 'headers', '=', '{', "'Content-Type'", ':', "'application/json'", '}', 'security_data', '=', 'self', '.', 'credentials', '[', 'ipaddr', ']', 'verify', '=', 'security_data', '[', 'const', '.', 'HTTPS_CERT_TUPLE', ']', 'if', 'not', 'verify', ':', 'verify', '=', 'security_data', '[', 'const', '.', 'HTTPS_VERIFY_TUPLE', ']', 'self', '.', 'session', '.', 'auth', '=', '(', 'security_data', '[', '0', ']', ',', 'security_data', '[', '1', ']', ')', 'if', 'self', '.', 'status', '!=', 'requests', '.', 'codes', '.', 'OK', ':', 'return', '{', '}', 'for', 'attempt', 'in', 'range', '(', 'self', '.', 'max_retries', '+', '1', ')', ':', 'try', ':', 'LOG', '.', 'debug', '(', '"[Nexus %(ipaddr)s attempt %(id)s]: Connecting.."', '%', '{', '"ipaddr"', ':', 'ipaddr', ',', '"id"', ':', 'attempt', '}', ')', 'response', '=', 'self', '.', 'session', '.', 'request', '(', 'method', ',', 'action', ',', 'data', '=', 'body', ',', 'headers', '=', 'headers', ',', 'verify', '=', 'verify', ',', 'timeout', '=', 'self', '.', 'timeout', ')', 'if', '(', 'self', '.', 'request_cookie', 'and', 'response', '.', 'status_code', 'in', 'CREDENTIAL_EXPIRED', ')', ':', '# if need new cookie', 'cookie', ',', 'verify', '=', 'self', '.', '_get_cookie', '(', 'ipaddr', ',', 'config', ',', 'refresh', '=', 'True', ')', 'headers', '=', '{', '"Content-type"', ':', '"application/json"', ',', '"Accept"', ':', '"text/plain"', ',', '"Cookie"', ':', 'cookie', '}', 'continue', 'except', 'Exception', 'as', 'e', ':', 'LOG', '.', 'error', '(', '"Exception raised %(err)s for Rest/NXAPI %(cfg)s"', ',', '{', "'err'", ':', 'str', '(', 'e', ')', ',', "'cfg'", ':', 'config', '}', ')', 'raise', 'cexc', '.', 'NexusConfigFailed', '(', 'nexus_host', '=', 'ipaddr', ',', 'config', '=', 'config', ',', 'exc', '=', 'e', ')', 'else', ':', 'break', 'status_string', '=', 'requests', '.', 'status_codes', '.', '_codes', '[', 'response', '.', 'status_code', ']', '[', '0', ']', 'if', 'response', '.', 'status_code', 'in', 'self', '.', 'accepted_codes', ':', 'LOG', '.', 'debug', '(', '"Good status %(status)s(%(code)d) returned for %(url)s"', ',', '{', "'status'", ':', 'status_string', ',', "'code'", ':', 'response', '.', 'status_code', ',', "'url'", ':', 'action', '}', ')', "# 'text/json' used with nxapi else application/json with restapi", 'output', '=', '{', '}', 'if', '(', "'application/json'", 'in', 'response', '.', 'headers', '[', "'content-type'", ']', 'or', "'text/json'", 
'in', 'response', '.', 'headers', '[', "'content-type'", ']', ')', ':', 'try', ':', 'output', '=', 'response', '.', 'json', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'LOG', '.', 'exception', '(', '"Unexpected error encountered extracting "', '"json body from response."', ')', 'if', "'ins_api'", 'in', 'output', ':', '# do special nxapi response handling', 'try', ':', 'cli_resp', '=', 'output', '[', "'ins_api'", ']', '[', "'outputs'", ']', '[', "'output'", ']', 'except', 'Exception', ':', 'cli_resp', '=', '[', ']', '# Check results for each command', 'for', 'cli', 'in', 'cli_resp', ':', 'try', ':', 'status', '=', 'int', '(', '(', 'cli', '[', "'code'", ']', ')', ')', 'except', 'ValueError', ':', 'status', '=', "'bad_status %s'", '%', 'cli', '[', "'code'", ']', 'if', 'status', 'not', 'in', 'self', '.', 'accepted_codes', ':', 'excpt', '=', '"ins_api CLI failure occurred "', '"with cli return code %s"', '%', 'str', '(', 'status', ')', 'raise', 'cexc', '.', 'NexusConfigFailed', '(', 'nexus_host', '=', 'ipaddr', ',', 'config', '=', 'config', ',', 'exc', '=', 'excpt', ')', 'return', 'output', 'else', ':', 'LOG', '.', 'error', '(', '"Bad status %(status)s(%(code)d) returned for %(url)s"', ',', '{', "'status'", ':', 'status_string', ',', "'code'", ':', 'response', '.', 'status_code', ',', "'url'", ':', 'action', '}', ')', 'LOG', '.', 'error', '(', '"Response text: %(txt)s"', ',', '{', "'txt'", ':', 'response', '.', 'text', '}', ')', 'raise', 'cexc', '.', 'NexusConfigFailed', '(', 'nexus_host', '=', 'ipaddr', ',', 'config', '=', 'config', ',', 'exc', '=', 'response', '.', 'text', ')'] | Perform the HTTP request.
The response is in either JSON format or plain text. A GET method will
invoke a JSON response while a PUT/POST/DELETE returns a message from
the server in plain text format.
Exception is raised when server replies with an INTERNAL SERVER ERROR
status code (500) i.e. an error has occurred on the server or SERVICE
UNAVAILABLE (503) i.e. server is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param ipaddr: IP address of the switch for the HTTP request.
:returns: JSON or plain text in HTTP response | ['Perform', 'the', 'HTTP', 'request', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_restapi_client.py#L107-L229 |
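A hedged call sketch for send_request; the client construction and the REST path are assumptions, only the method's own signature and error behaviour come from this row:

client = CiscoNexusRestapiClient(credentials)  # hypothetical construction
result = client.send_request(
    "GET",
    "api/mo/sys.json",    # hypothetical Nexus REST path
    ipaddr="192.0.2.10",
)
# A GET with an accepted status returns the decoded JSON body; any other
# status raises cexc.NexusConfigFailed carrying the response text.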
7,753 | django-crispy-forms/django-crispy-forms | crispy_forms/templatetags/crispy_forms_utils.py | specialspaceless | def specialspaceless(parser, token):
"""
Removes whitespace between HTML tags, and introduces a whitespace
after buttons and inputs, necessary for Bootstrap to place them
correctly in the layout.
"""
nodelist = parser.parse(('endspecialspaceless',))
parser.delete_first_token()
return SpecialSpacelessNode(nodelist) | python | def specialspaceless(parser, token):
"""
Removes whitespace between HTML tags, and introduces a whitespace
after buttons and inputs, necessary for Bootstrap to place them
correctly in the layout.
"""
nodelist = parser.parse(('endspecialspaceless',))
parser.delete_first_token()
return SpecialSpacelessNode(nodelist) | ['def', 'specialspaceless', '(', 'parser', ',', 'token', ')', ':', 'nodelist', '=', 'parser', '.', 'parse', '(', '(', "'endspecialspaceless'", ',', ')', ')', 'parser', '.', 'delete_first_token', '(', ')', 'return', 'SpecialSpacelessNode', '(', 'nodelist', ')'] | Removes whitespace between HTML tags, and introduces a whitespace
after buttons and inputs, necessary for Bootstrap to place them
correctly in the layout. | ['Removes', 'whitespace', 'between', 'HTML', 'tags', 'and', 'introduces', 'a', 'whitespace', 'after', 'buttons', 'and', 'inputs', 'necessary', 'for', 'Bootstrap', 'to', 'place', 'them', 'correctly', 'in', 'the', 'layout', '.'] | train | https://github.com/django-crispy-forms/django-crispy-forms/blob/cd476927a756133c667c199bb12120f877bf6b7e/crispy_forms/templatetags/crispy_forms_utils.py#L38-L47 |
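A hedged rendering sketch for the tag; it assumes Django settings are already configured and that the tag is loadable from the crispy_forms_utils library:

from django.template import Context, Template  # assumes settings.configure() was run

tmpl = Template(
    "{% load crispy_forms_utils %}"
    "{% specialspaceless %}<input />   <button>go</button>{% endspecialspaceless %}"
)
print(tmpl.render(Context({})))  # inter-tag whitespace collapsed, keeping the
                                 # space Bootstrap needs after inputs/buttons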
7,754 | mryellow/maze_explorer | mazeexp/engine/world.py | WorldLayer.get_state | def get_state(self):
"""
Create state from sensors and battery
"""
# Include battery level in state
battery = self.player.stats['battery']/100
# Create observation from sensor proximities
# TODO: Have state persist, then update columns by `sensed_type`
# Multi-channel; detecting `items`
if len(self.mode['items']) > 0:
observation = []
for sensor in self.player.sensors:
col = []
# Always include range in channel 0
col.append(sensor.proximity_norm())
for item_type in self.mode['items']:
if sensor.sensed_type == item_type:
col.append(sensor.proximity_norm())
else:
# Default to 1 (`max_range/max_range`)
col.append(1)
observation.append(col)
if 'battery' in self.mode:
observation.append([battery,1,1])
# Single-channel; walls only
else:
observation = [o.proximity_norm() for o in self.player.sensors]
if 'battery' in self.mode:
observation.append(battery)
return observation | python | def get_state(self):
"""
Create state from sensors and battery
"""
# Include battery level in state
battery = self.player.stats['battery']/100
# Create observation from sensor proximities
# TODO: Have state persist, then update columns by `sensed_type`
# Multi-channel; detecting `items`
if len(self.mode['items']) > 0:
observation = []
for sensor in self.player.sensors:
col = []
# Always include range in channel 0
col.append(sensor.proximity_norm())
for item_type in self.mode['items']:
if sensor.sensed_type == item_type:
col.append(sensor.proximity_norm())
else:
# Default to 1 (`max_range/max_range`)
col.append(1)
observation.append(col)
if 'battery' in self.mode:
observation.append([battery,1,1])
# Single-channel; walls only
else:
observation = [o.proximity_norm() for o in self.player.sensors]
if 'battery' in self.mode:
observation.append(battery)
return observation | ['def', 'get_state', '(', 'self', ')', ':', '# Include battery level in state', 'battery', '=', 'self', '.', 'player', '.', 'stats', '[', "'battery'", ']', '/', '100', '# Create observation from sensor proximities', '# TODO: Have state persist, then update columns by `sensed_type`', '# Multi-channel; detecting `items`', 'if', 'len', '(', 'self', '.', 'mode', '[', "'items'", ']', ')', '>', '0', ':', 'observation', '=', '[', ']', 'for', 'sensor', 'in', 'self', '.', 'player', '.', 'sensors', ':', 'col', '=', '[', ']', '# Always include range in channel 0', 'col', '.', 'append', '(', 'sensor', '.', 'proximity_norm', '(', ')', ')', 'for', 'item_type', 'in', 'self', '.', 'mode', '[', "'items'", ']', ':', 'if', 'sensor', '.', 'sensed_type', '==', 'item_type', ':', 'col', '.', 'append', '(', 'sensor', '.', 'proximity_norm', '(', ')', ')', 'else', ':', '# Default to 1 (`max_range/max_range`)', 'col', '.', 'append', '(', '1', ')', 'observation', '.', 'append', '(', 'col', ')', 'if', "'battery'", 'in', 'self', '.', 'mode', ':', 'observation', '.', 'append', '(', '[', 'battery', ',', '1', ',', '1', ']', ')', '# Single-channel; walls only', 'else', ':', 'observation', '=', '[', 'o', '.', 'proximity_norm', '(', ')', 'for', 'o', 'in', 'self', '.', 'player', '.', 'sensors', ']', 'if', "'battery'", 'in', 'self', '.', 'mode', ':', 'observation', '.', 'append', '(', 'battery', ')', 'return', 'observation'] | Create state from sensors and battery | ['Create', 'state', 'from', 'sensors', 'and', 'battery'] | train | https://github.com/mryellow/maze_explorer/blob/ab8a25ccd05105d2fe57e0213d690cfc07e45827/mazeexp/engine/world.py#L402-L434 |
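The observation layout is easiest to see with concrete numbers; a sketch under an assumed sensor count and item list (neither is fixed by the row above):

item_types = ['reward', 'poison']   # assumed mode['items']
num_sensors = 8                     # assumed number of sensors
observation = []
for _ in range(num_sensors):
    proximity = 1.0                 # nothing sensed: max_range / max_range
    observation.append([proximity] + [1] * len(item_types))
observation.append([0.75, 1, 1])    # battery row, appended when 'battery' in mode
print(len(observation), len(observation[0]))  # 9 rows x 3 channels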
7,755 | jgillick/LendingClub | lendingclub/__init__.py | LendingClub.get_cash_balance | def get_cash_balance(self):
"""
Returns the account cash balance available for investing
Returns
-------
float
The cash balance in your account.
"""
cash = False
try:
response = self.session.get('/browse/cashBalanceAj.action')
json_response = response.json()
if self.session.json_success(json_response):
self.__log('Cash available: {0}'.format(json_response['cashBalance']))
cash_value = json_response['cashBalance']
# Convert currency to float value
# Match values like $1,000.12 or 1,000$
cash_match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value)
if cash_match:
cash_str = cash_match.group(1)
cash_str = cash_str.replace(',', '')
cash = float(cash_str)
else:
self.__log('Could not get cash balance: {0}'.format(response.text))
except Exception as e:
self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text))
raise e
return cash | python | def get_cash_balance(self):
"""
Returns the account cash balance available for investing
Returns
-------
float
The cash balance in your account.
"""
cash = False
try:
response = self.session.get('/browse/cashBalanceAj.action')
json_response = response.json()
if self.session.json_success(json_response):
self.__log('Cash available: {0}'.format(json_response['cashBalance']))
cash_value = json_response['cashBalance']
# Convert currency to float value
# Match values like $1,000.12 or 1,000$
cash_match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value)
if cash_match:
cash_str = cash_match.group(1)
cash_str = cash_str.replace(',', '')
cash = float(cash_str)
else:
self.__log('Could not get cash balance: {0}'.format(response.text))
except Exception as e:
self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text))
raise e
return cash | ['def', 'get_cash_balance', '(', 'self', ')', ':', 'cash', '=', 'False', 'try', ':', 'response', '=', 'self', '.', 'session', '.', 'get', '(', "'/browse/cashBalanceAj.action'", ')', 'json_response', '=', 'response', '.', 'json', '(', ')', 'if', 'self', '.', 'session', '.', 'json_success', '(', 'json_response', ')', ':', 'self', '.', '__log', '(', "'Cash available: {0}'", '.', 'format', '(', 'json_response', '[', "'cashBalance'", ']', ')', ')', 'cash_value', '=', 'json_response', '[', "'cashBalance'", ']', '# Convert currency to float value', '# Match values like $1,000.12 or 1,0000$', 'cash_match', '=', 're', '.', 'search', '(', "'^[^0-9]?([0-9\\.,]+)[^0-9]?'", ',', 'cash_value', ')', 'if', 'cash_match', ':', 'cash_str', '=', 'cash_match', '.', 'group', '(', '1', ')', 'cash_str', '=', 'cash_str', '.', 'replace', '(', "','", ',', "''", ')', 'cash', '=', 'float', '(', 'cash_str', ')', 'else', ':', 'self', '.', '__log', '(', "'Could not get cash balance: {0}'", '.', 'format', '(', 'response', '.', 'text', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', '__log', '(', "'Could not get the cash balance on the account: Error: {0}\\nJSON: {1}'", '.', 'format', '(', 'str', '(', 'e', ')', ',', 'response', '.', 'text', ')', ')', 'raise', 'e', 'return', 'cash'] | Returns the account cash balance available for investing
Returns
-------
float
The cash balance in your account. | ['Returns', 'the', 'account', 'cash', 'balance', 'available', 'for', 'investing'] | train | https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/__init__.py#L154-L186 |
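The currency parsing in get_cash_balance runs fine in isolation; the helper name below is introduced for illustration, but the regex and the replace/float steps are copied from the method:

import re

def parse_cash(cash_value):  # hypothetical standalone helper
    m = re.search(r'^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value)
    return float(m.group(1).replace(',', '')) if m else False

print(parse_cash('$1,000.12'))  # 1000.12
print(parse_cash('250'))        # 250.0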
7,756 | pyrogram/pyrogram | pyrogram/client/types/messages_and_media/message.py | Message.reply_video | def reply_video(
self,
video: str,
quote: bool = None,
caption: str = "",
parse_mode: str = "",
duration: int = 0,
width: int = 0,
height: int = 0,
thumb: str = None,
supports_streaming: bool = True,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: Union[
"pyrogram.InlineKeyboardMarkup",
"pyrogram.ReplyKeyboardMarkup",
"pyrogram.ReplyKeyboardRemove",
"pyrogram.ForceReply"
] = None,
progress: callable = None,
progress_args: tuple = ()
) -> "Message":
"""Bound method *reply_video* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_video(
chat_id=message.chat.id,
video=video
)
Example:
.. code-block:: python
message.reply_video(video)
Args:
video (``str``):
Video to send.
Pass a file_id as string to send a video that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a video from the Internet, or
pass a file path as string to upload a new video that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
caption (``str``, *optional*):
Video caption, 0-1024 characters.
parse_mode (``str``, *optional*):
Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
Defaults to Markdown.
duration (``int``, *optional*):
Duration of sent video in seconds.
width (``int``, *optional*):
Video width.
height (``int``, *optional*):
Video height.
thumb (``str``, *optional*):
Thumbnail of the video sent.
The thumbnail should be in JPEG format and less than 200 KB in size.
A thumbnail's width and height should not exceed 90 pixels.
Thumbnails can't be reused and can be only uploaded as a new file.
supports_streaming (``bool``, *optional*):
Pass True, if the uploaded video is suitable for streaming.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_video(
chat_id=self.chat.id,
video=video,
caption=caption,
parse_mode=parse_mode,
duration=duration,
width=width,
height=height,
thumb=thumb,
supports_streaming=supports_streaming,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
progress=progress,
progress_args=progress_args
) | python | def reply_video(
self,
video: str,
quote: bool = None,
caption: str = "",
parse_mode: str = "",
duration: int = 0,
width: int = 0,
height: int = 0,
thumb: str = None,
supports_streaming: bool = True,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: Union[
"pyrogram.InlineKeyboardMarkup",
"pyrogram.ReplyKeyboardMarkup",
"pyrogram.ReplyKeyboardRemove",
"pyrogram.ForceReply"
] = None,
progress: callable = None,
progress_args: tuple = ()
) -> "Message":
"""Bound method *reply_video* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_video(
chat_id=message.chat.id,
video=video
)
Example:
.. code-block:: python
message.reply_video(video)
Args:
video (``str``):
Video to send.
Pass a file_id as string to send a video that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a video from the Internet, or
pass a file path as string to upload a new video that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
caption (``str``, *optional*):
Video caption, 0-1024 characters.
parse_mode (``str``, *optional*):
Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
Defaults to Markdown.
duration (``int``, *optional*):
Duration of sent video in seconds.
width (``int``, *optional*):
Video width.
height (``int``, *optional*):
Video height.
thumb (``str``, *optional*):
Thumbnail of the video sent.
The thumbnail should be in JPEG format and less than 200 KB in size.
A thumbnail's width and height should not exceed 90 pixels.
Thumbnails can't be reused and can be only uploaded as a new file.
supports_streaming (``bool``, *optional*):
Pass True, if the uploaded video is suitable for streaming.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_video(
chat_id=self.chat.id,
video=video,
caption=caption,
parse_mode=parse_mode,
duration=duration,
width=width,
height=height,
thumb=thumb,
supports_streaming=supports_streaming,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
progress=progress,
progress_args=progress_args
) | ['def', 'reply_video', '(', 'self', ',', 'video', ':', 'str', ',', 'quote', ':', 'bool', '=', 'None', ',', 'caption', ':', 'str', '=', '""', ',', 'parse_mode', ':', 'str', '=', '""', ',', 'duration', ':', 'int', '=', '0', ',', 'width', ':', 'int', '=', '0', ',', 'height', ':', 'int', '=', '0', ',', 'thumb', ':', 'str', '=', 'None', ',', 'supports_streaming', ':', 'bool', '=', 'True', ',', 'disable_notification', ':', 'bool', '=', 'None', ',', 'reply_to_message_id', ':', 'int', '=', 'None', ',', 'reply_markup', ':', 'Union', '[', '"pyrogram.InlineKeyboardMarkup"', ',', '"pyrogram.ReplyKeyboardMarkup"', ',', '"pyrogram.ReplyKeyboardRemove"', ',', '"pyrogram.ForceReply"', ']', '=', 'None', ',', 'progress', ':', 'callable', '=', 'None', ',', 'progress_args', ':', 'tuple', '=', '(', ')', ')', '->', '"Message"', ':', 'if', 'quote', 'is', 'None', ':', 'quote', '=', 'self', '.', 'chat', '.', 'type', '!=', '"private"', 'if', 'reply_to_message_id', 'is', 'None', 'and', 'quote', ':', 'reply_to_message_id', '=', 'self', '.', 'message_id', 'return', 'self', '.', '_client', '.', 'send_video', '(', 'chat_id', '=', 'self', '.', 'chat', '.', 'id', ',', 'video', '=', 'video', ',', 'caption', '=', 'caption', ',', 'parse_mode', '=', 'parse_mode', ',', 'duration', '=', 'duration', ',', 'width', '=', 'width', ',', 'height', '=', 'height', ',', 'thumb', '=', 'thumb', ',', 'supports_streaming', '=', 'supports_streaming', ',', 'disable_notification', '=', 'disable_notification', ',', 'reply_to_message_id', '=', 'reply_to_message_id', ',', 'reply_markup', '=', 'reply_markup', ',', 'progress', '=', 'progress', ',', 'progress_args', '=', 'progress_args', ')'] | Bound method *reply_video* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_video(
chat_id=message.chat.id,
video=video
)
Example:
.. code-block:: python
message.reply_video(video)
Args:
video (``str``):
Video to send.
Pass a file_id as string to send a video that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get a video from the Internet, or
pass a file path as string to upload a new video that exists on your local machine.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
caption (``str``, *optional*):
Video caption, 0-1024 characters.
parse_mode (``str``, *optional*):
Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
Defaults to Markdown.
duration (``int``, *optional*):
Duration of sent video in seconds.
width (``int``, *optional*):
Video width.
height (``int``, *optional*):
Video height.
thumb (``str``, *optional*):
Thumbnail of the video sent.
The thumbnail should be in JPEG format and less than 200 KB in size.
A thumbnail's width and height should not exceed 90 pixels.
Thumbnails can't be reused and can be only uploaded as a new file.
supports_streaming (``bool``, *optional*):
Pass True, if the uploaded video is suitable for streaming.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. | ['Bound', 'method', '*', 'reply_video', '*', 'of', ':', 'obj', ':', 'Message', '<pyrogram', '.', 'Message', '>', '.'] | train | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/types/messages_and_media/message.py#L1997-L2135 |
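A hedged handler sketch; the client and decorator wiring are assumptions, while the keyword arguments come straight from the signature above:

import pyrogram

app = pyrogram.Client("my_account")  # hypothetical session name

@app.on_message()
def on_message(client, message):
    message.reply_video(
        "clip.mp4",               # local path, file_id, or HTTP URL
        caption="demo",
        supports_streaming=True,  # matches the signature's default
    )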
7,757 | delph-in/pydelphin | delphin/mrs/query.py | select_hcons | def select_hcons(xmrs, hi=None, relation=None, lo=None):
"""
Return the list of matching HCONS for *xmrs*.
:class:`~delphin.mrs.components.HandleConstraint` objects for
*xmrs* match if their `hi` matches *hi*, `relation` matches
*relation*, and `lo` matches *lo*. The *hi*, *relation*, and *lo*
filters are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
hi (str, optional): hi handle (hole) to match
relation (str, optional): handle constraint relation to match
lo (str, optional): lo handle (label) to match
Returns:
list: matching HCONS
"""
hcmatch = lambda hc: (
(hi is None or hc.hi == hi) and
(relation is None or hc.relation == relation) and
(lo is None or hc.lo == lo))
return list(filter(hcmatch, xmrs.hcons())) | python | def select_hcons(xmrs, hi=None, relation=None, lo=None):
"""
Return the list of matching HCONS for *xmrs*.
:class:`~delphin.mrs.components.HandleConstraint` objects for
*xmrs* match if their `hi` matches *hi*, `relation` matches
*relation*, and `lo` matches *lo*. The *hi*, *relation*, and *lo*
filters are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
hi (str, optional): hi handle (hole) to match
relation (str, optional): handle constraint relation to match
lo (str, optional): lo handle (label) to match
Returns:
list: matching HCONS
"""
hcmatch = lambda hc: (
(hi is None or hc.hi == hi) and
(relation is None or hc.relation == relation) and
(lo is None or hc.lo == lo))
return list(filter(hcmatch, xmrs.hcons())) | ['def', 'select_hcons', '(', 'xmrs', ',', 'hi', '=', 'None', ',', 'relation', '=', 'None', ',', 'lo', '=', 'None', ')', ':', 'hcmatch', '=', 'lambda', 'hc', ':', '(', '(', 'hi', 'is', 'None', 'or', 'hc', '.', 'hi', '==', 'hi', ')', 'and', '(', 'relation', 'is', 'None', 'or', 'hc', '.', 'relation', '==', 'relation', ')', 'and', '(', 'lo', 'is', 'None', 'or', 'hc', '.', 'lo', '==', 'lo', ')', ')', 'return', 'list', '(', 'filter', '(', 'hcmatch', ',', 'xmrs', '.', 'hcons', '(', ')', ')', ')'] | Return the list of matching HCONS for *xmrs*.
:class:`~delphin.mrs.components.HandleConstraint` objects for
*xmrs* match if their `hi` matches *hi*, `relation` matches
*relation*, and `lo` matches *lo*. The *hi*, *relation*, and *lo*
filters are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
hi (str, optional): hi handle (hole) to match
relation (str, optional): handle constraint relation to match
lo (str, optional): lo handle (label) to match
Returns:
list: matching HCONS | ['Return', 'the', 'list', 'of', 'matching', 'HCONS', 'for', '*', 'xmrs', '*', '.'] | train | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/query.py#L158-L180 |
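A hedged call sketch; `xmrs` stands for any parsed delphin.mrs structure and the handle names are placeholders:

all_hcons = select_hcons(xmrs)                    # no filters: every constraint
qeqs = select_hcons(xmrs, relation='qeq')         # only qeq constraints
from_h1 = select_hcons(xmrs, hi='h1')             # constraints whose hole is h1
narrowed = select_hcons(xmrs, hi='h1', lo='h3')   # filters combine conjunctively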
7,758 | riga/scinum | scinum.py | acosh | def acosh(x):
""" acosh(x)
Hyperbolic arc cos function.
"""
_math = infer_math(x)
if _math is math:
return _math.acosh(x)
else:
return _math.arccosh(x) | python | def acosh(x):
""" acosh(x)
Hyperbolic arc cos function.
"""
_math = infer_math(x)
if _math is math:
return _math.acosh(x)
else:
return _math.arccosh(x) | ['def', 'acosh', '(', 'x', ')', ':', '_math', '=', 'infer_math', '(', 'x', ')', 'if', '_math', 'is', 'math', ':', 'return', '_math', '.', 'acosh', '(', 'x', ')', 'else', ':', 'return', '_math', '.', 'arccosh', '(', 'x', ')'] | acosh(x)
Hyperbolic arc cos function. | ['acosh', '(', 'x', ')', 'Hyperbolic', 'arc', 'cos', 'function', '.'] | train | https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1256-L1264 |
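The wrapper exists because the stdlib and NumPy spell the function differently; a runnable check of the two branches infer_math dispatches between:

import math
import numpy as np

print(math.acosh(2.0))                   # stdlib branch: math.acosh
print(np.arccosh(np.array([2.0, 3.0])))  # NumPy branch: numpy.arccosh, element-wise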
7,759 | cloud-custodian/cloud-custodian | c7n/mu.py | PythonPackageArchive.get_reader | def get_reader(self):
"""Return a read-only :py:class:`~zipfile.ZipFile`."""
assert self._closed, "Archive not closed"
buf = io.BytesIO(self.get_bytes())
return zipfile.ZipFile(buf, mode='r') | python | def get_reader(self):
"""Return a read-only :py:class:`~zipfile.ZipFile`."""
assert self._closed, "Archive not closed"
buf = io.BytesIO(self.get_bytes())
return zipfile.ZipFile(buf, mode='r') | ['def', 'get_reader', '(', 'self', ')', ':', 'assert', 'self', '.', '_closed', ',', '"Archive not closed"', 'buf', '=', 'io', '.', 'BytesIO', '(', 'self', '.', 'get_bytes', '(', ')', ')', 'return', 'zipfile', '.', 'ZipFile', '(', 'buf', ',', 'mode', '=', "'r'", ')'] | Return a read-only :py:class:`~zipfile.ZipFile`. | ['Return', 'a', 'read', '-', 'only', ':', 'py', ':', 'class', ':', '~zipfile', '.', 'ZipFile', '.'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/mu.py#L235-L239 |
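A hedged usage sketch; how the archive gets populated is an assumption, since the row only shows the read path:

archive = PythonPackageArchive()  # hypothetical construction/population
archive.close()                   # the assert requires a closed archive
with archive.get_reader() as zf:  # a plain read-only zipfile.ZipFile
    print(zf.namelist())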
7,760 | crunchyroll/ef-open | efopen/ef_resolve_config.py | merge_files | def merge_files(context):
"""
Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object
"""
resolver = EFTemplateResolver(
profile=context.profile,
region=context.region,
env=context.env,
service=context.service
)
try:
with open(context.template_path, 'r') as f:
template_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))
if context.no_params is False:
try:
with open(context.param_path, 'r') as f:
param_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))
dest = yaml.safe_load(param_body)["dest"]
# if 'dest' for the current object contains an 'environments' list, check it
if "environments" in dest:
if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
return
# Process the template_body - apply context + parameters
resolver.load(template_body, param_body)
else:
resolver.load(template_body)
rendered_body = resolver.render()
if not resolver.resolved_ok():
raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))
if context.lint:
if context.template_path.endswith(".json"):
try:
json.loads(rendered_body, strict=False)
print("JSON passed linting process.")
except ValueError as e:
fail("JSON failed linting process.", e)
elif context.template_path.endswith((".yml", ".yaml")):
conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
lint_output = yamllinter.run(rendered_body, conf)
lint_level = 'error'
lint_errors = [issue for issue in lint_output if issue.level == lint_level]
if lint_errors:
split_body = rendered_body.splitlines()
for error in lint_errors:
print(error)
# printing line - 1 because lists start at 0, but files at 1
print("\t", split_body[error.line - 1])
fail("YAML failed linting process.")
if context.verbose:
print(context)
if context.no_params:
print('no_params flag set to true!')
print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
else:
dir_path = normpath(dirname(dest["path"]))
print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
print("chmod file to: " + dest["file_perm"])
user, group = dest["user_group"].split(":")
print("chown last directory in path to user: {}, group: {}".format(user, group))
print("chown file to user: {}, group: {}\n".format(user, group))
print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
elif context.silent:
print("Config template rendered successfully.")
else:
print(rendered_body) | python | def merge_files(context):
"""
Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object
"""
resolver = EFTemplateResolver(
profile=context.profile,
region=context.region,
env=context.env,
service=context.service
)
try:
with open(context.template_path, 'r') as f:
template_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))
if context.no_params is False:
try:
with open(context.param_path, 'r') as f:
param_body = f.read()
f.close()
except IOError as error:
raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))
dest = yaml.safe_load(param_body)["dest"]
# if 'dest' for the current object contains an 'environments' list, check it
if "environments" in dest:
if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
return
# Process the template_body - apply context + parameters
resolver.load(template_body, param_body)
else:
resolver.load(template_body)
rendered_body = resolver.render()
if not resolver.resolved_ok():
raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))
if context.lint:
if context.template_path.endswith(".json"):
try:
json.loads(rendered_body, strict=False)
print("JSON passed linting process.")
except ValueError as e:
fail("JSON failed linting process.", e)
elif context.template_path.endswith((".yml", ".yaml")):
conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
lint_output = yamllinter.run(rendered_body, conf)
lint_level = 'error'
lint_errors = [issue for issue in lint_output if issue.level == lint_level]
if lint_errors:
split_body = rendered_body.splitlines()
for error in lint_errors:
print(error)
# printing line - 1 because lists start at 0, but files at 1
print("\t", split_body[error.line - 1])
fail("YAML failed linting process.")
if context.verbose:
print(context)
if context.no_params:
print('no_params flag set to true!')
print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
else:
dir_path = normpath(dirname(dest["path"]))
print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
print("chmod file to: " + dest["file_perm"])
user, group = dest["user_group"].split(":")
print("chown last directory in path to user: {}, group: {}".format(user, group))
print("chown file to user: {}, group: {}\n".format(user, group))
print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
elif context.silent:
print("Config template rendered successfully.")
else:
print(rendered_body) | ['def', 'merge_files', '(', 'context', ')', ':', 'resolver', '=', 'EFTemplateResolver', '(', 'profile', '=', 'context', '.', 'profile', ',', 'region', '=', 'context', '.', 'region', ',', 'env', '=', 'context', '.', 'env', ',', 'service', '=', 'context', '.', 'service', ')', 'try', ':', 'with', 'open', '(', 'context', '.', 'template_path', ',', "'r'", ')', 'as', 'f', ':', 'template_body', '=', 'f', '.', 'read', '(', ')', 'f', '.', 'close', '(', ')', 'except', 'IOError', 'as', 'error', ':', 'raise', 'IOError', '(', '"Error loading template file: {} {}"', '.', 'format', '(', 'context', '.', 'template_path', ',', 'repr', '(', 'error', ')', ')', ')', 'if', 'context', '.', 'no_params', 'is', 'False', ':', 'try', ':', 'with', 'open', '(', 'context', '.', 'param_path', ',', "'r'", ')', 'as', 'f', ':', 'param_body', '=', 'f', '.', 'read', '(', ')', 'f', '.', 'close', '(', ')', 'except', 'IOError', 'as', 'error', ':', 'raise', 'IOError', '(', '"Error loading param file: {} {}"', '.', 'format', '(', 'context', '.', 'param_path', ',', 'repr', '(', 'error', ')', ')', ')', 'dest', '=', 'yaml', '.', 'safe_load', '(', 'param_body', ')', '[', '"dest"', ']', "# if 'dest' for the current object contains an 'environments' list, check it", 'if', '"environments"', 'in', 'dest', ':', 'if', 'not', 'resolver', '.', 'resolved', '[', '"ENV_SHORT"', ']', 'in', 'dest', '[', '"environments"', ']', ':', 'print', '(', '"Environment: {} not enabled for {}"', '.', 'format', '(', 'resolver', '.', 'resolved', '[', '"ENV_SHORT"', ']', ',', 'context', '.', 'template_path', ')', ')', 'return', '# Process the template_body - apply context + parameters', 'resolver', '.', 'load', '(', 'template_body', ',', 'param_body', ')', 'else', ':', 'resolver', '.', 'load', '(', 'template_body', ')', 'rendered_body', '=', 'resolver', '.', 'render', '(', ')', 'if', 'not', 'resolver', '.', 'resolved_ok', '(', ')', ':', 'raise', 'RuntimeError', '(', '"Couldn\'t resolve all symbols; template has leftover {{ or }}: {}"', '.', 'format', '(', 'resolver', '.', 'unresolved_symbols', '(', ')', ')', ')', 'if', 'context', '.', 'lint', ':', 'if', 'context', '.', 'template_path', '.', 'endswith', '(', '".json"', ')', ':', 'try', ':', 'json', '.', 'loads', '(', 'rendered_body', ',', 'strict', '=', 'False', ')', 'print', '(', '"JSON passed linting process."', ')', 'except', 'ValueError', 'as', 'e', ':', 'fail', '(', '"JSON failed linting process."', ',', 'e', ')', 'elif', 'context', '.', 'template_path', '.', 'endswith', '(', '(', '".yml"', ',', '".yaml"', ')', ')', ':', 'conf', '=', 'yamllint_config', '.', 'YamlLintConfig', '(', 'content', '=', "'extends: relaxed'", ')', 'lint_output', '=', 'yamllinter', '.', 'run', '(', 'rendered_body', ',', 'conf', ')', 'lint_level', '=', "'error'", 'lint_errors', '=', '[', 'issue', 'for', 'issue', 'in', 'lint_output', 'if', 'issue', '.', 'level', '==', 'lint_level', ']', 'if', 'lint_errors', ':', 'split_body', '=', 'rendered_body', '.', 'splitlines', '(', ')', 'for', 'error', 'in', 'lint_errors', ':', 'print', '(', 'error', ')', '# printing line - 1 because lists start at 0, but files at 1', 'print', '(', '"\\t"', ',', 'split_body', '[', 'error', '.', 'line', '-', '1', ']', ')', 'fail', '(', '"YAML failed linting process."', ')', 'if', 'context', '.', 'verbose', ':', 'print', '(', 'context', ')', 'if', 'context', '.', 'no_params', ':', 'print', '(', "'no_params flag set to true!'", ')', 'print', '(', "'Inline template resolution based on external symbol lookup only and no destination for file 
write.\\n'", ')', 'else', ':', 'dir_path', '=', 'normpath', '(', 'dirname', '(', 'dest', '[', '"path"', ']', ')', ')', 'print', '(', '"make directories: {} {}"', '.', 'format', '(', 'dir_path', ',', 'dest', '[', '"dir_perm"', ']', ')', ')', 'print', '(', '"chmod file to: "', '+', 'dest', '[', '"file_perm"', ']', ')', 'user', ',', 'group', '=', 'dest', '[', '"user_group"', ']', '.', 'split', '(', '":"', ')', 'print', '(', '"chown last directory in path to user: {}, group: {}"', '.', 'format', '(', 'user', ',', 'group', ')', ')', 'print', '(', '"chown file to user: {}, group: {}\\n"', '.', 'format', '(', 'user', ',', 'group', ')', ')', 'print', '(', '"template body:\\n{}\\nrendered body:\\n{}\\n"', '.', 'format', '(', 'template_body', ',', 'rendered_body', ')', ')', 'elif', 'context', '.', 'silent', ':', 'print', '(', '"Config template rendered successfully."', ')', 'else', ':', 'print', '(', 'rendered_body', ')'] | Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object | ['Given', 'a', 'context', 'containing', 'path', 'to', 'template', 'env', 'and', 'service', ':', 'merge', 'config', 'into', 'template', 'and', 'output', 'the', 'result', 'to', 'stdout', 'Args', ':', 'context', ':', 'a', 'populated', 'context', 'object'] | train | https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_resolve_config.py#L92-L175 |
7,761 | SCIP-Interfaces/PySCIPOpt | examples/unfinished/mctransp_tuplelist.py | mctransp | def mctransp(I,J,K,c,d,M):
"""mctransp -- model for solving the Multi-commodity Transportation Problem
Parameters:
- I: set of customers
- J: set of facilities
- K: set of commodities
- c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
    - d[i,k]: demand for commodity k at node i
- M[j]: capacity
Returns a model, ready to be solved.
"""
model = Model("multi-commodity transportation")
# Create variables
x = {}
for (i,j,k) in c:
x[i,j,k] = model.addVar(vtype="C", name="x(%s,%s,%s)" % (i,j,k), obj=c[i,j,k])
# tuplelist is a Gurobi data structure to manage lists of equal sized tuples - try itertools as alternative
arcs = tuplelist([(i,j,k) for (i,j,k) in x])
# Demand constraints
for i in I:
for k in K:
model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select(i,"*",k)) == d[i,k], "Demand(%s,%s)" % (i,k))
# Capacity constraints
for j in J:
model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select("*",j,"*")) <= M[j], "Capacity(%s)" % j)
model.data = x
return model | python | def mctransp(I,J,K,c,d,M):
"""mctransp -- model for solving the Multi-commodity Transportation Problem
Parameters:
- I: set of customers
- J: set of facilities
- K: set of commodities
- c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
    - d[i,k]: demand for commodity k at node i
- M[j]: capacity
Returns a model, ready to be solved.
"""
model = Model("multi-commodity transportation")
# Create variables
x = {}
for (i,j,k) in c:
x[i,j,k] = model.addVar(vtype="C", name="x(%s,%s,%s)" % (i,j,k), obj=c[i,j,k])
# tuplelist is a Gurobi data structure to manage lists of equal sized tuples - try itertools as alternative
arcs = tuplelist([(i,j,k) for (i,j,k) in x])
# Demand constraints
for i in I:
for k in K:
model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select(i,"*",k)) == d[i,k], "Demand(%s,%s)" % (i,k))
# Capacity constraints
for j in J:
model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select("*",j,"*")) <= M[j], "Capacity(%s)" % j)
model.data = x
return model | ['def', 'mctransp', '(', 'I', ',', 'J', ',', 'K', ',', 'c', ',', 'd', ',', 'M', ')', ':', 'model', '=', 'Model', '(', '"multi-commodity transportation"', ')', '# Create variables', 'x', '=', '{', '}', 'for', '(', 'i', ',', 'j', ',', 'k', ')', 'in', 'c', ':', 'x', '[', 'i', ',', 'j', ',', 'k', ']', '=', 'model', '.', 'addVar', '(', 'vtype', '=', '"C"', ',', 'name', '=', '"x(%s,%s,%s)"', '%', '(', 'i', ',', 'j', ',', 'k', ')', ',', 'obj', '=', 'c', '[', 'i', ',', 'j', ',', 'k', ']', ')', '# tuplelist is a Gurobi data structure to manage lists of equal sized tuples - try itertools as alternative', 'arcs', '=', 'tuplelist', '(', '[', '(', 'i', ',', 'j', ',', 'k', ')', 'for', '(', 'i', ',', 'j', ',', 'k', ')', 'in', 'x', ']', ')', '# Demand constraints', 'for', 'i', 'in', 'I', ':', 'for', 'k', 'in', 'K', ':', 'model', '.', 'addCons', '(', 'sum', '(', 'x', '[', 'i', ',', 'j', ',', 'k', ']', 'for', '(', 'i', ',', 'j', ',', 'k', ')', 'in', 'arcs', '.', 'select', '(', 'i', ',', '"*"', ',', 'k', ')', ')', '==', 'd', '[', 'i', ',', 'k', ']', ',', '"Demand(%s,%s)"', '%', '(', 'i', ',', 'k', ')', ')', '# Capacity constraints', 'for', 'j', 'in', 'J', ':', 'model', '.', 'addCons', '(', 'sum', '(', 'x', '[', 'i', ',', 'j', ',', 'k', ']', 'for', '(', 'i', ',', 'j', ',', 'k', ')', 'in', 'arcs', '.', 'select', '(', '"*"', ',', 'j', ',', '"*"', ')', ')', '<=', 'M', '[', 'j', ']', ',', '"Capacity(%s)"', '%', 'j', ')', 'model', '.', 'data', '=', 'x', 'return', 'model'] | mctransp -- model for solving the Multi-commodity Transportation Problem
Parameters:
- I: set of customers
- J: set of facilities
- K: set of commodities
- c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
    - d[i,k]: demand for commodity k at node i
- M[j]: capacity
    Returns a model, ready to be solved. | ['mctransp', '--', 'model', 'for', 'solving', 'the', 'Multi', '-', 'commodity', 'Transportation', 'Problem', 'Parameters', ':', '-', 'I', ':', 'set', 'of', 'customers', '-', 'J', ':', 'set', 'of', 'facilities', '-', 'K', ':', 'set', 'of', 'commodities', '-', 'c', '[', 'i', 'j', 'k', ']', ':', 'unit', 'transportation', 'cost', 'on', 'arc', '(', 'i', 'j', ')', 'for', 'commodity', 'k', '-', 'd', '[', 'i', 'k', ']', ':', 'demand', 'for', 'commodity', 'k', 'at', 'node', 'i', '-', 'M', '[', 'j', ']', ':', 'capacity', 'Returns', 'a', 'model', 'ready', 'to', 'be', 'solved', '.'] | train | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/mctransp_tuplelist.py#L15-L47 |
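The comment in this record suggests itertools or plain Python as an alternative to Gurobi's tuplelist.select; a minimal sketch of that idea, using a hypothetical select helper and toy arc tuples (not part of PySCIPOpt):

```python
# Hypothetical stand-in for tuplelist.select on (i, j, k) arc tuples;
# "*" acts as a wildcard in any position.
arcs = [("c1", "f1", "k1"), ("c1", "f2", "k1"), ("c2", "f1", "k2")]

def select(arcs, i, j, k):
    return [t for t in arcs
            if (i == "*" or t[0] == i)
            and (j == "*" or t[1] == j)
            and (k == "*" or t[2] == k)]

print(select(arcs, "c1", "*", "k1"))  # [('c1', 'f1', 'k1'), ('c1', 'f2', 'k1')]
print(select(arcs, "*", "f1", "*"))   # [('c1', 'f1', 'k1'), ('c2', 'f1', 'k2')]
```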
7,762 | obriencj/python-javatools | javatools/opcodes.py | _unpack_lookupswitch | def _unpack_lookupswitch(bc, offset):
"""
function for unpacking the lookupswitch op arguments
"""
jump = (offset % 4)
if jump:
offset += (4 - jump)
(default, npairs), offset = _unpack(_struct_ii, bc, offset)
switches = list()
for _index in range(npairs):
pair, offset = _unpack(_struct_ii, bc, offset)
switches.append(pair)
return (default, switches), offset | python | def _unpack_lookupswitch(bc, offset):
"""
function for unpacking the lookupswitch op arguments
"""
jump = (offset % 4)
if jump:
offset += (4 - jump)
(default, npairs), offset = _unpack(_struct_ii, bc, offset)
switches = list()
for _index in range(npairs):
pair, offset = _unpack(_struct_ii, bc, offset)
switches.append(pair)
return (default, switches), offset | ['def', '_unpack_lookupswitch', '(', 'bc', ',', 'offset', ')', ':', 'jump', '=', '(', 'offset', '%', '4', ')', 'if', 'jump', ':', 'offset', '+=', '(', '4', '-', 'jump', ')', '(', 'default', ',', 'npairs', ')', ',', 'offset', '=', '_unpack', '(', '_struct_ii', ',', 'bc', ',', 'offset', ')', 'switches', '=', 'list', '(', ')', 'for', '_index', 'in', 'range', '(', 'npairs', ')', ':', 'pair', ',', 'offset', '=', '_unpack', '(', '_struct_ii', ',', 'bc', ',', 'offset', ')', 'switches', '.', 'append', '(', 'pair', ')', 'return', '(', 'default', ',', 'switches', ')', ',', 'offset'] | function for unpacking the lookupswitch op arguments | ['function', 'for', 'unpacking', 'the', 'lookupswitch', 'op', 'arguments'] | train | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/opcodes.py#L171-L187 |
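A self-contained sketch of the alignment rule above: the lookupswitch payload starts at the next 4-byte boundary, and JVM ints are big-endian. The record's _unpack/_struct_ii helpers are replaced here with struct.unpack_from (an assumption about their behavior, not the library's actual code):

```python
import struct

def unpack_lookupswitch(bc, offset):
    pad = offset % 4
    if pad:                      # skip padding up to the 4-byte boundary
        offset += 4 - pad
    default, npairs = struct.unpack_from(">ii", bc, offset)
    offset += 8
    pairs = []
    for _ in range(npairs):
        pairs.append(struct.unpack_from(">ii", bc, offset))
        offset += 8
    return (default, pairs), offset

# opcode byte at offset 0, so args start at offset 1 -> 3 padding bytes,
# then default jump 99 and one (match=7, jump=24) pair
payload = b"\x00" + b"\x00" * 3 + struct.pack(">ii", 99, 1) + struct.pack(">ii", 7, 24)
print(unpack_lookupswitch(payload, 1))  # ((99, [(7, 24)]), 20)
```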
7,763 | spyder-ide/spyder | spyder/widgets/fileswitcher.py | FileSwitcher.goto_line | def goto_line(self, line_number):
"""Go to specified line number in current active editor."""
if line_number:
line_number = int(line_number)
try:
self.plugin.go_to_line(line_number)
except AttributeError:
pass | python | def goto_line(self, line_number):
"""Go to specified line number in current active editor."""
if line_number:
line_number = int(line_number)
try:
self.plugin.go_to_line(line_number)
except AttributeError:
pass | ['def', 'goto_line', '(', 'self', ',', 'line_number', ')', ':', 'if', 'line_number', ':', 'line_number', '=', 'int', '(', 'line_number', ')', 'try', ':', 'self', '.', 'plugin', '.', 'go_to_line', '(', 'line_number', ')', 'except', 'AttributeError', ':', 'pass'] | Go to specified line number in current active editor. | ['Go', 'to', 'specified', 'line', 'number', 'in', 'current', 'active', 'editor', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/fileswitcher.py#L590-L597 |
7,764 | tyiannak/pyAudioAnalysis | pyAudioAnalysis/audioSegmentation.py | computePreRec | def computePreRec(cm, class_names):
'''
This function computes the precision, recall and f1 measures,
given a confusion matrix
'''
n_classes = cm.shape[0]
if len(class_names) != n_classes:
print("Error in computePreRec! Confusion matrix and class_names "
"list must be of the same size!")
return
precision = []
recall = []
f1 = []
for i, c in enumerate(class_names):
precision.append(cm[i,i] / numpy.sum(cm[:,i]))
recall.append(cm[i,i] / numpy.sum(cm[i,:]))
f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1]))
return recall, precision, f1 | python | def computePreRec(cm, class_names):
'''
This function computes the precision, recall and f1 measures,
given a confusion matrix
'''
n_classes = cm.shape[0]
if len(class_names) != n_classes:
print("Error in computePreRec! Confusion matrix and class_names "
"list must be of the same size!")
return
precision = []
recall = []
f1 = []
for i, c in enumerate(class_names):
precision.append(cm[i,i] / numpy.sum(cm[:,i]))
recall.append(cm[i,i] / numpy.sum(cm[i,:]))
f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1]))
return recall, precision, f1 | ['def', 'computePreRec', '(', 'cm', ',', 'class_names', ')', ':', 'n_classes', '=', 'cm', '.', 'shape', '[', '0', ']', 'if', 'len', '(', 'class_names', ')', '!=', 'n_classes', ':', 'print', '(', '"Error in computePreRec! Confusion matrix and class_names "', '"list must be of the same size!"', ')', 'return', 'precision', '=', '[', ']', 'recall', '=', '[', ']', 'f1', '=', '[', ']', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'class_names', ')', ':', 'precision', '.', 'append', '(', 'cm', '[', 'i', ',', 'i', ']', '/', 'numpy', '.', 'sum', '(', 'cm', '[', ':', ',', 'i', ']', ')', ')', 'recall', '.', 'append', '(', 'cm', '[', 'i', ',', 'i', ']', '/', 'numpy', '.', 'sum', '(', 'cm', '[', 'i', ',', ':', ']', ')', ')', 'f1', '.', 'append', '(', '2', '*', 'precision', '[', '-', '1', ']', '*', 'recall', '[', '-', '1', ']', '/', '(', 'precision', '[', '-', '1', ']', '+', 'recall', '[', '-', '1', ']', ')', ')', 'return', 'recall', ',', 'precision', ',', 'f1'] | This function computes the precision, recall and f1 measures,
given a confusion matrix | ['This', 'function', 'computes', 'the', 'precision', 'recall', 'and', 'f1', 'measures', 'given', 'a', 'confusion', 'matrix'] | train | https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L124-L141 |
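The per-class formulas above, checked on a hypothetical 2x2 confusion matrix (rows taken as true labels, columns as predictions):

```python
import numpy

cm = numpy.array([[5.0, 1.0],   # hypothetical counts: 5 "speech" right, 1 called "music"
                  [2.0, 2.0]])  # 2 "music" called "speech", 2 right
for i, name in enumerate(["speech", "music"]):
    precision = cm[i, i] / numpy.sum(cm[:, i])
    recall = cm[i, i] / numpy.sum(cm[i, :])
    f1 = 2 * precision * recall / (precision + recall)
    print(name, round(precision, 3), round(recall, 3), round(f1, 3))
# speech 0.714 0.833 0.769
# music 0.667 0.5 0.571
```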
7,765 | phdata/sdc-api-tool | sdctool/commands.py | import_pipeline | def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite) | python | def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite) | ['def', 'import_pipeline', '(', 'conf', ',', 'args', ')', ':', 'with', 'open', '(', 'args', '.', 'pipeline_json', ')', 'as', 'pipeline_json', ':', 'dst', '=', 'conf', '.', 'config', '[', "'instances'", ']', '[', 'args', '.', 'dst_instance', ']', 'dst_url', '=', 'api', '.', 'build_pipeline_url', '(', 'build_instance_url', '(', 'dst', ')', ')', 'dst_auth', '=', 'tuple', '(', '[', 'conf', '.', 'creds', '[', "'instances'", ']', '[', 'args', '.', 'dst_instance', ']', '[', "'user'", ']', ',', 'conf', '.', 'creds', '[', "'instances'", ']', '[', 'args', '.', 'dst_instance', ']', '[', "'pass'", ']', ']', ')', 'parsed_json', '=', 'json', '.', 'load', '(', 'pipeline_json', ')', 'verify_ssl', '=', 'dst', '.', 'get', '(', "'verify_ssl'", ',', 'True', ')', 'return', 'api', '.', 'import_pipeline', '(', 'dst_url', ',', 'args', '.', 'pipeline_id', ',', 'dst_auth', ',', 'parsed_json', ',', 'verify_ssl', ',', 'overwrite', '=', 'args', '.', 'overwrite', ')'] | Import a pipeline from json. | ['Import', 'a', 'pipeline', 'from', 'json', '.'] | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L20-L29 |
7,766 | inveniosoftware-attic/invenio-utils | invenio_utils/datastructures.py | SmartDict.__setitem | def __setitem(self, chunk, key, keys, value, extend=False):
"""Helper function to fill up the dictionary."""
def setitem(chunk):
if keys:
return self.__setitem(chunk, keys[0], keys[1:], value, extend)
else:
return value
if key in ['.', ']']:
chunk[key] = value
elif ']' in key: # list
key = int(key[:-1].replace('n', '-1'))
if extend:
if chunk is None:
chunk = [None, ]
else:
if not isinstance(chunk, list):
chunk = [chunk, ]
if key != -1:
chunk.insert(key, None)
else:
chunk.append(None)
else:
if chunk is None:
chunk = [None, ]
chunk[key] = setitem(chunk[key])
else: # dict
if extend:
if chunk is None:
chunk = {}
chunk[key] = None
chunk[key] = setitem(chunk[key])
elif key not in chunk:
chunk[key] = None
chunk[key] = setitem(chunk[key])
else:
if keys:
chunk[key] = setitem(chunk[key])
else:
if not isinstance(chunk[key], list):
chunk[key] = [chunk[key], ]
chunk[key].append(None)
chunk[key][-1] = setitem(chunk[key][-1])
else:
if chunk is None:
chunk = {}
if key not in chunk:
chunk[key] = None
chunk[key] = setitem(chunk[key])
return chunk | python | def __setitem(self, chunk, key, keys, value, extend=False):
"""Helper function to fill up the dictionary."""
def setitem(chunk):
if keys:
return self.__setitem(chunk, keys[0], keys[1:], value, extend)
else:
return value
if key in ['.', ']']:
chunk[key] = value
elif ']' in key: # list
key = int(key[:-1].replace('n', '-1'))
if extend:
if chunk is None:
chunk = [None, ]
else:
if not isinstance(chunk, list):
chunk = [chunk, ]
if key != -1:
chunk.insert(key, None)
else:
chunk.append(None)
else:
if chunk is None:
chunk = [None, ]
chunk[key] = setitem(chunk[key])
else: # dict
if extend:
if chunk is None:
chunk = {}
chunk[key] = None
chunk[key] = setitem(chunk[key])
elif key not in chunk:
chunk[key] = None
chunk[key] = setitem(chunk[key])
else:
if keys:
chunk[key] = setitem(chunk[key])
else:
if not isinstance(chunk[key], list):
chunk[key] = [chunk[key], ]
chunk[key].append(None)
chunk[key][-1] = setitem(chunk[key][-1])
else:
if chunk is None:
chunk = {}
if key not in chunk:
chunk[key] = None
chunk[key] = setitem(chunk[key])
return chunk | ['def', '__setitem', '(', 'self', ',', 'chunk', ',', 'key', ',', 'keys', ',', 'value', ',', 'extend', '=', 'False', ')', ':', 'def', 'setitem', '(', 'chunk', ')', ':', 'if', 'keys', ':', 'return', 'self', '.', '__setitem', '(', 'chunk', ',', 'keys', '[', '0', ']', ',', 'keys', '[', '1', ':', ']', ',', 'value', ',', 'extend', ')', 'else', ':', 'return', 'value', 'if', 'key', 'in', '[', "'.'", ',', "']'", ']', ':', 'chunk', '[', 'key', ']', '=', 'value', 'elif', "']'", 'in', 'key', ':', '# list', 'key', '=', 'int', '(', 'key', '[', ':', '-', '1', ']', '.', 'replace', '(', "'n'", ',', "'-1'", ')', ')', 'if', 'extend', ':', 'if', 'chunk', 'is', 'None', ':', 'chunk', '=', '[', 'None', ',', ']', 'else', ':', 'if', 'not', 'isinstance', '(', 'chunk', ',', 'list', ')', ':', 'chunk', '=', '[', 'chunk', ',', ']', 'if', 'key', '!=', '-', '1', ':', 'chunk', '.', 'insert', '(', 'key', ',', 'None', ')', 'else', ':', 'chunk', '.', 'append', '(', 'None', ')', 'else', ':', 'if', 'chunk', 'is', 'None', ':', 'chunk', '=', '[', 'None', ',', ']', 'chunk', '[', 'key', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', ')', 'else', ':', '# dict', 'if', 'extend', ':', 'if', 'chunk', 'is', 'None', ':', 'chunk', '=', '{', '}', 'chunk', '[', 'key', ']', '=', 'None', 'chunk', '[', 'key', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', ')', 'elif', 'key', 'not', 'in', 'chunk', ':', 'chunk', '[', 'key', ']', '=', 'None', 'chunk', '[', 'key', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', ')', 'else', ':', 'if', 'keys', ':', 'chunk', '[', 'key', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', ')', 'else', ':', 'if', 'not', 'isinstance', '(', 'chunk', '[', 'key', ']', ',', 'list', ')', ':', 'chunk', '[', 'key', ']', '=', '[', 'chunk', '[', 'key', ']', ',', ']', 'chunk', '[', 'key', ']', '.', 'append', '(', 'None', ')', 'chunk', '[', 'key', ']', '[', '-', '1', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', '[', '-', '1', ']', ')', 'else', ':', 'if', 'chunk', 'is', 'None', ':', 'chunk', '=', '{', '}', 'if', 'key', 'not', 'in', 'chunk', ':', 'chunk', '[', 'key', ']', '=', 'None', 'chunk', '[', 'key', ']', '=', 'setitem', '(', 'chunk', '[', 'key', ']', ')', 'return', 'chunk'] | Helper function to fill up the dictionary. | ['Helper', 'function', 'to', 'fill', 'up', 'the', 'dictionary', '.'] | train | https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/datastructures.py#L323-L373 |
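A heavily simplified sketch of the same create-as-you-walk idea for dotted key paths (hypothetical helper; the real SmartDict grammar additionally handles list indices like [0]/[n] and the extend flag):

```python
def set_path(obj, path, value):
    """Walk a dotted key path, creating intermediate dicts as needed."""
    keys = path.split(".")
    for key in keys[:-1]:
        obj = obj.setdefault(key, {})
    obj[keys[-1]] = value

doc = {}
set_path(doc, "title.subtitle", "a subtitle")
set_path(doc, "title.lang", "en")
print(doc)  # {'title': {'subtitle': 'a subtitle', 'lang': 'en'}}
```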
7,767 | AltSchool/dynamic-rest | dynamic_rest/processors.py | register_post_processor | def register_post_processor(func):
"""
Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data
"""
global POST_PROCESSORS
key = func.__name__
POST_PROCESSORS[key] = func
return func | python | def register_post_processor(func):
"""
Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data
"""
global POST_PROCESSORS
key = func.__name__
POST_PROCESSORS[key] = func
return func | ['def', 'register_post_processor', '(', 'func', ')', ':', 'global', 'POST_PROCESSORS', 'key', '=', 'func', '.', '__name__', 'POST_PROCESSORS', '[', 'key', ']', '=', 'func', 'return', 'func'] | Register a post processor function to be run as the final step in
serialization. The data passed in will already have gone through the
sideloading processor.
Usage:
@register_post_processor
def my_post_processor(data):
# do stuff with `data`
return data | ['Register', 'a', 'post', 'processor', 'function', 'to', 'be', 'run', 'as', 'the', 'final', 'step', 'in', 'serialization', '.', 'The', 'data', 'passed', 'in', 'will', 'already', 'have', 'gone', 'through', 'the', 'sideloading', 'processor', '.'] | train | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/processors.py#L15-L32 |
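The registry-decorator pattern from this record, reproduced as a standalone sketch (module-level dict keyed by function name; the decorator returns the function unchanged so it stays usable directly):

```python
POST_PROCESSORS = {}

def register_post_processor(func):
    POST_PROCESSORS[func.__name__] = func
    return func

@register_post_processor
def strip_nulls(data):
    return {k: v for k, v in data.items() if v is not None}

print(POST_PROCESSORS["strip_nulls"]({"a": 1, "b": None}))  # {'a': 1}
```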
7,768 | learningequality/ricecooker | ricecooker/utils/zip.py | write_file_to_zip_with_neutral_metadata | def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
"""
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
"""
info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
info.compress_type = zipfile.ZIP_DEFLATED
info.comment = "".encode()
info.create_system = 0
zfile.writestr(info, content) | python | def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
"""
Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None
"""
info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
info.compress_type = zipfile.ZIP_DEFLATED
info.comment = "".encode()
info.create_system = 0
zfile.writestr(info, content) | ['def', 'write_file_to_zip_with_neutral_metadata', '(', 'zfile', ',', 'filename', ',', 'content', ')', ':', 'info', '=', 'zipfile', '.', 'ZipInfo', '(', 'filename', ',', 'date_time', '=', '(', '2015', ',', '10', ',', '21', ',', '7', ',', '28', ',', '0', ')', ')', 'info', '.', 'compress_type', '=', 'zipfile', '.', 'ZIP_DEFLATED', 'info', '.', 'comment', '=', '""', '.', 'encode', '(', ')', 'info', '.', 'create_system', '=', '0', 'zfile', '.', 'writestr', '(', 'info', ',', 'content', ')'] | Write the string `content` to `filename` in the open ZipFile `zfile`.
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None | ['Write', 'the', 'string', 'content', 'to', 'filename', 'in', 'the', 'open', 'ZipFile', 'zfile', '.', 'Args', ':', 'zfile', '(', 'ZipFile', ')', ':', 'open', 'ZipFile', 'to', 'write', 'the', 'content', 'into', 'filename', '(', 'str', ')', ':', 'the', 'file', 'path', 'within', 'the', 'zip', 'file', 'to', 'write', 'into', 'content', '(', 'str', ')', ':', 'the', 'content', 'to', 'write', 'into', 'the', 'zip', 'Returns', ':', 'None'] | train | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/zip.py#L42-L55 |
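A usage sketch that re-pastes the record's function to show the point of the pinned metadata: two builds of the same content come out byte-identical (assuming the same Python/zlib for both calls):

```python
import io
import zipfile

def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
    info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
    info.compress_type = zipfile.ZIP_DEFLATED
    info.comment = "".encode()
    info.create_system = 0
    zfile.writestr(info, content)

def build():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        write_file_to_zip_with_neutral_metadata(zf, "index.html", "<html></html>")
    return buf.getvalue()

print(build() == build())  # True: the archive is reproducible
```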
7,769 | Kronuz/pyScss | scss/extension/compass/sprites.py | sprite_map_name | def sprite_map_name(map):
"""
    Returns the name of a sprite map. The name is derived from the folder that
contains the sprites.
"""
map = map.render()
sprite_maps = _get_cache('sprite_maps')
sprite_map = sprite_maps.get(map)
if not sprite_map:
log.error("No sprite map found: %s", map, extra={'stack': True})
if sprite_map:
return String.unquoted(sprite_map['*n*'])
return String.unquoted('') | python | def sprite_map_name(map):
"""
    Returns the name of a sprite map. The name is derived from the folder that
contains the sprites.
"""
map = map.render()
sprite_maps = _get_cache('sprite_maps')
sprite_map = sprite_maps.get(map)
if not sprite_map:
log.error("No sprite map found: %s", map, extra={'stack': True})
if sprite_map:
return String.unquoted(sprite_map['*n*'])
    return String.unquoted('') | ['def', 'sprite_map_name', '(', 'map', ')', ':', 'map', '=', 'map', '.', 'render', '(', ')', 'sprite_maps', '=', '_get_cache', '(', "'sprite_maps'", ')', 'sprite_map', '=', 'sprite_maps', '.', 'get', '(', 'map', ')', 'if', 'not', 'sprite_map', ':', 'log', '.', 'error', '(', '"No sprite map found: %s"', ',', 'map', ',', 'extra', '=', '{', "'stack'", ':', 'True', '}', ')', 'if', 'sprite_map', ':', 'return', 'String', '.', 'unquoted', '(', 'sprite_map', '[', "'*n*'", ']', ')', 'return', 'String', '.', 'unquoted', '(', "''", ')'] | Returns the name of a sprite map. The name is derived from the folder that
    contains the sprites. | ['Returns', 'the', 'name', 'of', 'a', 'sprite', 'map', '.', 'The', 'name', 'is', 'derived', 'from', 'the', 'folder', 'that', 'contains', 'the', 'sprites', '.'] | train | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/sprites.py#L428-L440 |
7,770 | Oneiroe/PySimpleAutomata | PySimpleAutomata/AFW.py | afw_union | def afw_union(afw_1: dict, afw_2: dict) -> dict:
""" Returns a AFW that reads the union of the languages read
by input AFWs.
Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2
= (Σ, S_2 , s^0_2, ρ_2 , F_2 )`
be alternating automata accepting the languages :math:`L(
A_1)` and :math:`L(A_2)`.
Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root ,
F_1 ∪ F_2 )` with
:math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∨ ρ(s^0_2 ,
a)]` accepts :math:`L(A_1) ∪ L(A_2)`.
    Make sure the two AFWs do not have state names in common; if they do,
    rename them first with the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.
:param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
:return: *(dict)* representing the united AFW.
"""
# make sure new root state is unique
initial_state = 'root'
i = 0
while initial_state in afw_1['states'] or initial_state in afw_2['states']:
initial_state = 'root' + str(i)
i += 1
union = {
'alphabet': afw_1['alphabet'].union(afw_2['alphabet']),
'states':
afw_1['states'].union(afw_2['states']).union({initial_state}),
'initial_state': initial_state,
'accepting_states':
afw_1['accepting_states'].union(afw_2['accepting_states']),
'transitions': deepcopy(afw_1['transitions'])
}
# add also afw_2 transitions
union['transitions'].update(afw_2['transitions'])
    # if at least one initial state is accepting, the new one is too
if afw_1['initial_state'] in afw_1['accepting_states'] \
or afw_2['initial_state'] in afw_2['accepting_states']:
union['accepting_states'].add(union['initial_state'])
    # copy all transitions of the initial states (and, where both define one,
    # their disjunction) into the new initial state
for action in union['alphabet']:
if (afw_1['initial_state'], action) in afw_1['transitions']:
union['transitions'][initial_state, action] = \
'(' + \
afw_1['transitions'][afw_1['initial_state'], action] + \
')'
if (afw_2['initial_state'], action) in afw_2['transitions']:
union['transitions'][initial_state, action] += \
' or (' + \
afw_2['transitions'][afw_2['initial_state'], action] + \
')'
elif (afw_2['initial_state'], action) in afw_2['transitions']:
union['transitions'][initial_state, action] = \
'(' + \
afw_2['transitions'][afw_2['initial_state'], action] + \
')'
return union | python | def afw_union(afw_1: dict, afw_2: dict) -> dict:
""" Returns a AFW that reads the union of the languages read
by input AFWs.
Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2
= (Σ, S_2 , s^0_2, ρ_2 , F_2 )`
be alternating automata accepting the languages :math:`L(
A_1)` and :math:`L(A_2)`.
Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root ,
F_1 ∪ F_2 )` with
:math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∨ ρ(s^0_2 ,
a)]` accepts :math:`L(A_1) ∪ L(A_2)`.
    Make sure the two AFWs do not have state names in common; if they do,
    rename them first with the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.
:param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
:return: *(dict)* representing the united AFW.
"""
# make sure new root state is unique
initial_state = 'root'
i = 0
while initial_state in afw_1['states'] or initial_state in afw_2['states']:
initial_state = 'root' + str(i)
i += 1
union = {
'alphabet': afw_1['alphabet'].union(afw_2['alphabet']),
'states':
afw_1['states'].union(afw_2['states']).union({initial_state}),
'initial_state': initial_state,
'accepting_states':
afw_1['accepting_states'].union(afw_2['accepting_states']),
'transitions': deepcopy(afw_1['transitions'])
}
# add also afw_2 transitions
union['transitions'].update(afw_2['transitions'])
    # if at least one initial state is accepting, the new one is too
if afw_1['initial_state'] in afw_1['accepting_states'] \
or afw_2['initial_state'] in afw_2['accepting_states']:
union['accepting_states'].add(union['initial_state'])
    # copy all transitions of the initial states (and, where both define one,
    # their disjunction) into the new initial state
for action in union['alphabet']:
if (afw_1['initial_state'], action) in afw_1['transitions']:
union['transitions'][initial_state, action] = \
'(' + \
afw_1['transitions'][afw_1['initial_state'], action] + \
')'
if (afw_2['initial_state'], action) in afw_2['transitions']:
union['transitions'][initial_state, action] += \
' or (' + \
afw_2['transitions'][afw_2['initial_state'], action] + \
')'
elif (afw_2['initial_state'], action) in afw_2['transitions']:
union['transitions'][initial_state, action] = \
'(' + \
afw_2['transitions'][afw_2['initial_state'], action] + \
')'
return union | ['def', 'afw_union', '(', 'afw_1', ':', 'dict', ',', 'afw_2', ':', 'dict', ')', '->', 'dict', ':', '# make sure new root state is unique', 'initial_state', '=', "'root'", 'i', '=', '0', 'while', 'initial_state', 'in', 'afw_1', '[', "'states'", ']', 'or', 'initial_state', 'in', 'afw_2', '[', "'states'", ']', ':', 'initial_state', '=', "'root'", '+', 'str', '(', 'i', ')', 'i', '+=', '1', 'union', '=', '{', "'alphabet'", ':', 'afw_1', '[', "'alphabet'", ']', '.', 'union', '(', 'afw_2', '[', "'alphabet'", ']', ')', ',', "'states'", ':', 'afw_1', '[', "'states'", ']', '.', 'union', '(', 'afw_2', '[', "'states'", ']', ')', '.', 'union', '(', '{', 'initial_state', '}', ')', ',', "'initial_state'", ':', 'initial_state', ',', "'accepting_states'", ':', 'afw_1', '[', "'accepting_states'", ']', '.', 'union', '(', 'afw_2', '[', "'accepting_states'", ']', ')', ',', "'transitions'", ':', 'deepcopy', '(', 'afw_1', '[', "'transitions'", ']', ')', '}', '# add also afw_2 transitions', 'union', '[', "'transitions'", ']', '.', 'update', '(', 'afw_2', '[', "'transitions'", ']', ')', '# if just one initial state is accepting, so the new one is', 'if', 'afw_1', '[', "'initial_state'", ']', 'in', 'afw_1', '[', "'accepting_states'", ']', 'or', 'afw_2', '[', "'initial_state'", ']', 'in', 'afw_2', '[', "'accepting_states'", ']', ':', 'union', '[', "'accepting_states'", ']', '.', 'add', '(', 'union', '[', "'initial_state'", ']', ')', '# copy all transitions of initial states and eventually their conjunction', '# into the new initial state', 'for', 'action', 'in', 'union', '[', "'alphabet'", ']', ':', 'if', '(', 'afw_1', '[', "'initial_state'", ']', ',', 'action', ')', 'in', 'afw_1', '[', "'transitions'", ']', ':', 'union', '[', "'transitions'", ']', '[', 'initial_state', ',', 'action', ']', '=', "'('", '+', 'afw_1', '[', "'transitions'", ']', '[', 'afw_1', '[', "'initial_state'", ']', ',', 'action', ']', '+', "')'", 'if', '(', 'afw_2', '[', "'initial_state'", ']', ',', 'action', ')', 'in', 'afw_2', '[', "'transitions'", ']', ':', 'union', '[', "'transitions'", ']', '[', 'initial_state', ',', 'action', ']', '+=', "' or ('", '+', 'afw_2', '[', "'transitions'", ']', '[', 'afw_2', '[', "'initial_state'", ']', ',', 'action', ']', '+', "')'", 'elif', '(', 'afw_2', '[', "'initial_state'", ']', ',', 'action', ')', 'in', 'afw_2', '[', "'transitions'", ']', ':', 'union', '[', "'transitions'", ']', '[', 'initial_state', ',', 'action', ']', '=', "'('", '+', 'afw_2', '[', "'transitions'", ']', '[', 'afw_2', '[', "'initial_state'", ']', ',', 'action', ']', '+', "')'", 'return', 'union'] | Returns a AFW that reads the union of the languages read
by input AFWs.
Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2
= (Σ, S_2 , s^0_2, ρ_2 , F_2 )`
be alternating automata accepting the languages :math:`L(
A_1)` and :math:`L(A_2)`.
Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root ,
F_1 ∪ F_2 )` with
:math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∨ ρ(s^0_2 ,
a)]` accepts :math:`L(A_1) ∪ L(A_2)`.
    Make sure the two AFWs do not have state names in common; if they do,
    rename them first with the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.
:param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
    :return: *(dict)* representing the united AFW. | ['Returns', 'an', 'AFW', 'that', 'reads', 'the', 'union', 'of', 'the', 'languages', 'read', 'by', 'input', 'AFWs', '.'] | train | https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/AFW.py#L368-L432 |
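A minimal usage sketch, assuming PySimpleAutomata is installed; the two single-state AFWs below are hypothetical toy inputs:

```python
from PySimpleAutomata import AFW

afw_1 = {
    'alphabet': {'a'},
    'states': {'s'},
    'initial_state': 's',
    'accepting_states': {'s'},
    'transitions': {('s', 'a'): 's'},
}
afw_2 = {
    'alphabet': {'b'},
    'states': {'q'},
    'initial_state': 'q',
    'accepting_states': set(),
    'transitions': {('q', 'b'): 'q'},
}
union = AFW.afw_union(afw_1, afw_2)
print(union['initial_state'])             # 'root' (fresh, not in either state set)
print(union['transitions']['root', 'a'])  # '(s)'  (copied from afw_1's initial state)
```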
7,771 | sorgerlab/indra | indra/tools/reading/submit_reading_pipeline.py | Submitter.submit_reading | def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
num_tries=1, stagger=0):
"""Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings.
"""
# stash this for later.
self.ids_per_job = ids_per_job
# Upload the pmid_list to Amazon S3
id_list_key = 'reading_results/%s/%s' % (self.basename,
self._s3_input_name)
s3_client = boto3.client('s3')
s3_client.upload_file(input_fname, bucket_name, id_list_key)
# If no end index is specified, read all the PMIDs
if end_ix is None:
with open(input_fname, 'rt') as f:
lines = f.readlines()
end_ix = len(lines)
if start_ix is None:
start_ix = 0
# Get environment variables
environment_vars = get_environment()
# Iterate over the list of PMIDs and submit the job in chunks
batch_client = boto3.client('batch', region_name='us-east-1')
job_list = []
for job_start_ix in range(start_ix, end_ix, ids_per_job):
sleep(stagger)
job_end_ix = job_start_ix + ids_per_job
if job_end_ix > end_ix:
job_end_ix = end_ix
job_name, cmd = self._make_command(job_start_ix, job_end_ix)
command_list = get_batch_command(cmd, purpose=self._purpose,
project=self.project_name)
logger.info('Command list: %s' % str(command_list))
job_info = batch_client.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=self._job_def,
containerOverrides={
'environment': environment_vars,
'command': command_list},
retryStrategy={'attempts': num_tries}
)
logger.info("submitted...")
job_list.append({'jobId': job_info['jobId']})
self.job_list = job_list
return job_list | python | def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
num_tries=1, stagger=0):
"""Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings.
"""
# stash this for later.
self.ids_per_job = ids_per_job
# Upload the pmid_list to Amazon S3
id_list_key = 'reading_results/%s/%s' % (self.basename,
self._s3_input_name)
s3_client = boto3.client('s3')
s3_client.upload_file(input_fname, bucket_name, id_list_key)
# If no end index is specified, read all the PMIDs
if end_ix is None:
with open(input_fname, 'rt') as f:
lines = f.readlines()
end_ix = len(lines)
if start_ix is None:
start_ix = 0
# Get environment variables
environment_vars = get_environment()
# Iterate over the list of PMIDs and submit the job in chunks
batch_client = boto3.client('batch', region_name='us-east-1')
job_list = []
for job_start_ix in range(start_ix, end_ix, ids_per_job):
sleep(stagger)
job_end_ix = job_start_ix + ids_per_job
if job_end_ix > end_ix:
job_end_ix = end_ix
job_name, cmd = self._make_command(job_start_ix, job_end_ix)
command_list = get_batch_command(cmd, purpose=self._purpose,
project=self.project_name)
logger.info('Command list: %s' % str(command_list))
job_info = batch_client.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=self._job_def,
containerOverrides={
'environment': environment_vars,
'command': command_list},
retryStrategy={'attempts': num_tries}
)
logger.info("submitted...")
job_list.append({'jobId': job_info['jobId']})
self.job_list = job_list
return job_list | ['def', 'submit_reading', '(', 'self', ',', 'input_fname', ',', 'start_ix', ',', 'end_ix', ',', 'ids_per_job', ',', 'num_tries', '=', '1', ',', 'stagger', '=', '0', ')', ':', '# stash this for later.', 'self', '.', 'ids_per_job', '=', 'ids_per_job', '# Upload the pmid_list to Amazon S3', 'id_list_key', '=', "'reading_results/%s/%s'", '%', '(', 'self', '.', 'basename', ',', 'self', '.', '_s3_input_name', ')', 's3_client', '=', 'boto3', '.', 'client', '(', "'s3'", ')', 's3_client', '.', 'upload_file', '(', 'input_fname', ',', 'bucket_name', ',', 'id_list_key', ')', '# If no end index is specified, read all the PMIDs', 'if', 'end_ix', 'is', 'None', ':', 'with', 'open', '(', 'input_fname', ',', "'rt'", ')', 'as', 'f', ':', 'lines', '=', 'f', '.', 'readlines', '(', ')', 'end_ix', '=', 'len', '(', 'lines', ')', 'if', 'start_ix', 'is', 'None', ':', 'start_ix', '=', '0', '# Get environment variables', 'environment_vars', '=', 'get_environment', '(', ')', '# Iterate over the list of PMIDs and submit the job in chunks', 'batch_client', '=', 'boto3', '.', 'client', '(', "'batch'", ',', 'region_name', '=', "'us-east-1'", ')', 'job_list', '=', '[', ']', 'for', 'job_start_ix', 'in', 'range', '(', 'start_ix', ',', 'end_ix', ',', 'ids_per_job', ')', ':', 'sleep', '(', 'stagger', ')', 'job_end_ix', '=', 'job_start_ix', '+', 'ids_per_job', 'if', 'job_end_ix', '>', 'end_ix', ':', 'job_end_ix', '=', 'end_ix', 'job_name', ',', 'cmd', '=', 'self', '.', '_make_command', '(', 'job_start_ix', ',', 'job_end_ix', ')', 'command_list', '=', 'get_batch_command', '(', 'cmd', ',', 'purpose', '=', 'self', '.', '_purpose', ',', 'project', '=', 'self', '.', 'project_name', ')', 'logger', '.', 'info', '(', "'Command list: %s'", '%', 'str', '(', 'command_list', ')', ')', 'job_info', '=', 'batch_client', '.', 'submit_job', '(', 'jobName', '=', 'job_name', ',', 'jobQueue', '=', 'self', '.', '_job_queue', ',', 'jobDefinition', '=', 'self', '.', '_job_def', ',', 'containerOverrides', '=', '{', "'environment'", ':', 'environment_vars', ',', "'command'", ':', 'command_list', '}', ',', 'retryStrategy', '=', '{', "'attempts'", ':', 'num_tries', '}', ')', 'logger', '.', 'info', '(', '"submitted..."', ')', 'job_list', '.', 'append', '(', '{', "'jobId'", ':', 'job_info', '[', "'jobId'", ']', '}', ')', 'self', '.', 'job_list', '=', 'job_list', 'return', 'job_list'] | Submit a batch of reading jobs
Parameters
----------
input_fname : str
The name of the file containing the ids to be read.
start_ix : int
The line index of the first item in the list to read.
end_ix : int
The line index of the last item in the list to be read.
ids_per_job : int
The number of ids to be given to each job.
num_tries : int
The number of times a job may be attempted.
stagger : float
The number of seconds to wait between job submissions.
Returns
-------
job_list : list[str]
A list of job id strings. | ['Submit', 'a', 'batch', 'of', 'reading', 'jobs'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/submit_reading_pipeline.py#L397-L466 |
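The chunking rule inside the submission loop, in isolation: closed-open index ranges of ids_per_job items, with the final chunk clamped to end_ix.

```python
def chunks(start_ix, end_ix, ids_per_job):
    # mirrors the range(...) loop above, including the end_ix clamp
    for job_start in range(start_ix, end_ix, ids_per_job):
        yield job_start, min(job_start + ids_per_job, end_ix)

print(list(chunks(0, 10, 4)))  # [(0, 4), (4, 8), (8, 10)]
```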
7,772 | emilydolson/avida-spatial-tools | avidaspatial/utils.py | initialize_grid | def initialize_grid(world_size, inner):
"""
Creates an empty grid (2d list) with the dimensions specified in
world_size. Each element is initialized to the inner argument.
"""
data = []
for i in range(world_size[1]):
data.append([])
for j in range(world_size[0]):
data[i].append(deepcopy(inner))
return data | python | def initialize_grid(world_size, inner):
"""
Creates an empty grid (2d list) with the dimensions specified in
world_size. Each element is initialized to the inner argument.
"""
data = []
for i in range(world_size[1]):
data.append([])
for j in range(world_size[0]):
data[i].append(deepcopy(inner))
return data | ['def', 'initialize_grid', '(', 'world_size', ',', 'inner', ')', ':', 'data', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'world_size', '[', '1', ']', ')', ':', 'data', '.', 'append', '(', '[', ']', ')', 'for', 'j', 'in', 'range', '(', 'world_size', '[', '0', ']', ')', ':', 'data', '[', 'i', ']', '.', 'append', '(', 'deepcopy', '(', 'inner', ')', ')', 'return', 'data'] | Creates an empty grid (2d list) with the dimensions specified in
world_size. Each element is initialized to the inner argument. | ['Creates', 'an', 'empty', 'grid', '(', '2d', 'list', ')', 'with', 'the', 'dimensions', 'specified', 'in', 'world_size', '.', 'Each', 'element', 'is', 'initialized', 'to', 'the', 'inner', 'argument', '.'] | train | https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/utils.py#L382-L393 |
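A quick check of why the deepcopy(inner) matters (function re-pasted standalone): every cell gets its own copy, so mutating one cell does not leak into the rest of the grid.

```python
from copy import deepcopy

def initialize_grid(world_size, inner):
    data = []
    for i in range(world_size[1]):
        data.append([])
        for j in range(world_size[0]):
            data[i].append(deepcopy(inner))
    return data

grid = initialize_grid((3, 2), inner=[])  # 3 columns, 2 rows
grid[0][0].append("x")
print(grid)  # [[['x'], [], []], [[], [], []]] -- only one cell changed
```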
7,773 | spacetelescope/drizzlepac | drizzlepac/imageObject.py | baseImageObject.updateData | def updateData(self,exten,data):
""" Write out updated data and header to
the original input file for this object.
"""
_extnum=self._interpretExten(exten)
fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
fimg[_extnum].data = data
fimg[_extnum].header = self._image[_extnum].header
fimg.close() | python | def updateData(self,exten,data):
""" Write out updated data and header to
the original input file for this object.
"""
_extnum=self._interpretExten(exten)
fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
fimg[_extnum].data = data
fimg[_extnum].header = self._image[_extnum].header
fimg.close() | ['def', 'updateData', '(', 'self', ',', 'exten', ',', 'data', ')', ':', '_extnum', '=', 'self', '.', '_interpretExten', '(', 'exten', ')', 'fimg', '=', 'fileutil', '.', 'openImage', '(', 'self', '.', '_filename', ',', 'mode', '=', "'update'", ',', 'memmap', '=', 'False', ')', 'fimg', '[', '_extnum', ']', '.', 'data', '=', 'data', 'fimg', '[', '_extnum', ']', '.', 'header', '=', 'self', '.', '_image', '[', '_extnum', ']', '.', 'header', 'fimg', '.', 'close', '(', ')'] | Write out updated data and header to
the original input file for this object. | ['Write', 'out', 'updated', 'data', 'and', 'header', 'to', 'the', 'original', 'input', 'file', 'for', 'this', 'object', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L212-L220 |
7,774 | tjcsl/cslbot | cslbot/commands/guarded.py | cmd | def cmd(send, _, args):
"""Shows the currently guarded nicks.
Syntax: {command}
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded)) | python | def cmd(send, _, args):
"""Shows the currently guarded nicks.
Syntax: {command}
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded)) | ['def', 'cmd', '(', 'send', ',', '_', ',', 'args', ')', ':', 'guarded', '=', 'args', '[', "'handler'", ']', '.', 'guarded', 'if', 'not', 'guarded', ':', 'send', '(', '"Nobody is guarded."', ')', 'else', ':', 'send', '(', '", "', '.', 'join', '(', 'guarded', ')', ')'] | Shows the currently guarded nicks.
Syntax: {command} | ['Shows', 'the', 'currently', 'guarded', 'nicks', '.'] | train | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/guarded.py#L22-L32 |
7,775 | fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.get_expanded_path | def get_expanded_path(self, path):
"""
        expands a path that starts with a ~ to an absolute path
:param path:
:return:
"""
if path.startswith('~'):
return os.path.expanduser('~') + path[1:]
else:
return path | python | def get_expanded_path(self, path):
"""
        expands a path that starts with a ~ to an absolute path
:param path:
:return:
"""
if path.startswith('~'):
return os.path.expanduser('~') + path[1:]
else:
        return path | ['def', 'get_expanded_path', '(', 'self', ',', 'path', ')', ':', 'if', 'path', '.', 'startswith', '(', "'~'", ')', ':', 'return', 'os', '.', 'path', '.', 'expanduser', '(', "'~'", ')', '+', 'path', '[', '1', ':', ']', 'else', ':', 'return', 'path'] | expands a path that starts with a ~ to an absolute path
:param path:
        :return: | ['expands', 'a', 'path', 'that', 'starts', 'with', 'a', '~', 'to', 'an', 'absolute', 'path', ':', 'param', 'path', ':', ':', 'return', ':'] | train | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L317-L326 |
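The tilde expansion in isolation (standalone re-paste, minus self; the expanded prefix is machine-dependent):

```python
import os

def get_expanded_path(path):
    if path.startswith('~'):
        return os.path.expanduser('~') + path[1:]
    else:
        return path

print(get_expanded_path('~/news/config.cfg'))     # e.g. /home/user/news/config.cfg
print(get_expanded_path('/etc/news/config.cfg'))  # absolute paths pass through unchanged
```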
7,776 | Dallinger/Dallinger | dallinger/experiment_server/gunicorn.py | StandaloneServer.load | def load(self):
"""Return our application to be run."""
app = util.import_app("dallinger.experiment_server.sockets:app")
if self.options.get("mode") == "debug":
app.debug = True
return app | python | def load(self):
"""Return our application to be run."""
app = util.import_app("dallinger.experiment_server.sockets:app")
if self.options.get("mode") == "debug":
app.debug = True
return app | ['def', 'load', '(', 'self', ')', ':', 'app', '=', 'util', '.', 'import_app', '(', '"dallinger.experiment_server.sockets:app"', ')', 'if', 'self', '.', 'options', '.', 'get', '(', '"mode"', ')', '==', '"debug"', ':', 'app', '.', 'debug', '=', 'True', 'return', 'app'] | Return our application to be run. | ['Return', 'our', 'application', 'to', 'be', 'run', '.'] | train | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/experiment_server/gunicorn.py#L52-L57 |
7,777 | opentracing/opentracing-python | opentracing/scope_managers/tornado.py | TornadoScopeManager.activate | def activate(self, span, finish_on_close):
"""
Make a :class:`~opentracing.Span` instance active.
:param span: the :class:`~opentracing.Span` that should become active.
:param finish_on_close: whether *span* should automatically be
finished when :meth:`Scope.close()` is called.
If no :func:`tracer_stack_context()` is detected, thread-local
storage will be used to store the :class:`~opentracing.Scope`.
Observe that in this case the active :class:`~opentracing.Span`
        will not be automatically propagated to the child coroutines.
:return: a :class:`~opentracing.Scope` instance to control the end
of the active period for the :class:`~opentracing.Span`.
It is a programming error to neglect to call :meth:`Scope.close()`
on the returned instance.
"""
context = self._get_context()
if context is None:
return super(TornadoScopeManager, self).activate(span,
finish_on_close)
scope = _TornadoScope(self, span, finish_on_close)
context.active = scope
return scope | python | def activate(self, span, finish_on_close):
"""
Make a :class:`~opentracing.Span` instance active.
:param span: the :class:`~opentracing.Span` that should become active.
:param finish_on_close: whether *span* should automatically be
finished when :meth:`Scope.close()` is called.
If no :func:`tracer_stack_context()` is detected, thread-local
storage will be used to store the :class:`~opentracing.Scope`.
Observe that in this case the active :class:`~opentracing.Span`
        will not be automatically propagated to the child coroutines.
:return: a :class:`~opentracing.Scope` instance to control the end
of the active period for the :class:`~opentracing.Span`.
It is a programming error to neglect to call :meth:`Scope.close()`
on the returned instance.
"""
context = self._get_context()
if context is None:
return super(TornadoScopeManager, self).activate(span,
finish_on_close)
scope = _TornadoScope(self, span, finish_on_close)
context.active = scope
return scope | ['def', 'activate', '(', 'self', ',', 'span', ',', 'finish_on_close', ')', ':', 'context', '=', 'self', '.', '_get_context', '(', ')', 'if', 'context', 'is', 'None', ':', 'return', 'super', '(', 'TornadoScopeManager', ',', 'self', ')', '.', 'activate', '(', 'span', ',', 'finish_on_close', ')', 'scope', '=', '_TornadoScope', '(', 'self', ',', 'span', ',', 'finish_on_close', ')', 'context', '.', 'active', '=', 'scope', 'return', 'scope'] | Make a :class:`~opentracing.Span` instance active.
:param span: the :class:`~opentracing.Span` that should become active.
:param finish_on_close: whether *span* should automatically be
finished when :meth:`Scope.close()` is called.
If no :func:`tracer_stack_context()` is detected, thread-local
storage will be used to store the :class:`~opentracing.Scope`.
Observe that in this case the active :class:`~opentracing.Span`
    will not be automatically propagated to the child coroutines.
:return: a :class:`~opentracing.Scope` instance to control the end
of the active period for the :class:`~opentracing.Span`.
It is a programming error to neglect to call :meth:`Scope.close()`
on the returned instance. | ['Make', 'a', ':', 'class', ':', '~opentracing', '.', 'Span', 'instance', 'active', '.'] | train | https://github.com/opentracing/opentracing-python/blob/5ceb76d67b01c1c3316cfed02371d7922830e317/opentracing/scope_managers/tornado.py#L87-L114 |
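A minimal usage sketch, assuming the opentracing package is installed and no tracer_stack_context is active, so the thread-local fallback described above is what runs:

```python
import opentracing
from opentracing.scope_managers.tornado import TornadoScopeManager

# The reference (no-op) Tracer accepts a scope manager; activate() returns a
# Scope that works as a context manager and finishes the span on close.
tracer = opentracing.Tracer(scope_manager=TornadoScopeManager())
span = tracer.start_span("handle_request")
with tracer.scope_manager.activate(span, finish_on_close=True):
    pass  # the span is active inside this block
```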
7,778 | jwodder/javaproperties | javaproperties/reading.py | parse | def parse(fp):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a
generator of ``(key, value, original_lines)`` triples for every entry in
``fp`` (including duplicate keys) in order of occurrence. The third
element of each triple is the concatenation of the unmodified lines in
``fp`` (including trailing newlines) from which the key and value were
extracted. The generator also includes comments and blank/all-whitespace
lines found in ``fp``, one triple per line, with the first two elements of
the triples set to `None`. This is the only way to extract comments from a
``.properties`` file with this library.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: generator of triples of text strings
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
def lineiter():
while True:
ln = fp.readline()
if isinstance(ln, binary_type):
ln = ln.decode('iso-8859-1')
if ln == '':
return
for l in ascii_splitlines(ln):
yield l
liter = lineiter()
for source in liter:
line = source
if re.match(r'^[ \t\f]*(?:[#!]|\r?\n?$)', line):
yield (None, None, source)
continue
line = line.lstrip(' \t\f').rstrip('\r\n')
while re.search(r'(?<!\\)(?:\\\\)*\\$', line):
line = line[:-1]
nextline = next(liter, '')
source += nextline
line += nextline.lstrip(' \t\f').rstrip('\r\n')
if line == '': # series of otherwise-blank lines with continuations
yield (None, None, source)
continue
m = re.search(r'(?<!\\)(?:\\\\)*([ \t\f]*[=:]|[ \t\f])[ \t\f]*', line)
if m:
yield (unescape(line[:m.start(1)]),unescape(line[m.end():]),source)
else:
yield (unescape(line), '', source) | python | def parse(fp):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a
generator of ``(key, value, original_lines)`` triples for every entry in
``fp`` (including duplicate keys) in order of occurrence. The third
element of each triple is the concatenation of the unmodified lines in
``fp`` (including trailing newlines) from which the key and value were
extracted. The generator also includes comments and blank/all-whitespace
lines found in ``fp``, one triple per line, with the first two elements of
the triples set to `None`. This is the only way to extract comments from a
``.properties`` file with this library.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: generator of triples of text strings
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
def lineiter():
while True:
ln = fp.readline()
if isinstance(ln, binary_type):
ln = ln.decode('iso-8859-1')
if ln == '':
return
for l in ascii_splitlines(ln):
yield l
liter = lineiter()
for source in liter:
line = source
if re.match(r'^[ \t\f]*(?:[#!]|\r?\n?$)', line):
yield (None, None, source)
continue
line = line.lstrip(' \t\f').rstrip('\r\n')
while re.search(r'(?<!\\)(?:\\\\)*\\$', line):
line = line[:-1]
nextline = next(liter, '')
source += nextline
line += nextline.lstrip(' \t\f').rstrip('\r\n')
if line == '': # series of otherwise-blank lines with continuations
yield (None, None, source)
continue
m = re.search(r'(?<!\\)(?:\\\\)*([ \t\f]*[=:]|[ \t\f])[ \t\f]*', line)
if m:
yield (unescape(line[:m.start(1)]),unescape(line[m.end():]),source)
else:
yield (unescape(line), '', source) | ['def', 'parse', '(', 'fp', ')', ':', 'def', 'lineiter', '(', ')', ':', 'while', 'True', ':', 'ln', '=', 'fp', '.', 'readline', '(', ')', 'if', 'isinstance', '(', 'ln', ',', 'binary_type', ')', ':', 'ln', '=', 'ln', '.', 'decode', '(', "'iso-8859-1'", ')', 'if', 'ln', '==', "''", ':', 'return', 'for', 'l', 'in', 'ascii_splitlines', '(', 'ln', ')', ':', 'yield', 'l', 'liter', '=', 'lineiter', '(', ')', 'for', 'source', 'in', 'liter', ':', 'line', '=', 'source', 'if', 're', '.', 'match', '(', "r'^[ \\t\\f]*(?:[#!]|\\r?\\n?$)'", ',', 'line', ')', ':', 'yield', '(', 'None', ',', 'None', ',', 'source', ')', 'continue', 'line', '=', 'line', '.', 'lstrip', '(', "' \\t\\f'", ')', '.', 'rstrip', '(', "'\\r\\n'", ')', 'while', 're', '.', 'search', '(', "r'(?<!\\\\)(?:\\\\\\\\)*\\\\$'", ',', 'line', ')', ':', 'line', '=', 'line', '[', ':', '-', '1', ']', 'nextline', '=', 'next', '(', 'liter', ',', "''", ')', 'source', '+=', 'nextline', 'line', '+=', 'nextline', '.', 'lstrip', '(', "' \\t\\f'", ')', '.', 'rstrip', '(', "'\\r\\n'", ')', 'if', 'line', '==', "''", ':', '# series of otherwise-blank lines with continuations', 'yield', '(', 'None', ',', 'None', ',', 'source', ')', 'continue', 'm', '=', 're', '.', 'search', '(', "r'(?<!\\\\)(?:\\\\\\\\)*([ \\t\\f]*[=:]|[ \\t\\f])[ \\t\\f]*'", ',', 'line', ')', 'if', 'm', ':', 'yield', '(', 'unescape', '(', 'line', '[', ':', 'm', '.', 'start', '(', '1', ')', ']', ')', ',', 'unescape', '(', 'line', '[', 'm', '.', 'end', '(', ')', ':', ']', ')', ',', 'source', ')', 'else', ':', 'yield', '(', 'unescape', '(', 'line', ')', ',', "''", ',', 'source', ')'] | Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a
generator of ``(key, value, original_lines)`` triples for every entry in
``fp`` (including duplicate keys) in order of occurrence. The third
element of each triple is the concatenation of the unmodified lines in
``fp`` (including trailing newlines) from which the key and value were
extracted. The generator also includes comments and blank/all-whitespace
lines found in ``fp``, one triple per line, with the first two elements of
the triples set to `None`. This is the only way to extract comments from a
``.properties`` file with this library.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: generator of triples of text strings
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | ['Parse', 'the', 'contents', 'of', 'the', '~io', '.', 'IOBase', '.', 'readline', '-', 'supporting', 'file', '-', 'like', 'object', 'fp', 'as', 'a', 'simple', 'line', '-', 'oriented', '.', 'properties', 'file', 'and', 'return', 'a', 'generator', 'of', '(', 'key', 'value', 'original_lines', ')', 'triples', 'for', 'every', 'entry', 'in', 'fp', '(', 'including', 'duplicate', 'keys', ')', 'in', 'order', 'of', 'occurrence', '.', 'The', 'third', 'element', 'of', 'each', 'triple', 'is', 'the', 'concatenation', 'of', 'the', 'unmodified', 'lines', 'in', 'fp', '(', 'including', 'trailing', 'newlines', ')', 'from', 'which', 'the', 'key', 'and', 'value', 'were', 'extracted', '.', 'The', 'generator', 'also', 'includes', 'comments', 'and', 'blank', '/', 'all', '-', 'whitespace', 'lines', 'found', 'in', 'fp', 'one', 'triple', 'per', 'line', 'with', 'the', 'first', 'two', 'elements', 'of', 'the', 'triples', 'set', 'to', 'None', '.', 'This', 'is', 'the', 'only', 'way', 'to', 'extract', 'comments', 'from', 'a', '.', 'properties', 'file', 'with', 'this', 'library', '.'] | train | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/reading.py#L68-L123 |
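A usage sketch, assuming a javaproperties version whose parse() yields the (key, value, original_lines) triples documented in this record:

```python
import io

import javaproperties

fp = io.StringIO(
    "# fruits we stock\n"
    "fruits = apple, banana, \\\n"
    "         pear\n"
)
for key, value, source in javaproperties.parse(fp):
    print(repr(key), repr(value))
# None None                        <- the comment line
# 'fruits' 'apple, banana, pear'   <- the continuation line is folded in
```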
7,779 | jobovy/galpy | galpy/df/streamdf.py | streamdf._parse_call_args | def _parse_call_args(self,*args,**kwargs):
"""Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:)"""
interp= kwargs.get('interp',self._useInterp)
if len(args) == 5:
raise IOError("Must specify phi for streamdf")
elif len(args) == 6:
if kwargs.get('aAInput',False):
if isinstance(args[0],(int,float,numpy.float32,numpy.float64)):
out= numpy.empty((6,1))
else:
out= numpy.empty((6,len(args[0])))
for ii in range(6):
out[ii,:]= args[ii]
return out
else:
return self._approxaA(*args,interp=interp)
elif isinstance(args[0],Orbit):
o= args[0]
return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(),
interp=interp)
elif isinstance(args[0],list) and isinstance(args[0][0],Orbit):
R, vR, vT, z, vz, phi= [], [], [], [], [], []
for o in args[0]:
R.append(o.R())
vR.append(o.vR())
vT.append(o.vT())
z.append(o.z())
vz.append(o.vz())
phi.append(o.phi())
return self._approxaA(numpy.array(R),numpy.array(vR),
numpy.array(vT),numpy.array(z),
numpy.array(vz),numpy.array(phi),
interp=interp) | python | def _parse_call_args(self,*args,**kwargs):
"""Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:)"""
interp= kwargs.get('interp',self._useInterp)
if len(args) == 5:
raise IOError("Must specify phi for streamdf")
elif len(args) == 6:
if kwargs.get('aAInput',False):
if isinstance(args[0],(int,float,numpy.float32,numpy.float64)):
out= numpy.empty((6,1))
else:
out= numpy.empty((6,len(args[0])))
for ii in range(6):
out[ii,:]= args[ii]
return out
else:
return self._approxaA(*args,interp=interp)
elif isinstance(args[0],Orbit):
o= args[0]
return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(),
interp=interp)
elif isinstance(args[0],list) and isinstance(args[0][0],Orbit):
R, vR, vT, z, vz, phi= [], [], [], [], [], []
for o in args[0]:
R.append(o.R())
vR.append(o.vR())
vT.append(o.vT())
z.append(o.z())
vz.append(o.vz())
phi.append(o.phi())
return self._approxaA(numpy.array(R),numpy.array(vR),
numpy.array(vT),numpy.array(z),
numpy.array(vz),numpy.array(phi),
interp=interp) | ['def', '_parse_call_args', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'interp', '=', 'kwargs', '.', 'get', '(', "'interp'", ',', 'self', '.', '_useInterp', ')', 'if', 'len', '(', 'args', ')', '==', '5', ':', 'raise', 'IOError', '(', '"Must specify phi for streamdf"', ')', 'elif', 'len', '(', 'args', ')', '==', '6', ':', 'if', 'kwargs', '.', 'get', '(', "'aAInput'", ',', 'False', ')', ':', 'if', 'isinstance', '(', 'args', '[', '0', ']', ',', '(', 'int', ',', 'float', ',', 'numpy', '.', 'float32', ',', 'numpy', '.', 'float64', ')', ')', ':', 'out', '=', 'numpy', '.', 'empty', '(', '(', '6', ',', '1', ')', ')', 'else', ':', 'out', '=', 'numpy', '.', 'empty', '(', '(', '6', ',', 'len', '(', 'args', '[', '0', ']', ')', ')', ')', 'for', 'ii', 'in', 'range', '(', '6', ')', ':', 'out', '[', 'ii', ',', ':', ']', '=', 'args', '[', 'ii', ']', 'return', 'out', 'else', ':', 'return', 'self', '.', '_approxaA', '(', '*', 'args', ',', 'interp', '=', 'interp', ')', 'elif', 'isinstance', '(', 'args', '[', '0', ']', ',', 'Orbit', ')', ':', 'o', '=', 'args', '[', '0', ']', 'return', 'self', '.', '_approxaA', '(', 'o', '.', 'R', '(', ')', ',', 'o', '.', 'vR', '(', ')', ',', 'o', '.', 'vT', '(', ')', ',', 'o', '.', 'z', '(', ')', ',', 'o', '.', 'vz', '(', ')', ',', 'o', '.', 'phi', '(', ')', ',', 'interp', '=', 'interp', ')', 'elif', 'isinstance', '(', 'args', '[', '0', ']', ',', 'list', ')', 'and', 'isinstance', '(', 'args', '[', '0', ']', '[', '0', ']', ',', 'Orbit', ')', ':', 'R', ',', 'vR', ',', 'vT', ',', 'z', ',', 'vz', ',', 'phi', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'o', 'in', 'args', '[', '0', ']', ':', 'R', '.', 'append', '(', 'o', '.', 'R', '(', ')', ')', 'vR', '.', 'append', '(', 'o', '.', 'vR', '(', ')', ')', 'vT', '.', 'append', '(', 'o', '.', 'vT', '(', ')', ')', 'z', '.', 'append', '(', 'o', '.', 'z', '(', ')', ')', 'vz', '.', 'append', '(', 'o', '.', 'vz', '(', ')', ')', 'phi', '.', 'append', '(', 'o', '.', 'phi', '(', ')', ')', 'return', 'self', '.', '_approxaA', '(', 'numpy', '.', 'array', '(', 'R', ')', ',', 'numpy', '.', 'array', '(', 'vR', ')', ',', 'numpy', '.', 'array', '(', 'vT', ')', ',', 'numpy', '.', 'array', '(', 'z', ')', ',', 'numpy', '.', 'array', '(', 'vz', ')', ',', 'numpy', '.', 'array', '(', 'phi', ')', ',', 'interp', '=', 'interp', ')'] | Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:) | ['Helper', 'function', 'to', 'parse', 'the', 'arguments', 'to', 'the', '__call__', 'and', 'related', 'functions', 'return', '[', '6', 'nobj', ']', 'array', 'of', 'frequencies', '(', ':', '3', ')', 'and', 'angles', '(', '3', ':', ')'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L2606-L2639 |
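A numpy-only sketch of the `aAInput` branch above, showing the `[6, nobj]` layout the helper returns; the input values are made up:

```python
import numpy as np

# Six equal-length argument arrays, as accepted by the aAInput branch.
args = [np.full(2, float(ii)) for ii in range(6)]
out = np.empty((6, len(args[0])))
for ii in range(6):
    out[ii, :] = args[ii]
freqs, angles = out[:3], out[3:]  # rows 0-2 are frequencies, rows 3-5 angles
```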
7,780 | pantsbuild/pants | src/python/pants/goal/run_tracker.py | RunTracker.post_stats | def post_stats(cls, stats_url, stats, timeout=2, auth_provider=None):
"""POST stats to the given url.
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report already closed, so just print error.
print('WARNING: Failed to upload stats to {} due to {}'.format(stats_url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this.
params = {k: cls._json_dump_options(v) for (k, v) in stats.items()}
cookies = Cookies.global_instance()
auth_provider = auth_provider or '<provider>'
# We can't simply let requests handle redirects, as we only allow them for specific codes:
# 307 and 308 indicate that the redirected request must use the same method, POST in this case.
# So they indicate a true redirect of the POST itself, and we allow them.
# The other redirect codes either must, or in practice do, cause the user agent to switch the
# method to GET. So when they are encountered on a POST, it indicates an auth problem (a
# redirection to a login page).
def do_post(url, num_redirects_allowed):
if num_redirects_allowed < 0:
return error('too many redirects.')
r = requests.post(url, data=params, timeout=timeout,
cookies=cookies.get_cookie_jar(), allow_redirects=False)
if r.status_code in {307, 308}:
return do_post(r.headers['location'], num_redirects_allowed - 1)
elif r.status_code != 200:
error('HTTP error code: {}. Reason: {}.'.format(r.status_code, r.reason))
if 300 <= r.status_code < 400 or r.status_code == 401:
print('Use `path/to/pants login --to={}` to authenticate against the stats '
'upload service.'.format(auth_provider), file=sys.stderr)
return False
return True
try:
return do_post(stats_url, num_redirects_allowed=6)
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error('Error: {}'.format(e)) | python | def post_stats(cls, stats_url, stats, timeout=2, auth_provider=None):
"""POST stats to the given url.
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report already closed, so just print error.
print('WARNING: Failed to upload stats to {} due to {}'.format(stats_url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this.
params = {k: cls._json_dump_options(v) for (k, v) in stats.items()}
cookies = Cookies.global_instance()
auth_provider = auth_provider or '<provider>'
# We can't simply let requests handle redirects, as we only allow them for specific codes:
# 307 and 308 indicate that the redirected request must use the same method, POST in this case.
# So they indicate a true redirect of the POST itself, and we allow them.
# The other redirect codes either must, or in practice do, cause the user agent to switch the
# method to GET. So when they are encountered on a POST, it indicates an auth problem (a
# redirection to a login page).
def do_post(url, num_redirects_allowed):
if num_redirects_allowed < 0:
return error('too many redirects.')
r = requests.post(url, data=params, timeout=timeout,
cookies=cookies.get_cookie_jar(), allow_redirects=False)
if r.status_code in {307, 308}:
return do_post(r.headers['location'], num_redirects_allowed - 1)
elif r.status_code != 200:
error('HTTP error code: {}. Reason: {}.'.format(r.status_code, r.reason))
if 300 <= r.status_code < 400 or r.status_code == 401:
print('Use `path/to/pants login --to={}` to authenticate against the stats '
'upload service.'.format(auth_provider), file=sys.stderr)
return False
return True
try:
return do_post(stats_url, num_redirects_allowed=6)
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error('Error: {}'.format(e)) | ['def', 'post_stats', '(', 'cls', ',', 'stats_url', ',', 'stats', ',', 'timeout', '=', '2', ',', 'auth_provider', '=', 'None', ')', ':', 'def', 'error', '(', 'msg', ')', ':', '# Report aleady closed, so just print error.', 'print', '(', "'WARNING: Failed to upload stats to {}. due to {}'", '.', 'format', '(', 'stats_url', ',', 'msg', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'False', '# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON', '# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).', '# But this will first require changing the upload receiver at every shop that uses this.', 'params', '=', '{', 'k', ':', 'cls', '.', '_json_dump_options', '(', 'v', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'stats', '.', 'items', '(', ')', '}', 'cookies', '=', 'Cookies', '.', 'global_instance', '(', ')', 'auth_provider', '=', 'auth_provider', 'or', "'<provider>'", "# We can't simply let requests handle redirects, as we only allow them for specific codes:", '# 307 and 308 indicate that the redirected request must use the same method, POST in this case.', '# So they indicate a true redirect of the POST itself, and we allow them.', '# The other redirect codes either must, or in practice do, cause the user agent to switch the', '# method to GET. So when they are encountered on a POST, it indicates an auth problem (a', '# redirection to a login page).', 'def', 'do_post', '(', 'url', ',', 'num_redirects_allowed', ')', ':', 'if', 'num_redirects_allowed', '<', '0', ':', 'return', 'error', '(', "'too many redirects.'", ')', 'r', '=', 'requests', '.', 'post', '(', 'url', ',', 'data', '=', 'params', ',', 'timeout', '=', 'timeout', ',', 'cookies', '=', 'cookies', '.', 'get_cookie_jar', '(', ')', ',', 'allow_redirects', '=', 'False', ')', 'if', 'r', '.', 'status_code', 'in', '{', '307', ',', '308', '}', ':', 'return', 'do_post', '(', 'r', '.', 'headers', '[', "'location'", ']', ',', 'num_redirects_allowed', '-', '1', ')', 'elif', 'r', '.', 'status_code', '!=', '200', ':', 'error', '(', "'HTTP error code: {}. Reason: {}.'", '.', 'format', '(', 'r', '.', 'status_code', ',', 'r', '.', 'reason', ')', ')', 'if', '300', '<=', 'r', '.', 'status_code', '<', '400', 'or', 'r', '.', 'status_code', '==', '401', ':', 'print', '(', "'Use `path/to/pants login --to={}` to authenticate against the stats '", "'upload service.'", '.', 'format', '(', 'auth_provider', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'False', 'return', 'True', 'try', ':', 'return', 'do_post', '(', 'stats_url', ',', 'num_redirects_allowed', '=', '6', ')', 'except', 'Exception', 'as', 'e', ':', "# Broad catch - we don't want to fail the build over upload errors.", 'return', 'error', '(', "'Error: {}'", '.', 'format', '(', 'e', ')', ')'] | POST stats to the given url.
:return: True if upload was successful, False otherwise. | ['POST', 'stats', 'to', 'the', 'given', 'url', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/run_tracker.py#L355-L397 |
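A self-contained sketch of the redirect policy used by `do_post` above: only the method-preserving 307/308 redirects are followed, up to a fixed budget, and any other status on the POST decides the outcome. The cookies and error reporting from the original are deliberately left out:

```python
import requests

def post_limited_redirects(url, data, max_redirects=6, timeout=2):
    for _ in range(max_redirects + 1):
        r = requests.post(url, data=data, timeout=timeout,
                          allow_redirects=False)
        if r.status_code in (307, 308):
            url = r.headers['location']  # true redirect of the POST itself
            continue
        # Any other 3xx (or 401) on a POST signals an auth redirect.
        return r.status_code == 200
    return False  # redirect budget exhausted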
7,781 | ibis-project/ibis | ibis/pandas/execution/generic.py | wrap_case_result | def wrap_case_result(raw, expr):
"""Wrap a CASE statement result in a Series and handle returning scalars.
Parameters
----------
raw : ndarray[T]
The raw results of executing the ``CASE`` expression
expr : ValueExpr
The expression from which `raw` was computed
Returns
-------
Union[scalar, Series]
"""
raw_1d = np.atleast_1d(raw)
if np.any(pd.isnull(raw_1d)):
result = pd.Series(raw_1d)
else:
result = pd.Series(
raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
)
if result.size == 1 and isinstance(expr, ir.ScalarExpr):
return result.item()
return result | python | def wrap_case_result(raw, expr):
"""Wrap a CASE statement result in a Series and handle returning scalars.
Parameters
----------
raw : ndarray[T]
The raw results of executing the ``CASE`` expression
expr : ValueExpr
The expression from which `raw` was computed
Returns
-------
Union[scalar, Series]
"""
raw_1d = np.atleast_1d(raw)
if np.any(pd.isnull(raw_1d)):
result = pd.Series(raw_1d)
else:
result = pd.Series(
raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
)
if result.size == 1 and isinstance(expr, ir.ScalarExpr):
return result.item()
return result | ['def', 'wrap_case_result', '(', 'raw', ',', 'expr', ')', ':', 'raw_1d', '=', 'np', '.', 'atleast_1d', '(', 'raw', ')', 'if', 'np', '.', 'any', '(', 'pd', '.', 'isnull', '(', 'raw_1d', ')', ')', ':', 'result', '=', 'pd', '.', 'Series', '(', 'raw_1d', ')', 'else', ':', 'result', '=', 'pd', '.', 'Series', '(', 'raw_1d', ',', 'dtype', '=', 'constants', '.', 'IBIS_TYPE_TO_PANDAS_TYPE', '[', 'expr', '.', 'type', '(', ')', ']', ')', 'if', 'result', '.', 'size', '==', '1', 'and', 'isinstance', '(', 'expr', ',', 'ir', '.', 'ScalarExpr', ')', ':', 'return', 'result', '.', 'item', '(', ')', 'return', 'result'] | Wrap a CASE statement result in a Series and handle returning scalars.
Parameters
----------
raw : ndarray[T]
The raw results of executing the ``CASE`` expression
expr : ValueExpr
The expression from which `raw` was computed
Returns
-------
Union[scalar, Series] | ['Wrap', 'a', 'CASE', 'statement', 'result', 'in', 'a', 'Series', 'and', 'handle', 'returning', 'scalars', '.'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/execution/generic.py#L930-L953 |
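A short sketch of the scalar-unboxing behaviour described above, using plain numpy/pandas; the dtype lookup through `IBIS_TYPE_TO_PANDAS_TYPE` is replaced with a literal dtype:

```python
import numpy as np
import pandas as pd

raw = np.float64(3.5)           # a scalar result from a CASE expression
raw_1d = np.atleast_1d(raw)     # -> array([3.5])
result = pd.Series(raw_1d, dtype='float64')
# A size-1 result for a scalar expression is unboxed to a Python scalar.
value = result.item() if result.size == 1 else result
print(value)  # 3.5
```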
7,782 | cloud-custodian/cloud-custodian | c7n/policy.py | Policy.get_variables | def get_variables(self, variables=None):
"""Get runtime variables for policy interpolation.
Runtime variables are merged with the passed in variables
if any.
"""
# Global policy variable expansion, we have to carry forward on
# various filter/action local vocabularies. Where possible defer
# by using a format string.
#
# See https://github.com/capitalone/cloud-custodian/issues/2330
if not variables:
variables = {}
if 'mode' in self.data:
if 'role' in self.data['mode'] and not self.data['mode']['role'].startswith("arn:aws"):
self.data['mode']['role'] = "arn:aws:iam::%s:role/%s" % \
(self.options.account_id, self.data['mode']['role'])
variables.update({
# standard runtime variables for interpolation
'account': '{account}',
'account_id': self.options.account_id,
'region': self.options.region,
# non-standard runtime variables from local filter/action vocabularies
#
# notify action
'policy': self.data,
'event': '{event}',
# mark for op action
'op': '{op}',
'action_date': '{action_date}',
# tag action pyformat-date handling
'now': utils.FormatDate(datetime.utcnow()),
# account increase limit action
'service': '{service}',
# s3 set logging action :-( see if we can revisit this one.
'bucket_region': '{bucket_region}',
'bucket_name': '{bucket_name}',
'source_bucket_name': '{source_bucket_name}',
'target_bucket_name': '{target_bucket_name}',
'target_prefix': '{target_prefix}',
'LoadBalancerName': '{LoadBalancerName}'
})
return variables | python | def get_variables(self, variables=None):
"""Get runtime variables for policy interpolation.
Runtime variables are merged with the passed in variables
if any.
"""
# Global policy variable expansion, we have to carry forward on
# various filter/action local vocabularies. Where possible defer
# by using a format string.
#
# See https://github.com/capitalone/cloud-custodian/issues/2330
if not variables:
variables = {}
if 'mode' in self.data:
if 'role' in self.data['mode'] and not self.data['mode']['role'].startswith("arn:aws"):
self.data['mode']['role'] = "arn:aws:iam::%s:role/%s" % \
(self.options.account_id, self.data['mode']['role'])
variables.update({
# standard runtime variables for interpolation
'account': '{account}',
'account_id': self.options.account_id,
'region': self.options.region,
# non-standard runtime variables from local filter/action vocabularies
#
# notify action
'policy': self.data,
'event': '{event}',
# mark for op action
'op': '{op}',
'action_date': '{action_date}',
# tag action pyformat-date handling
'now': utils.FormatDate(datetime.utcnow()),
# account increase limit action
'service': '{service}',
# s3 set logging action :-( see if we can revisit this one.
'bucket_region': '{bucket_region}',
'bucket_name': '{bucket_name}',
'source_bucket_name': '{source_bucket_name}',
'target_bucket_name': '{target_bucket_name}',
'target_prefix': '{target_prefix}',
'LoadBalancerName': '{LoadBalancerName}'
})
return variables | ['def', 'get_variables', '(', 'self', ',', 'variables', '=', 'None', ')', ':', '# Global policy variable expansion, we have to carry forward on', '# various filter/action local vocabularies. Where possible defer', '# by using a format string.', '#', '# See https://github.com/capitalone/cloud-custodian/issues/2330', 'if', 'not', 'variables', ':', 'variables', '=', '{', '}', 'if', "'mode'", 'in', 'self', '.', 'data', ':', 'if', "'role'", 'in', 'self', '.', 'data', '[', "'mode'", ']', 'and', 'not', 'self', '.', 'data', '[', "'mode'", ']', '[', "'role'", ']', '.', 'startswith', '(', '"arn:aws"', ')', ':', 'self', '.', 'data', '[', "'mode'", ']', '[', "'role'", ']', '=', '"arn:aws:iam::%s:role/%s"', '%', '(', 'self', '.', 'options', '.', 'account_id', ',', 'self', '.', 'data', '[', "'mode'", ']', '[', "'role'", ']', ')', 'variables', '.', 'update', '(', '{', '# standard runtime variables for interpolation', "'account'", ':', "'{account}'", ',', "'account_id'", ':', 'self', '.', 'options', '.', 'account_id', ',', "'region'", ':', 'self', '.', 'options', '.', 'region', ',', '# non-standard runtime variables from local filter/action vocabularies', '#', '# notify action', "'policy'", ':', 'self', '.', 'data', ',', "'event'", ':', "'{event}'", ',', '# mark for op action', "'op'", ':', "'{op}'", ',', "'action_date'", ':', "'{action_date}'", ',', '# tag action pyformat-date handling', "'now'", ':', 'utils', '.', 'FormatDate', '(', 'datetime', '.', 'utcnow', '(', ')', ')', ',', '# account increase limit action', "'service'", ':', "'{service}'", ',', '# s3 set logging action :-( see if we can revisit this one.', "'bucket_region'", ':', "'{bucket_region}'", ',', "'bucket_name'", ':', "'{bucket_name}'", ',', "'source_bucket_name'", ':', "'{source_bucket_name}'", ',', "'target_bucket_name'", ':', "'{target_bucket_name}'", ',', "'target_prefix'", ':', "'{target_prefix}'", ',', "'LoadBalancerName'", ':', "'{LoadBalancerName}'", '}', ')', 'return', 'variables'] | Get runtime variables for policy interpolation.
Runtime variables are merged with the passed in variables
if any. | ['Get', 'runtime', 'variables', 'for', 'policy', 'interpolation', '.'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/policy.py#L836-L880 |
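The "defer by using a format string" trick in the comment above can be shown with two `str.format` passes; the template and values here are hypothetical:

```python
variables = {
    'account_id': '123456789012',  # known at policy-expansion time
    'region': 'us-east-1',
    'op': '{op}',                  # deferred to the action's own vocabulary
}
template = 'Resource in {region} marked for {op} (account {account_id})'
first_pass = template.format(**variables)
# 'Resource in us-east-1 marked for {op} (account 123456789012)'
second_pass = first_pass.format(op='stop')  # the action fills in {op} later
```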
7,783 | inasafe/inasafe | safe/datastore/folder.py | Folder._add_vector_layer | def _add_vector_layer(self, vector_layer, layer_name, save_style=False):
"""Add a vector layer to the folder.
:param vector_layer: The layer to add.
:type vector_layer: QgsVectorLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0
"""
if not self.is_writable():
return False, 'The destination is not writable.'
output = QFileInfo(
self.uri.filePath(layer_name + '.' + self._default_vector_format))
driver_mapping = {
'shp': 'ESRI Shapefile',
'kml': 'KML',
'geojson': 'GeoJSON',
}
QgsVectorFileWriter.writeAsVectorFormat(
vector_layer,
output.absoluteFilePath(),
'utf-8',
QgsCoordinateTransform(), # No transformation
driver_mapping[self._default_vector_format])
if save_style:
style_path = QFileInfo(self.uri.filePath(layer_name + '.qml'))
vector_layer.saveNamedStyle(style_path.absoluteFilePath())
assert output.exists()
return True, output.baseName() | python | def _add_vector_layer(self, vector_layer, layer_name, save_style=False):
"""Add a vector layer to the folder.
:param vector_layer: The layer to add.
:type vector_layer: QgsVectorLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0
"""
if not self.is_writable():
return False, 'The destination is not writable.'
output = QFileInfo(
self.uri.filePath(layer_name + '.' + self._default_vector_format))
driver_mapping = {
'shp': 'ESRI Shapefile',
'kml': 'KML',
'geojson': 'GeoJSON',
}
QgsVectorFileWriter.writeAsVectorFormat(
vector_layer,
output.absoluteFilePath(),
'utf-8',
QgsCoordinateTransform(), # No transformation
driver_mapping[self._default_vector_format])
if save_style:
style_path = QFileInfo(self.uri.filePath(layer_name + '.qml'))
vector_layer.saveNamedStyle(style_path.absoluteFilePath())
assert output.exists()
return True, output.baseName() | ['def', '_add_vector_layer', '(', 'self', ',', 'vector_layer', ',', 'layer_name', ',', 'save_style', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'is_writable', '(', ')', ':', 'return', 'False', ',', "'The destination is not writable.'", 'output', '=', 'QFileInfo', '(', 'self', '.', 'uri', '.', 'filePath', '(', 'layer_name', '+', "'.'", '+', 'self', '.', '_default_vector_format', ')', ')', 'driver_mapping', '=', '{', "'shp'", ':', "'ESRI Shapefile'", ',', "'kml'", ':', "'KML'", ',', "'geojson'", ':', "'GeoJSON'", ',', '}', 'QgsVectorFileWriter', '.', 'writeAsVectorFormat', '(', 'vector_layer', ',', 'output', '.', 'absoluteFilePath', '(', ')', ',', "'utf-8'", ',', 'QgsCoordinateTransform', '(', ')', ',', '# No tranformation', 'driver_mapping', '[', 'self', '.', '_default_vector_format', ']', ')', 'if', 'save_style', ':', 'style_path', '=', 'QFileInfo', '(', 'self', '.', 'uri', '.', 'filePath', '(', 'layer_name', '+', "'.qml'", ')', ')', 'vector_layer', '.', 'saveNamedStyle', '(', 'style_path', '.', 'absoluteFilePath', '(', ')', ')', 'assert', 'output', '.', 'exists', '(', ')', 'return', 'True', ',', 'output', '.', 'baseName', '(', ')'] | Add a vector layer to the folder.
:param vector_layer: The layer to add.
:type vector_layer: QgsVectorLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0 | ['Add', 'a', 'vector', 'layer', 'to', 'the', 'folder', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/datastore/folder.py#L182-L226 |
7,784 | mlperf/training | object_detection/pytorch/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py | keep_only_positive_boxes | def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds | python | def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds | ['def', 'keep_only_positive_boxes', '(', 'boxes', ')', ':', 'assert', 'isinstance', '(', 'boxes', ',', '(', 'list', ',', 'tuple', ')', ')', 'assert', 'isinstance', '(', 'boxes', '[', '0', ']', ',', 'BoxList', ')', 'assert', 'boxes', '[', '0', ']', '.', 'has_field', '(', '"labels"', ')', 'positive_boxes', '=', '[', ']', 'positive_inds', '=', '[', ']', 'num_boxes', '=', '0', 'for', 'boxes_per_image', 'in', 'boxes', ':', 'labels', '=', 'boxes_per_image', '.', 'get_field', '(', '"labels"', ')', 'inds_mask', '=', 'labels', '>', '0', 'inds', '=', 'inds_mask', '.', 'nonzero', '(', ')', '.', 'squeeze', '(', '1', ')', 'positive_boxes', '.', 'append', '(', 'boxes_per_image', '[', 'inds', ']', ')', 'positive_inds', '.', 'append', '(', 'inds_mask', ')', 'return', 'positive_boxes', ',', 'positive_inds'] | Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList) | ['Given', 'a', 'set', 'of', 'BoxList', 'containing', 'the', 'labels', 'field', 'return', 'a', 'set', 'of', 'BoxList', 'for', 'which', 'labels', '>', '0', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py#L13-L33 |
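A runnable sketch of the mask/index selection at the core of the function above, using bare tensors instead of `BoxList`:

```python
import torch

labels = torch.tensor([0, 2, 0, 1])    # 0 = background
inds_mask = labels > 0                 # tensor([False, True, False, True])
inds = inds_mask.nonzero().squeeze(1)  # tensor([1, 3])
boxes = torch.arange(16, dtype=torch.float32).view(4, 4)
positive_boxes = boxes[inds]           # keeps only rows with labels > 0
```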
7,785 | bdauvergne/python-oath | oath/_hotp.py | hotp | def hotp(key,counter,format='dec6',hash=hashlib.sha1):
'''
Compute a HOTP value as prescribed by RFC4226
:param key:
the HOTP secret key given as a hexadecimal string
:param counter:
the OTP generation counter
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
Examples:
>>> hotp('343434', 2, format='dec6')
'791903'
'''
bin_hotp = __hotp(key, counter, hash)
if format == 'dec4':
return dec(bin_hotp, 4)
elif format == 'dec6':
return dec(bin_hotp, 6)
elif format == 'dec7':
return dec(bin_hotp, 7)
elif format == 'dec8':
return dec(bin_hotp, 8)
elif format == 'hex':
return '%x' % truncated_value(bin_hotp)
elif format == 'hex-notrunc':
return _utils.tohex(bin_hotp)
elif format == 'bin':
return bin_hotp
elif format == 'dec':
return str(truncated_value(bin_hotp))
else:
raise ValueError('unknown format') | python | def hotp(key,counter,format='dec6',hash=hashlib.sha1):
'''
Compute a HOTP value as prescribed by RFC4226
:param key:
the HOTP secret key given as a hexadecimal string
:param counter:
the OTP generation counter
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
Examples:
>>> hotp('343434', 2, format='dec6')
'791903'
'''
bin_hotp = __hotp(key, counter, hash)
if format == 'dec4':
return dec(bin_hotp, 4)
elif format == 'dec6':
return dec(bin_hotp, 6)
elif format == 'dec7':
return dec(bin_hotp, 7)
elif format == 'dec8':
return dec(bin_hotp, 8)
elif format == 'hex':
return '%x' % truncated_value(bin_hotp)
elif format == 'hex-notrunc':
return _utils.tohex(bin_hotp)
elif format == 'bin':
return bin_hotp
elif format == 'dec':
return str(truncated_value(bin_hotp))
else:
raise ValueError('unknown format') | ['def', 'hotp', '(', 'key', ',', 'counter', ',', 'format', '=', "'dec6'", ',', 'hash', '=', 'hashlib', '.', 'sha1', ')', ':', 'bin_hotp', '=', '__hotp', '(', 'key', ',', 'counter', ',', 'hash', ')', 'if', 'format', '==', "'dec4'", ':', 'return', 'dec', '(', 'bin_hotp', ',', '4', ')', 'elif', 'format', '==', "'dec6'", ':', 'return', 'dec', '(', 'bin_hotp', ',', '6', ')', 'elif', 'format', '==', "'dec7'", ':', 'return', 'dec', '(', 'bin_hotp', ',', '7', ')', 'elif', 'format', '==', "'dec8'", ':', 'return', 'dec', '(', 'bin_hotp', ',', '8', ')', 'elif', 'format', '==', "'hex'", ':', 'return', "'%x'", '%', 'truncated_value', '(', 'bin_hotp', ')', 'elif', 'format', '==', "'hex-notrunc'", ':', 'return', '_utils', '.', 'tohex', '(', 'bin_hotp', ')', 'elif', 'format', '==', "'bin'", ':', 'return', 'bin_hotp', 'elif', 'format', '==', "'dec'", ':', 'return', 'str', '(', 'truncated_value', '(', 'bin_hotp', ')', ')', 'else', ':', 'raise', 'ValueError', '(', "'unknown format'", ')'] | Compute a HOTP value as prescribed by RFC4226
:param key:
the HOTP secret key given as a hexadecimal string
:param counter:
the OTP generation counter
:param format:
the output format, can be:
- hex, for a variable length hexadecimal format,
- hex-notrunc, for a 40 characters hexadecimal non-truncated format,
- dec4, for a 4 characters decimal format,
- dec6,
- dec7, or
- dec8
it defaults to dec6.
:param hash:
the hash module (usually from the hashlib package) to use,
it defaults to hashlib.sha1.
:returns:
a string representation of the OTP value (as instructed by the format parameter).
Examples:
>>> hotp('343434', 2, format='dec6')
'791903' | ['Compute', 'a', 'HOTP', 'value', 'as', 'prescribed', 'by', 'RFC4226'] | train | https://github.com/bdauvergne/python-oath/blob/c37cd63880b39032b9ba69cd1516e6fb06923e46/oath/_hotp.py#L43-L91 |
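Usage follows directly from the doctest above; a couple of extra format values, assuming the `oath` package is installed:

```python
from oath import hotp

print(hotp('343434', 2))                 # '791903', per the doctest above
print(hotp('343434', 2, format='dec8'))  # 8-digit decimal variant
print(hotp('343434', 2, format='hex'))   # truncated hexadecimal variant
```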
7,786 | housecanary/hc-api-python | housecanary/apiclient.py | PropertyComponentWrapper.rental_report | def rental_report(self, address, zipcode, format_type="json"):
"""Call the rental_report component
Rental Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- format_type - "json", "xlsx" or "all". Default is "json".
"""
# only json is supported by rental report.
query_params = {
"format": format_type,
"address": address,
"zipcode": zipcode
}
return self._api_client.fetch_synchronous("property/rental_report", query_params) | python | def rental_report(self, address, zipcode, format_type="json"):
"""Call the rental_report component
Rental Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- format_type - "json", "xlsx" or "all". Default is "json".
"""
# only json is supported by rental report.
query_params = {
"format": format_type,
"address": address,
"zipcode": zipcode
}
return self._api_client.fetch_synchronous("property/rental_report", query_params) | ['def', 'rental_report', '(', 'self', ',', 'address', ',', 'zipcode', ',', 'format_type', '=', '"json"', ')', ':', '# only json is supported by rental report.', 'query_params', '=', '{', '"format"', ':', 'format_type', ',', '"address"', ':', 'address', ',', '"zipcode"', ':', 'zipcode', '}', 'return', 'self', '.', '_api_client', '.', 'fetch_synchronous', '(', '"property/rental_report"', ',', 'query_params', ')'] | Call the rental_report component
Rental Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- format_type - "json", "xlsx" or "all". Default is "json". | ['Call', 'the', 'rental_report', 'component'] | train | https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/apiclient.py#L435-L455 |
7,787 | kensho-technologies/graphql-compiler | graphql_compiler/query_formatting/gremlin_formatting.py | _safe_gremlin_string | def _safe_gremlin_string(value):
"""Sanitize and represent a string argument in Gremlin."""
if not isinstance(value, six.string_types):
if isinstance(value, bytes): # should only happen in py3
value = value.decode('utf-8')
else:
raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: '
u'{}'.format(value))
# Using JSON encoding means that all unicode literals and special chars
# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.
# However, the quoted result is wrapped in double quotes, and $ signs are not escaped,
# so that would allow arbitrary code execution in Gremlin.
# We will therefore turn the double-quoted string into a single-quoted one to avoid this risk.
escaped_and_quoted = json.dumps(value)
# Double-quoted string literals in Gremlin/Groovy allow
# arbitrary code execution via string interpolation and closures.
# To avoid this, we perform the following steps:
# - we strip the wrapping double quotes;
# - we un-escape any double-quotes in the string, by replacing \" with ";
# - we escape any single-quotes in the string, by replacing ' with \';
# - finally, we wrap the string in single quotes.
# http://www.groovy-lang.org/syntax.html#_double_quoted_string
if not escaped_and_quoted[0] == escaped_and_quoted[-1] == '"':
raise AssertionError(u'Unreachable state reached: {} {}'.format(value, escaped_and_quoted))
no_quotes = escaped_and_quoted[1:-1]
re_escaped = no_quotes.replace('\\"', '"').replace('\'', '\\\'')
final_escaped_value = '\'' + re_escaped + '\''
return final_escaped_value | python | def _safe_gremlin_string(value):
"""Sanitize and represent a string argument in Gremlin."""
if not isinstance(value, six.string_types):
if isinstance(value, bytes): # should only happen in py3
value = value.decode('utf-8')
else:
raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: '
u'{}'.format(value))
# Using JSON encoding means that all unicode literals and special chars
# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.
# However, the quoted result is wrapped in double quotes, and $ signs are not escaped,
# so that would allow arbitrary code execution in Gremlin.
# We will therefore turn the double-quoted string into a single-quoted one to avoid this risk.
escaped_and_quoted = json.dumps(value)
# Double-quoted string literals in Gremlin/Groovy allow
# arbitrary code execution via string interpolation and closures.
# To avoid this, we perform the following steps:
# - we strip the wrapping double quotes;
# - we un-escape any double-quotes in the string, by replacing \" with ";
# - we escape any single-quotes in the string, by replacing ' with \';
# - finally, we wrap the string in single quotes.
# http://www.groovy-lang.org/syntax.html#_double_quoted_string
if not escaped_and_quoted[0] == escaped_and_quoted[-1] == '"':
raise AssertionError(u'Unreachable state reached: {} {}'.format(value, escaped_and_quoted))
no_quotes = escaped_and_quoted[1:-1]
re_escaped = no_quotes.replace('\\"', '"').replace('\'', '\\\'')
final_escaped_value = '\'' + re_escaped + '\''
return final_escaped_value | ['def', '_safe_gremlin_string', '(', 'value', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'six', '.', 'string_types', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'bytes', ')', ':', '# should only happen in py3', 'value', '=', 'value', '.', 'decode', '(', "'utf-8'", ')', 'else', ':', 'raise', 'GraphQLInvalidArgumentError', '(', "u'Attempting to convert a non-string into a string: '", "u'{}'", '.', 'format', '(', 'value', ')', ')', '# Using JSON encoding means that all unicode literals and special chars', '# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.', '# However, the quoted result is wrapped in double quotes, and $ signs are not escaped,', '# so that would allow arbitrary code execution in Gremlin.', '# We will therefore turn the double-quoted string into a single-quoted one to avoid this risk.', 'escaped_and_quoted', '=', 'json', '.', 'dumps', '(', 'value', ')', '# Double-quoted string literals in Gremlin/Groovy allow', '# arbitrary code execution via string interpolation and closures.', '# To avoid this, we perform the following steps:', '# - we strip the wrapping double quotes;', '# - we un-escape any double-quotes in the string, by replacing \\" with ";', "# - we escape any single-quotes in the string, by replacing ' with \\';", '# - finally, we wrap the string in single quotes.', '# http://www.groovy-lang.org/syntax.html#_double_quoted_string', 'if', 'not', 'escaped_and_quoted', '[', '0', ']', '==', 'escaped_and_quoted', '[', '-', '1', ']', '==', '\'"\'', ':', 'raise', 'AssertionError', '(', "u'Unreachable state reached: {} {}'", '.', 'format', '(', 'value', ',', 'escaped_and_quoted', ')', ')', 'no_quotes', '=', 'escaped_and_quoted', '[', '1', ':', '-', '1', ']', 're_escaped', '=', 'no_quotes', '.', 'replace', '(', '\'\\\\"\'', ',', '\'"\'', ')', '.', 'replace', '(', "'\\''", ',', "'\\\\\\''", ')', 'final_escaped_value', '=', "'\\''", '+', 're_escaped', '+', "'\\''", 'return', 'final_escaped_value'] | Sanitize and represent a string argument in Gremlin. | ['Sanitize', 'and', 'represent', 'a', 'string', 'argument', 'in', 'Gremlin', '.'] | train | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/gremlin_formatting.py#L18-L48 |
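The requoting steps can be traced with a standalone snippet that mirrors the function body; the sample string is arbitrary:

```python
import json

value = 'He said "hi" and it\'s $price'
escaped_and_quoted = json.dumps(value)  # double-quoted, specials escaped
no_quotes = escaped_and_quoted[1:-1]    # strip the wrapping double quotes
re_escaped = no_quotes.replace('\\"', '"').replace("'", "\\'")
safe = "'" + re_escaped + "'"           # single-quoted: no $ interpolation
print(safe)
```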
7,788 | getpelican/pelican-plugins | filetime_from_git/actions.py | filetime_from_git | def filetime_from_git(content, git_content):
'''
Update modification and creation times from git
'''
if not content.settings['GIT_FILETIME_FROM_GIT']:
# Disabled for everything
return
if not string_to_bool(content.metadata.get('gittime', 'yes')):
# Disable for this content
return
path = content.source_path
fs_creation_time = datetime_from_timestamp(os.stat(path).st_ctime, content)
fs_modified_time = datetime_from_timestamp(os.stat(path).st_mtime, content)
# 1. file is not managed by git
# date: fs time
# 2. file is staged, but has no commits
# date: fs time
# 3. file is managed, and clean
# date: first commit time, update: last commit time or None
# 4. file is managed, but dirty
# date: first commit time, update: fs time
if git_content.is_managed_by_git():
if git_content.is_committed():
content.date = git_content.get_oldest_commit_date()
if git_content.is_modified():
content.modified = fs_modified_time
else:
content.modified = git_content.get_newest_commit_date()
else:
# File isn't committed
content.date = fs_creation_time
else:
# file is not managed by git
content.date = fs_creation_time
# Clean up content attributes
if not hasattr(content, 'modified'):
content.modified = content.date
if hasattr(content, 'date'):
content.locale_date = strftime(content.date, content.date_format)
if hasattr(content, 'modified'):
content.locale_modified = strftime(
content.modified, content.date_format) | python | def filetime_from_git(content, git_content):
'''
Update modification and creation times from git
'''
if not content.settings['GIT_FILETIME_FROM_GIT']:
# Disabled for everything
return
if not string_to_bool(content.metadata.get('gittime', 'yes')):
# Disable for this content
return
path = content.source_path
fs_creation_time = datetime_from_timestamp(os.stat(path).st_ctime, content)
fs_modified_time = datetime_from_timestamp(os.stat(path).st_mtime, content)
# 1. file is not managed by git
# date: fs time
# 2. file is staged, but has no commits
# date: fs time
# 3. file is managed, and clean
# date: first commit time, update: last commit time or None
# 4. file is managed, but dirty
# date: first commit time, update: fs time
if git_content.is_managed_by_git():
if git_content.is_committed():
content.date = git_content.get_oldest_commit_date()
if git_content.is_modified():
content.modified = fs_modified_time
else:
content.modified = git_content.get_newest_commit_date()
else:
# File isn't committed
content.date = fs_creation_time
else:
# file is not managed by git
content.date = fs_creation_time
# Clean up content attributes
if not hasattr(content, 'modified'):
content.modified = content.date
if hasattr(content, 'date'):
content.locale_date = strftime(content.date, content.date_format)
if hasattr(content, 'modified'):
content.locale_modified = strftime(
content.modified, content.date_format) | ['def', 'filetime_from_git', '(', 'content', ',', 'git_content', ')', ':', 'if', 'not', 'content', '.', 'settings', '[', "'GIT_FILETIME_FROM_GIT'", ']', ':', '# Disabled for everything', 'return', 'if', 'not', 'string_to_bool', '(', 'content', '.', 'metadata', '.', 'get', '(', "'gittime'", ',', "'yes'", ')', ')', ':', '# Disable for this content', 'return', 'path', '=', 'content', '.', 'source_path', 'fs_creation_time', '=', 'datetime_from_timestamp', '(', 'os', '.', 'stat', '(', 'path', ')', '.', 'st_ctime', ',', 'content', ')', 'fs_modified_time', '=', 'datetime_from_timestamp', '(', 'os', '.', 'stat', '(', 'path', ')', '.', 'st_mtime', ',', 'content', ')', '# 1. file is not managed by git', '# date: fs time', '# 2. file is staged, but has no commits', '# date: fs time', '# 3. file is managed, and clean', '# date: first commit time, update: last commit time or None', '# 4. file is managed, but dirty', '# date: first commit time, update: fs time', 'if', 'git_content', '.', 'is_managed_by_git', '(', ')', ':', 'if', 'git_content', '.', 'is_committed', '(', ')', ':', 'content', '.', 'date', '=', 'git_content', '.', 'get_oldest_commit_date', '(', ')', 'if', 'git_content', '.', 'is_modified', '(', ')', ':', 'content', '.', 'modified', '=', 'fs_modified_time', 'else', ':', 'content', '.', 'modified', '=', 'git_content', '.', 'get_newest_commit_date', '(', ')', 'else', ':', "# File isn't committed", 'content', '.', 'date', '=', 'fs_creation_time', 'else', ':', '# file is not managed by git', 'content', '.', 'date', '=', 'fs_creation_time', '# Clean up content attributes', 'if', 'not', 'hasattr', '(', 'content', ',', "'modified'", ')', ':', 'content', '.', 'modified', '=', 'content', '.', 'date', 'if', 'hasattr', '(', 'content', ',', "'date'", ')', ':', 'content', '.', 'locale_date', '=', 'strftime', '(', 'content', '.', 'date', ',', 'content', '.', 'date_format', ')', 'if', 'hasattr', '(', 'content', ',', "'modified'", ')', ':', 'content', '.', 'locale_modified', '=', 'strftime', '(', 'content', '.', 'modified', ',', 'content', '.', 'date_format', ')'] | Update modification and creation times from git | ['Update', 'modification', 'and', 'creation', 'times', 'from', 'git'] | train | https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/filetime_from_git/actions.py#L18-L66 |
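A sketch of the filesystem fallback used for cases 1, 2, and 4 above, with a hypothetical path and a plain-datetime stand-in for `datetime_from_timestamp`:

```python
import os
from datetime import datetime, timezone

path = 'content/article.md'  # hypothetical source path
st = os.stat(path)
fs_creation_time = datetime.fromtimestamp(st.st_ctime, tz=timezone.utc)
fs_modified_time = datetime.fromtimestamp(st.st_mtime, tz=timezone.utc)
```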
7,789 | mikedh/trimesh | trimesh/util.py | sigfig_round | def sigfig_round(values, sigfig=1):
"""
Round a single value to a specified number of significant figures.
Parameters
----------
values: float, value to be rounded
sigfig: int, number of significant figures to reduce to
Returns
----------
rounded: values, but rounded to the specified number of significant figures
Examples
----------
In [1]: trimesh.util.sigfig_round(-232453.00014045456, 1)
Out[1]: -200000.0
In [2]: trimesh.util.sigfig_round(.00014045456, 1)
Out[2]: 0.0001
In [3]: trimesh.util.sigfig_round(.00014045456, 4)
Out[3]: 0.0001405
"""
as_int, multiplier = sigfig_int(values, sigfig)
rounded = as_int * (10 ** multiplier)
return rounded | python | def sigfig_round(values, sigfig=1):
"""
Round a single value to a specified number of significant figures.
Parameters
----------
values: float, value to be rounded
sigfig: int, number of significant figures to reduce to
Returns
----------
rounded: values, but rounded to the specified number of significant figures
Examples
----------
In [1]: trimesh.util.sigfig_round(-232453.00014045456, 1)
Out[1]: -200000.0
In [2]: trimesh.util.sigfig_round(.00014045456, 1)
Out[2]: 0.0001
In [3]: trimesh.util.sigfig_round(.00014045456, 4)
Out[3]: 0.0001405
"""
as_int, multiplier = sigfig_int(values, sigfig)
rounded = as_int * (10 ** multiplier)
return rounded | ['def', 'sigfig_round', '(', 'values', ',', 'sigfig', '=', '1', ')', ':', 'as_int', ',', 'multiplier', '=', 'sigfig_int', '(', 'values', ',', 'sigfig', ')', 'rounded', '=', 'as_int', '*', '(', '10', '**', 'multiplier', ')', 'return', 'rounded'] | Round a single value to a specified number of significant figures.
Parameters
----------
values: float, value to be rounded
sigfig: int, number of significant figures to reduce to
Returns
----------
rounded: values, but rounded to the specified number of significant figures
Examples
----------
In [1]: trimesh.util.sigfig_round(-232453.00014045456, 1)
Out[1]: -200000.0
In [2]: trimesh.util.sigfig_round(.00014045456, 1)
Out[2]: 0.0001
In [3]: trimesh.util.sigfig_round(.00014045456, 4)
Out[3]: 0.0001405 | ['Round', 'a', 'single', 'value', 'to', 'a', 'specified', 'number', 'of', 'significant', 'figures', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L1555-L1584 |
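`sigfig_int` is not shown in this entry; a self-contained equivalent of the rounding, via a base-10 exponent, matches the examples above (up to float representation):

```python
import math

def sigfig_round_sketch(value, sigfig=1):
    # Mirrors the as_int * 10**multiplier decomposition used above.
    if value == 0:
        return 0.0
    exponent = math.floor(math.log10(abs(value))) - (sigfig - 1)
    return round(value / 10.0 ** exponent) * 10.0 ** exponent

print(sigfig_round_sketch(-232453.00014045456, 1))  # -200000.0
print(sigfig_round_sketch(0.00014045456, 4))        # ~0.0001405
```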
7,790 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py | convert_softmax | def convert_softmax(builder, layer, input_names, output_names, keras_layer):
"""Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
builder.add_softmax(name = layer, input_name = input_name,
output_name = output_name) | python | def convert_softmax(builder, layer, input_names, output_names, keras_layer):
"""Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
builder.add_softmax(name = layer, input_name = input_name,
output_name = output_name) | ['def', 'convert_softmax', '(', 'builder', ',', 'layer', ',', 'input_names', ',', 'output_names', ',', 'keras_layer', ')', ':', 'input_name', ',', 'output_name', '=', '(', 'input_names', '[', '0', ']', ',', 'output_names', '[', '0', ']', ')', 'builder', '.', 'add_softmax', '(', 'name', '=', 'layer', ',', 'input_name', '=', 'input_name', ',', 'output_name', '=', 'output_name', ')'] | Convert a softmax layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | ['Convert', 'a', 'softmax', 'layer', 'from', 'keras', 'to', 'coreml', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py#L902-L916 |
7,791 | viniciuschiele/flask-io | flask_io/tracing.py | Tracer.__default_emit_trace | def __default_emit_trace(self, data):
"""
Writes the given tracing data to Python Logging.
:param data: The tracing data to be written.
"""
message = format_trace_data(data)
self.io.logger.info(message) | python | def __default_emit_trace(self, data):
"""
Writes the given tracing data to Python Logging.
:param data: The tracing data to be written.
"""
message = format_trace_data(data)
self.io.logger.info(message) | ['def', '__default_emit_trace', '(', 'self', ',', 'data', ')', ':', 'message', '=', 'format_trace_data', '(', 'data', ')', 'self', '.', 'io', '.', 'logger', '.', 'info', '(', 'message', ')'] | Writes the given tracing data to Python Logging.
:param data: The tracing data to be written. | ['Writes', 'the', 'given', 'tracing', 'data', 'to', 'Python', 'Logging', '.'] | train | https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/tracing.py#L99-L106 |
7,792 | tamasgal/km3pipe | km3pipe/io/root.py | interpol_hist2d | def interpol_hist2d(h2d, oversamp_factor=10):
"""Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins.
"""
from rootpy import ROOTError
xlim = h2d.bins(axis=0)
ylim = h2d.bins(axis=1)
xn = h2d.nbins(0)
yn = h2d.nbins(1)
x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor)
y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor)
mat = np.zeros((xn, yn))
for xi in range(xn):
for yi in range(yn):
try:
mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
except ROOTError:
continue
return mat, x, y | python | def interpol_hist2d(h2d, oversamp_factor=10):
"""Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins.
"""
from rootpy import ROOTError
xlim = h2d.bins(axis=0)
ylim = h2d.bins(axis=1)
xn = h2d.nbins(0)
yn = h2d.nbins(1)
x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor)
y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor)
mat = np.zeros((xn, yn))
for xi in range(xn):
for yi in range(yn):
try:
mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
except ROOTError:
continue
return mat, x, y | ['def', 'interpol_hist2d', '(', 'h2d', ',', 'oversamp_factor', '=', '10', ')', ':', 'from', 'rootpy', 'import', 'ROOTError', 'xlim', '=', 'h2d', '.', 'bins', '(', 'axis', '=', '0', ')', 'ylim', '=', 'h2d', '.', 'bins', '(', 'axis', '=', '1', ')', 'xn', '=', 'h2d', '.', 'nbins', '(', '0', ')', 'yn', '=', 'h2d', '.', 'nbins', '(', '1', ')', 'x', '=', 'np', '.', 'linspace', '(', 'xlim', '[', '0', ']', ',', 'xlim', '[', '1', ']', ',', 'xn', '*', 'oversamp_factor', ')', 'y', '=', 'np', '.', 'linspace', '(', 'ylim', '[', '0', ']', ',', 'ylim', '[', '1', ']', ',', 'yn', '*', 'oversamp_factor', ')', 'mat', '=', 'np', '.', 'zeros', '(', '(', 'xn', ',', 'yn', ')', ')', 'for', 'xi', 'in', 'range', '(', 'xn', ')', ':', 'for', 'yi', 'in', 'range', '(', 'yn', ')', ':', 'try', ':', 'mat', '[', 'xi', ',', 'yi', ']', '=', 'h2d', '.', 'interpolate', '(', 'x', '[', 'xi', ']', ',', 'y', '[', 'yi', ']', ')', 'except', 'ROOTError', ':', 'continue', 'return', 'mat', ',', 'x', ',', 'y'] | Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins. | ['Sample', 'the', 'interpolator', 'of', 'a', 'root', '2d', 'hist', '.'] | train | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L70-L91 |
7,793 | senaite/senaite.core | bika/lims/browser/worksheet/views/results.py | ManageResultsView.get_wide_interims | def get_wide_interims(self):
"""Returns a dictionary with the analyses services from the current
worksheet which have at least one interim with 'Wide' attribute set to
true and that have not yet been submitted
The structure of the returned dictionary is the following:
<Analysis_keyword>: {
'analysis': <Analysis_name>,
'keyword': <Analysis_keyword>,
'interims': {
<Interim_keyword>: {
'value': <Interim_default_value>,
'keyword': <Interim_key>,
'title': <Interim_title>
}
}
}
"""
outdict = {}
allowed_states = ['assigned', 'unassigned']
for analysis in self.context.getAnalyses():
# TODO Workflow - Analysis Use a query instead of this
if api.get_workflow_status_of(analysis) not in allowed_states:
continue
if analysis.getKeyword() in outdict.keys():
continue
calculation = analysis.getCalculation()
if not calculation:
continue
andict = {
"analysis": analysis.Title(),
"keyword": analysis.getKeyword(),
"interims": {}
}
# Analysis Service interim defaults
for field in analysis.getInterimFields():
if field.get("wide", False):
andict["interims"][field["keyword"]] = field
# Interims from calculation
for field in calculation.getInterimFields():
if field["keyword"] not in andict["interims"].keys() \
and field.get("wide", False):
andict["interims"][field["keyword"]] = field
if andict["interims"]:
outdict[analysis.getKeyword()] = andict
return outdict | python | def get_wide_interims(self):
"""Returns a dictionary with the analyses services from the current
worksheet which have at least one interim with 'Wide' attribute set to
true and that have not yet been submitted
The structure of the returned dictionary is the following:
<Analysis_keyword>: {
'analysis': <Analysis_name>,
'keyword': <Analysis_keyword>,
'interims': {
<Interim_keyword>: {
'value': <Interim_default_value>,
'keyword': <Interim_key>,
'title': <Interim_title>
}
}
}
"""
outdict = {}
allowed_states = ['assigned', 'unassigned']
for analysis in self.context.getAnalyses():
# TODO Workflow - Analysis Use a query instead of this
if api.get_workflow_status_of(analysis) not in allowed_states:
continue
if analysis.getKeyword() in outdict.keys():
continue
calculation = analysis.getCalculation()
if not calculation:
continue
andict = {
"analysis": analysis.Title(),
"keyword": analysis.getKeyword(),
"interims": {}
}
# Analysis Service interim defaults
for field in analysis.getInterimFields():
if field.get("wide", False):
andict["interims"][field["keyword"]] = field
# Interims from calculation
for field in calculation.getInterimFields():
if field["keyword"] not in andict["interims"].keys() \
and field.get("wide", False):
andict["interims"][field["keyword"]] = field
if andict["interims"]:
outdict[analysis.getKeyword()] = andict
return outdict | ['def', 'get_wide_interims', '(', 'self', ')', ':', 'outdict', '=', '{', '}', 'allowed_states', '=', '[', "'assigned'", ',', "'unassigned'", ']', 'for', 'analysis', 'in', 'self', '.', 'context', '.', 'getAnalyses', '(', ')', ':', '# TODO Workflow - Analysis Use a query instead of this', 'if', 'api', '.', 'get_workflow_status_of', '(', 'analysis', ')', 'not', 'in', 'allowed_states', ':', 'continue', 'if', 'analysis', '.', 'getKeyword', '(', ')', 'in', 'outdict', '.', 'keys', '(', ')', ':', 'continue', 'calculation', '=', 'analysis', '.', 'getCalculation', '(', ')', 'if', 'not', 'calculation', ':', 'continue', 'andict', '=', '{', '"analysis"', ':', 'analysis', '.', 'Title', '(', ')', ',', '"keyword"', ':', 'analysis', '.', 'getKeyword', '(', ')', ',', '"interims"', ':', '{', '}', '}', '# Analysis Service interim defaults', 'for', 'field', 'in', 'analysis', '.', 'getInterimFields', '(', ')', ':', 'if', 'field', '.', 'get', '(', '"wide"', ',', 'False', ')', ':', 'andict', '[', '"interims"', ']', '[', 'field', '[', '"keyword"', ']', ']', '=', 'field', '# Interims from calculation', 'for', 'field', 'in', 'calculation', '.', 'getInterimFields', '(', ')', ':', 'if', 'field', '[', '"keyword"', ']', 'not', 'in', 'andict', '[', '"interims"', ']', '.', 'keys', '(', ')', 'and', 'field', '.', 'get', '(', '"wide"', ',', 'False', ')', ':', 'andict', '[', '"interims"', ']', '[', 'field', '[', '"keyword"', ']', ']', '=', 'field', 'if', 'andict', '[', '"interims"', ']', ':', 'outdict', '[', 'analysis', '.', 'getKeyword', '(', ')', ']', '=', 'andict', 'return', 'outdict'] | Returns a dictionary with the analyses services from the current
worksheet which have at least one interim with 'Wide' attribute set to
true and that have not been yet submitted
The structure of the returned dictionary is the following:
<Analysis_keyword>: {
'analysis': <Analysis_name>,
'keyword': <Analysis_keyword>,
'interims': {
<Interim_keyword>: {
'value': <Interim_default_value>,
'keyword': <Interim_key>,
'title': <Interim_title>
}
}
} | ['Returns', 'a', 'dictionary', 'with', 'the', 'analyses', 'services', 'from', 'the', 'current', 'worksheet', 'which', 'have', 'at', 'least', 'one', 'interim', 'with', 'Wide', 'attribute', 'set', 'to', 'true', 'and', 'that', 'have', 'not', 'been', 'yet', 'submitted'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/results.py#L139-L190 |
7,794 | python-cmd2/cmd2 | cmd2/cmd2.py | Cmd.register_cmdfinalization_hook | def register_cmdfinalization_hook(self, func: Callable[[plugin.CommandFinalizationData],
plugin.CommandFinalizationData]) -> None:
"""Register a hook to be called after a command is completed, whether it completes successfully or not."""
self._validate_cmdfinalization_callable(func)
self._cmdfinalization_hooks.append(func) | python | def register_cmdfinalization_hook(self, func: Callable[[plugin.CommandFinalizationData],
plugin.CommandFinalizationData]) -> None:
"""Register a hook to be called after a command is completed, whether it completes successfully or not."""
self._validate_cmdfinalization_callable(func)
self._cmdfinalization_hooks.append(func) | ['def', 'register_cmdfinalization_hook', '(', 'self', ',', 'func', ':', 'Callable', '[', '[', 'plugin', '.', 'CommandFinalizationData', ']', ',', 'plugin', '.', 'CommandFinalizationData', ']', ')', '->', 'None', ':', 'self', '.', '_validate_cmdfinalization_callable', '(', 'func', ')', 'self', '.', '_cmdfinalization_hooks', '.', 'append', '(', 'func', ')'] | Register a hook to be called after a command is completed, whether it completes successfully or not. | ['Register', 'a', 'hook', 'to', 'be', 'called', 'after', 'a', 'command', 'is', 'completed', 'whether', 'it', 'completes', 'successfully', 'or', 'not', '.'] | train | https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L4050-L4054 |
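
Note: a minimal usage sketch for the register_cmdfinalization_hook record above. It assumes only what the record shows (cmd2.Cmd, cmd2.plugin.CommandFinalizationData, and the hook signature); the app class and the print statement are illustrative.

import cmd2
from cmd2 import plugin

class App(cmd2.Cmd):
    def __init__(self):
        super().__init__()
        # Register the finalization hook from the record above; it will run
        # after every command, whether the command succeeded or not.
        self.register_cmdfinalization_hook(self.log_finished)

    def log_finished(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
        print("command finished")
        # Per the Callable annotation in the record, the hook returns the
        # CommandFinalizationData it received.
        return data
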
7,795 | brentp/skidmarks | skidmarks.py | auto_correlation | def auto_correlation(sequence):
"""
test for the autocorrelation of a sequence between t and t - 1
    (the higher the 'auto_correlation', the less likely it is that the
    sequence was generated randomly).
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
"""
if isinstance(sequence, basestring):
sequence = map(int, sequence)
seq = np.array(list(sequence), dtype=np.float)
dseq = np.column_stack((seq[1:], seq[:-1]))
slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
cc = np.corrcoef(dseq, rowvar=0)[0][1]
return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
'p': ttp, 'see': see, 'auto_correlation': cc} | python | def auto_correlation(sequence):
"""
test for the autocorrelation of a sequence between t and t - 1
    (the higher the 'auto_correlation', the less likely it is that the
    sequence was generated randomly).
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
"""
if isinstance(sequence, basestring):
sequence = map(int, sequence)
seq = np.array(list(sequence), dtype=np.float)
dseq = np.column_stack((seq[1:], seq[:-1]))
slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
cc = np.corrcoef(dseq, rowvar=0)[0][1]
return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
'p': ttp, 'see': see, 'auto_correlation': cc} | ['def', 'auto_correlation', '(', 'sequence', ')', ':', 'if', 'isinstance', '(', 'sequence', ',', 'basestring', ')', ':', 'sequence', '=', 'map', '(', 'int', ',', 'sequence', ')', 'seq', '=', 'np', '.', 'array', '(', 'list', '(', 'sequence', ')', ',', 'dtype', '=', 'np', '.', 'float', ')', 'dseq', '=', 'np', '.', 'column_stack', '(', '(', 'seq', '[', '1', ':', ']', ',', 'seq', '[', ':', '-', '1', ']', ')', ')', 'slope', ',', 'intercept', ',', 'r', ',', 'ttp', ',', 'see', '=', 'linregress', '(', 'seq', '[', '1', ':', ']', ',', 'seq', '[', ':', '-', '1', ']', ')', 'cc', '=', 'np', '.', 'corrcoef', '(', 'dseq', ',', 'rowvar', '=', '0', ')', '[', '0', ']', '[', '1', ']', 'return', '{', "'slope'", ':', 'slope', ',', "'intercept'", ':', 'intercept', ',', "'r-squared'", ':', 'r', '**', '2', ',', "'p'", ':', 'ttp', ',', "'see'", ':', 'see', ',', "'auto_correlation'", ':', 'cc', '}'] | test for the autocorrelation of a sequence between t and t - 1
    (the higher the 'auto_correlation', the less likely it is that the
    sequence was generated randomly).
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755 | ['test', 'for', 'the', 'autocorrelation', 'of', 'a', 'sequence', 'between', 't', 'and', 't', '-', '1', 'as', 'the', 'auto_correlation', 'it', 'is', 'less', 'likely', 'that', 'the', 'sequence', 'is', 'generated', 'randomly', '.', ':', 'param', 'sequence', ':', 'any', 'iterable', 'with', 'at', 'most', '2', 'values', 'that', 'can', 'be', 'turned', 'into', 'a', 'float', 'via', 'np', '.', 'float', '.', 'e', '.', 'g', '.', '1001001', '[', '1', '0', '1', '0', '1', ']', '[', '1', '.', '2', '.', '1', '.', '5', '1', ']', ':', 'rtype', ':', 'returns', 'a', 'dict', 'of', 'the', 'linear', 'regression', 'stats', 'of', 'sequence', '[', '1', ':', ']', 'vs', '.', 'sequence', '[', ':', '-', '1', ']'] | train | https://github.com/brentp/skidmarks/blob/f63b9f1b822cb47991215b655155b5041e86ea39/skidmarks.py#L102-L129 |
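
Note: the auto_correlation record above is Python 2 era code: basestring is gone in Python 3, NumPy removed np.float, and map() now returns a lazy iterator. A sketch of an equivalent Python 3 version that keeps the record's logic:

import numpy as np
from scipy.stats import linregress

def auto_correlation_py3(sequence):
    # Same computation as the record above, ported to Python 3 / current NumPy.
    if isinstance(sequence, str):
        sequence = [int(c) for c in sequence]
    seq = np.asarray(list(sequence), dtype=float)
    slope, intercept, r, p, see = linregress(seq[1:], seq[:-1])
    # Correlation between the sequence and itself shifted by one step.
    cc = np.corrcoef(seq[1:], seq[:-1])[0][1]
    return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
            'p': p, 'see': see, 'auto_correlation': cc}
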
7,796 | glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/transforms/transform_system.py | TransformSystem.dpi | def dpi(self):
""" Physical resolution of the document coordinate system (dots per
inch).
"""
if self._dpi is None:
if self._canvas is None:
return None
else:
return self.canvas.dpi
else:
return self._dpi | python | def dpi(self):
""" Physical resolution of the document coordinate system (dots per
inch).
"""
if self._dpi is None:
if self._canvas is None:
return None
else:
return self.canvas.dpi
else:
return self._dpi | ['def', 'dpi', '(', 'self', ')', ':', 'if', 'self', '.', '_dpi', 'is', 'None', ':', 'if', 'self', '.', '_canvas', 'is', 'None', ':', 'return', 'None', 'else', ':', 'return', 'self', '.', 'canvas', '.', 'dpi', 'else', ':', 'return', 'self', '.', '_dpi'] | Physical resolution of the document coordinate system (dots per
inch). | ['Physical', 'resolution', 'of', 'the', 'document', 'coordinate', 'system', '(', 'dots', 'per', 'inch', ')', '.'] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/transform_system.py#L241-L251 |
7,797 | materialsproject/pymatgen | pymatgen/analysis/path_finder.py | StaticPotential.rescale_field | def rescale_field(self, new_dim):
"""
Changes the discretization of the potential field by linear
interpolation. This is necessary if the potential field
obtained from DFT is strangely skewed, or is too fine or coarse. Obeys
periodic boundary conditions at the edges of
the cell. Alternatively useful for mixing potentials that originally
are on different grids.
:param new_dim: tuple giving the numpy shape of the new grid
"""
v_dim = self.__v.shape
padded_v = np.lib.pad(self.__v, ((0, 1), (0, 1), (0, 1)), mode='wrap')
ogrid_list = np.array([list(c) for c in list(
np.ndindex(v_dim[0] + 1, v_dim[1] + 1, v_dim[2] + 1))])
v_ogrid = padded_v.reshape(
((v_dim[0] + 1) * (v_dim[1] + 1) * (v_dim[2] + 1), -1))
ngrid_a, ngrid_b, ngrid_c = np.mgrid[0: v_dim[0]: v_dim[0] / new_dim[0],
0: v_dim[1]: v_dim[1] / new_dim[1],
0: v_dim[2]: v_dim[2] / new_dim[2]]
v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid,
(ngrid_a, ngrid_b, ngrid_c),
method='linear').reshape(
(new_dim[0], new_dim[1], new_dim[2]))
self.__v = v_ngrid | python | def rescale_field(self, new_dim):
"""
Changes the discretization of the potential field by linear
interpolation. This is necessary if the potential field
obtained from DFT is strangely skewed, or is too fine or coarse. Obeys
periodic boundary conditions at the edges of
the cell. Alternatively useful for mixing potentials that originally
are on different grids.
:param new_dim: tuple giving the numpy shape of the new grid
"""
v_dim = self.__v.shape
padded_v = np.lib.pad(self.__v, ((0, 1), (0, 1), (0, 1)), mode='wrap')
ogrid_list = np.array([list(c) for c in list(
np.ndindex(v_dim[0] + 1, v_dim[1] + 1, v_dim[2] + 1))])
v_ogrid = padded_v.reshape(
((v_dim[0] + 1) * (v_dim[1] + 1) * (v_dim[2] + 1), -1))
ngrid_a, ngrid_b, ngrid_c = np.mgrid[0: v_dim[0]: v_dim[0] / new_dim[0],
0: v_dim[1]: v_dim[1] / new_dim[1],
0: v_dim[2]: v_dim[2] / new_dim[2]]
v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid,
(ngrid_a, ngrid_b, ngrid_c),
method='linear').reshape(
(new_dim[0], new_dim[1], new_dim[2]))
self.__v = v_ngrid | ['def', 'rescale_field', '(', 'self', ',', 'new_dim', ')', ':', 'v_dim', '=', 'self', '.', '__v', '.', 'shape', 'padded_v', '=', 'np', '.', 'lib', '.', 'pad', '(', 'self', '.', '__v', ',', '(', '(', '0', ',', '1', ')', ',', '(', '0', ',', '1', ')', ',', '(', '0', ',', '1', ')', ')', ',', 'mode', '=', "'wrap'", ')', 'ogrid_list', '=', 'np', '.', 'array', '(', '[', 'list', '(', 'c', ')', 'for', 'c', 'in', 'list', '(', 'np', '.', 'ndindex', '(', 'v_dim', '[', '0', ']', '+', '1', ',', 'v_dim', '[', '1', ']', '+', '1', ',', 'v_dim', '[', '2', ']', '+', '1', ')', ')', ']', ')', 'v_ogrid', '=', 'padded_v', '.', 'reshape', '(', '(', '(', 'v_dim', '[', '0', ']', '+', '1', ')', '*', '(', 'v_dim', '[', '1', ']', '+', '1', ')', '*', '(', 'v_dim', '[', '2', ']', '+', '1', ')', ',', '-', '1', ')', ')', 'ngrid_a', ',', 'ngrid_b', ',', 'ngrid_c', '=', 'np', '.', 'mgrid', '[', '0', ':', 'v_dim', '[', '0', ']', ':', 'v_dim', '[', '0', ']', '/', 'new_dim', '[', '0', ']', ',', '0', ':', 'v_dim', '[', '1', ']', ':', 'v_dim', '[', '1', ']', '/', 'new_dim', '[', '1', ']', ',', '0', ':', 'v_dim', '[', '2', ']', ':', 'v_dim', '[', '2', ']', '/', 'new_dim', '[', '2', ']', ']', 'v_ngrid', '=', 'scipy', '.', 'interpolate', '.', 'griddata', '(', 'ogrid_list', ',', 'v_ogrid', ',', '(', 'ngrid_a', ',', 'ngrid_b', ',', 'ngrid_c', ')', ',', 'method', '=', "'linear'", ')', '.', 'reshape', '(', '(', 'new_dim', '[', '0', ']', ',', 'new_dim', '[', '1', ']', ',', 'new_dim', '[', '2', ']', ')', ')', 'self', '.', '__v', '=', 'v_ngrid'] | Changes the discretization of the potential field by linear
interpolation. This is necessary if the potential field
obtained from DFT is strangely skewed, or is too fine or coarse. Obeys
periodic boundary conditions at the edges of
the cell. Alternatively useful for mixing potentials that originally
are on different grids.
:param new_dim: tuple giving the numpy shape of the new grid | ['Changes', 'the', 'discretization', 'of', 'the', 'potential', 'field', 'by', 'linear', 'interpolation', '.', 'This', 'is', 'necessary', 'if', 'the', 'potential', 'field', 'obtained', 'from', 'DFT', 'is', 'strangely', 'skewed', 'or', 'is', 'too', 'fine', 'or', 'coarse', '.', 'Obeys', 'periodic', 'boundary', 'conditions', 'at', 'the', 'edges', 'of', 'the', 'cell', '.', 'Alternatively', 'useful', 'for', 'mixing', 'potentials', 'that', 'originally', 'are', 'on', 'different', 'grids', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/path_finder.py#L285-L310 |
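
Note: a self-contained sketch of the rescaling approach in the rescale_field record above: pad one layer with periodic wrap so interpolation respects the cell's boundary conditions, then interpolate linearly onto the new grid with scipy.interpolate.griddata. The grid sizes are illustrative; this mirrors the record's technique and is not pymatgen's API.

import numpy as np
import scipy.interpolate

def rescale_periodic(v, new_dim):
    dim = v.shape
    # One extra layer on each high edge, filled by wrapping around the cell.
    padded = np.pad(v, ((0, 1), (0, 1), (0, 1)), mode='wrap')
    points = np.array(list(np.ndindex(dim[0] + 1, dim[1] + 1, dim[2] + 1)))
    values = padded.reshape(-1)
    # Target grid expressed in the old grid's coordinates.
    ga, gb, gc = np.mgrid[0:dim[0]:dim[0] / new_dim[0],
                          0:dim[1]:dim[1] / new_dim[1],
                          0:dim[2]:dim[2] / new_dim[2]]
    return scipy.interpolate.griddata(points, values, (ga, gb, gc),
                                      method='linear')

coarse = np.random.rand(4, 4, 4)
fine = rescale_periodic(coarse, (8, 8, 8))  # doubles the grid density
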
7,798 | nion-software/nionswift | nion/swift/Facade.py | ViewTask.close | def close(self) -> None:
"""Close the task.
.. versionadded:: 1.0
This method must be called when the task is no longer needed.
"""
self.__data_channel_buffer.stop()
self.__data_channel_buffer.close()
self.__data_channel_buffer = None
if not self.__was_playing:
self.__hardware_source.stop_playing() | python | def close(self) -> None:
"""Close the task.
.. versionadded:: 1.0
This method must be called when the task is no longer needed.
"""
self.__data_channel_buffer.stop()
self.__data_channel_buffer.close()
self.__data_channel_buffer = None
if not self.__was_playing:
self.__hardware_source.stop_playing() | ['def', 'close', '(', 'self', ')', '->', 'None', ':', 'self', '.', '__data_channel_buffer', '.', 'stop', '(', ')', 'self', '.', '__data_channel_buffer', '.', 'close', '(', ')', 'self', '.', '__data_channel_buffer', '=', 'None', 'if', 'not', 'self', '.', '__was_playing', ':', 'self', '.', '__hardware_source', '.', 'stop_playing', '(', ')'] | Close the task.
.. versionadded:: 1.0
This method must be called when the task is no longer needed. | ['Close', 'the', 'task', '.'] | train | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L1613-L1624 |
7,799 | romanorac/discomll | discomll/ensemble/core/k_medoids.py | fit | def fit(sim_mat, D_len, cidx):
"""
    The algorithm maximizes the energy between clusters, which is the distinguishing feature of this algorithm. The distance matrix contains mostly zeros, which are overlooked in the search for maximal distances. The algorithm does not try to retain k clusters.
D: numpy array - Symmetric distance matrix
k: int - number of clusters
"""
min_energy = np.inf
for j in range(3):
# select indices in each sample that maximizes its dimension
inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx]) for idy in range(D_len) if idy in sim_mat]
cidx = []
        energy = 0  # current energy
for i in np.unique(inds):
indsi = np.where(inds == i)[0] # find indices for every cluster
minind, min_value = 0, 0
for index, idy in enumerate(indsi):
if idy in sim_mat:
# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
value = 0
for idx in indsi:
value += sim_mat[idy].get(idx, 0)
if value < min_value:
minind, min_value = index, value
energy += min_value
cidx.append(indsi[minind]) # new centers
if energy < min_energy:
min_energy, inds_min, cidx_min = energy, inds, cidx
return inds_min, cidx_min | python | def fit(sim_mat, D_len, cidx):
"""
    The algorithm maximizes the energy between clusters, which is the distinguishing feature of this algorithm. The distance matrix contains mostly zeros, which are overlooked in the search for maximal distances. The algorithm does not try to retain k clusters.
D: numpy array - Symmetric distance matrix
k: int - number of clusters
"""
min_energy = np.inf
for j in range(3):
# select indices in each sample that maximizes its dimension
inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx]) for idy in range(D_len) if idy in sim_mat]
cidx = []
        energy = 0  # current energy
for i in np.unique(inds):
indsi = np.where(inds == i)[0] # find indices for every cluster
minind, min_value = 0, 0
for index, idy in enumerate(indsi):
if idy in sim_mat:
# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
value = 0
for idx in indsi:
value += sim_mat[idy].get(idx, 0)
if value < min_value:
minind, min_value = index, value
energy += min_value
cidx.append(indsi[minind]) # new centers
if energy < min_energy:
min_energy, inds_min, cidx_min = energy, inds, cidx
return inds_min, cidx_min | ['def', 'fit', '(', 'sim_mat', ',', 'D_len', ',', 'cidx', ')', ':', 'min_energy', '=', 'np', '.', 'inf', 'for', 'j', 'in', 'range', '(', '3', ')', ':', '# select indices in each sample that maximizes its dimension', 'inds', '=', '[', 'np', '.', 'argmin', '(', '[', 'sim_mat', '[', 'idy', ']', '.', 'get', '(', 'idx', ',', '0', ')', 'for', 'idx', 'in', 'cidx', ']', ')', 'for', 'idy', 'in', 'range', '(', 'D_len', ')', 'if', 'idy', 'in', 'sim_mat', ']', 'cidx', '=', '[', ']', 'energy', '=', '0', '# current enengy', 'for', 'i', 'in', 'np', '.', 'unique', '(', 'inds', ')', ':', 'indsi', '=', 'np', '.', 'where', '(', 'inds', '==', 'i', ')', '[', '0', ']', '# find indices for every cluster', 'minind', ',', 'min_value', '=', '0', ',', '0', 'for', 'index', ',', 'idy', 'in', 'enumerate', '(', 'indsi', ')', ':', 'if', 'idy', 'in', 'sim_mat', ':', '# value = sum([sim_mat[idy].get(idx,0) for idx in indsi])', 'value', '=', '0', 'for', 'idx', 'in', 'indsi', ':', 'value', '+=', 'sim_mat', '[', 'idy', ']', '.', 'get', '(', 'idx', ',', '0', ')', 'if', 'value', '<', 'min_value', ':', 'minind', ',', 'min_value', '=', 'index', ',', 'value', 'energy', '+=', 'min_value', 'cidx', '.', 'append', '(', 'indsi', '[', 'minind', ']', ')', '# new centers', 'if', 'energy', '<', 'min_energy', ':', 'min_energy', ',', 'inds_min', ',', 'cidx_min', '=', 'energy', ',', 'inds', ',', 'cidx', 'return', 'inds_min', ',', 'cidx_min'] | Algorithm maximizes energy between clusters, which is distinction in this algorithm. Distance matrix contains mostly 0, which are overlooked due to search of maximal distances. Algorithm does not try to retain k clusters.
D: numpy array - Symmetric distance matrix
k: int - number of clusters | ['Algorithm', 'maximizes', 'energy', 'between', 'clusters', 'which', 'is', 'distinction', 'in', 'this', 'algorithm', '.', 'Distance', 'matrix', 'contains', 'mostly', '0', 'which', 'are', 'overlooked', 'due', 'to', 'search', 'of', 'maximal', 'distances', '.', 'Algorithm', 'does', 'not', 'try', 'to', 'retain', 'k', 'clusters', '.'] | train | https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/k_medoids.py#L8-L41 |
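
Note: a small usage sketch for the fit record above, assuming the record's fit function (and its numpy import) is in scope. The sparse structure is a dict of dicts mapping row index to {column index: distance}; the indices and distances are invented for illustration (discomll builds this structure in its own pipeline).

# Four points, two obvious pairs: {0, 1} are close, {2, 3} are close.
sim_mat = {
    0: {1: 0.1, 2: 0.9, 3: 0.8},
    1: {0: 0.1, 2: 0.7, 3: 0.9},
    2: {0: 0.9, 1: 0.7, 3: 0.2},
    3: {0: 0.8, 1: 0.9, 2: 0.2},
}
D_len = 4
cidx = [0, 2]  # initial medoid guesses, one per intended cluster

inds, medoids = fit(sim_mat, D_len, cidx)
# inds assigns each point the position of its medoid in cidx; medoids holds
# the updated medoid indices, e.g. ([0, 0, 1, 1], [0, 2]) for this data.
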