| column | type | lengths / classes |
|---|---|---|
| repository_name | string | lengths 5–67 |
| func_path_in_repository | string | lengths 4–234 |
| func_name | string | lengths 0–314 |
| whole_func_string | string | lengths 52–3.87M |
| language | string | 6 classes |
| func_code_string | string | lengths 52–3.87M |
| func_code_tokens | sequence | lengths 15–672k |
| func_documentation_string | string | lengths 1–47.2k |
| func_documentation_tokens | sequence | lengths 1–3.92k |
| split_name | string | 1 class |
| func_code_url | string | lengths 85–339 |
jborean93/smbprotocol | smbprotocol/open.py | Open.query_directory | python | train | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/open.py#L1230-L1276

```python
def query_directory(self, pattern, file_information_class, flags=None,
                    file_index=0, max_output=65536, send=True):
    """
    Run a Query/Find on an opened directory based on the params passed in.

    Supports an out-of-band send function: call this function with
    send=False to return a tuple of (SMB2QueryDirectoryRequest,
    receive_func) instead of sending the request and waiting for the
    response. The receive_func can be used to get the response from the
    server by passing in the Request that was used to send it out of band.

    :param pattern: The string pattern to use for the query; the pattern
        format depends on the SMB server but * is usually a wildcard
    :param file_information_class: FileInformationClass that defines the
        format of the result that is returned
    :param flags: QueryDirectoryFlags that control how the operation must
        be processed
    :param file_index: If the SMB2_INDEX_SPECIFIED flag is set, this is the
        index the query should resume on, otherwise it should be 0
    :param max_output: The maximum output size, defaults to the max credit
        size but can be increased to reduce round-trip operations
    :param send: Whether to send the request in the same call or return the
        message to the caller along with the unpack function
    :return: A list of structures defined in query_info.py; the list entry
        structure is based on the value of file_information_class in the
        request message
    """
    query = SMB2QueryDirectoryRequest()
    query['file_information_class'] = file_information_class
    query['flags'] = flags
    query['file_index'] = file_index
    query['file_id'] = self.file_id
    query['output_buffer_length'] = max_output
    query['buffer'] = pattern.encode('utf-16-le')

    if not send:
        return query, self._query_directory_response

    log.info("Session: %s, Tree Connect: %s - sending SMB2 Query "
             "Directory Request for directory %s"
             % (self.tree_connect.session.username,
                self.tree_connect.share_name, self.file_name))
    log.debug(str(query))
    request = self.connection.send(query,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._query_directory_response(request)
```
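A sketch of the out-of-band pattern the docstring describes, compounding this request with the `Open.close` request from the next record. `dir_open` is a hypothetical already-opened directory handle; `send_compound` and `FileInformationClass.FILE_DIRECTORY_INFORMATION` are assumed names, not confirmed by this record:

```python
# Hedged out-of-band sketch for query_directory/close with send=False.
# Each call returns (request_message, receive_func); the receive_func is
# later given the Request object that actually carried the message.
query_msg, query_recv = dir_open.query_directory(
    "*", FileInformationClass.FILE_DIRECTORY_INFORMATION, send=False)
close_msg, close_recv = dir_open.close(send=False)

# Assumed API for sending several messages in one exchange; adjust to the
# actual connection interface.
requests = dir_open.connection.send_compound(
    [query_msg, close_msg],
    dir_open.tree_connect.session.session_id,
    dir_open.tree_connect.tree_connect_id)

entries = query_recv(requests[0])  # list of query_info.py structures
close_recv(requests[1])            # unpack the close response
```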
jborean93/smbprotocol | smbprotocol/open.py | Open.close | python | train | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/open.py#L1294-L1331

```python
def close(self, get_attributes=False, send=True):
    """
    Closes an opened file.

    Supports an out-of-band send function: call this function with
    send=False to return a tuple of (SMB2CloseRequest, receive_func)
    instead of sending the request and waiting for the response. The
    receive_func can be used to get the response from the server by
    passing in the Request that was used to send it out of band.

    :param get_attributes: (Bool) whether to get the latest attributes on
        the close and set them on the Open object
    :param send: Whether to send the request in the same call or return the
        message to the caller along with the unpack function
    :return: SMB2CloseResponse message received from the server
    """
    # it is already closed and this isn't for an out of band request
    if not self._connected and send:
        return

    close = SMB2CloseRequest()
    close['file_id'] = self.file_id
    if get_attributes:
        close['flags'] = CloseFlags.SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB

    if not send:
        return close, self._close_response

    log.info("Session: %s, Tree Connect: %s - sending SMB2 Close Request "
             "for file %s" % (self.tree_connect.session.username,
                              self.tree_connect.share_name,
                              self.file_name))
    log.debug(str(close))
    request = self.connection.send(close,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._close_response(request)
```
"""
Closes an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMB2CloseRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param get_attributes: (Bool) whether to get the latest attributes on
the close and set them on the Open object
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: SMB2CloseResponse message received from the server
"""
# it is already closed and this isn't for an out of band request
if not self._connected and send:
return
close = SMB2CloseRequest()
close['file_id'] = self.file_id
if get_attributes:
close['flags'] = CloseFlags.SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB
if not send:
return close, self._close_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Close Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(close))
request = self.connection.send(close,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._close_response(request) | [
"def",
"close",
"(",
"self",
",",
"get_attributes",
"=",
"False",
",",
"send",
"=",
"True",
")",
":",
"# it is already closed and this isn't for an out of band request",
"if",
"not",
"self",
".",
"_connected",
"and",
"send",
":",
"return",
"close",
"=",
"SMB2CloseRequest",
"(",
")",
"close",
"[",
"'file_id'",
"]",
"=",
"self",
".",
"file_id",
"if",
"get_attributes",
":",
"close",
"[",
"'flags'",
"]",
"=",
"CloseFlags",
".",
"SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB",
"if",
"not",
"send",
":",
"return",
"close",
",",
"self",
".",
"_close_response",
"log",
".",
"info",
"(",
"\"Session: %s, Tree Connect: %s - sending SMB2 Close Request \"",
"\"for file %s\"",
"%",
"(",
"self",
".",
"tree_connect",
".",
"session",
".",
"username",
",",
"self",
".",
"tree_connect",
".",
"share_name",
",",
"self",
".",
"file_name",
")",
")",
"log",
".",
"debug",
"(",
"str",
"(",
"close",
")",
")",
"request",
"=",
"self",
".",
"connection",
".",
"send",
"(",
"close",
",",
"self",
".",
"tree_connect",
".",
"session",
".",
"session_id",
",",
"self",
".",
"tree_connect",
".",
"tree_connect_id",
")",
"return",
"self",
".",
"_close_response",
"(",
"request",
")"
] | Closes an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMB2CloseRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param get_attributes: (Bool) whether to get the latest attributes on
the close and set them on the Open object
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: SMB2CloseResponse message received from the server | [
"Closes",
"an",
"opened",
"file",
"."
] | train | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/open.py#L1294-L1331 |
jborean93/smbprotocol | smbprotocol/security_descriptor.py | SIDPacket.from_string | python | train | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/security_descriptor.py#L160-L180

```python
def from_string(self, sid_string):
    """
    Used to set the structure parameters based on the input string.

    :param sid_string: String of the SID in S-x-x-x-x form
    """
    if not sid_string.startswith("S-"):
        raise ValueError("A SID string must start with S-")

    sid_entries = sid_string.split("-")
    if len(sid_entries) < 3:
        raise ValueError("A SID string must start with S and contain a "
                         "revision and identifier authority, e.g. S-1-0")

    revision = int(sid_entries[1])
    id_authority = int(sid_entries[2])
    sub_authorities = [int(i) for i in sid_entries[3:]]

    self['revision'].set_value(revision)
    self['identifier_authority'].set_value(id_authority)
    self['sub_authorities'] = sub_authorities
```
"""
Used to set the structure parameters based on the input string
:param sid_string: String of the sid in S-x-x-x-x form
"""
if not sid_string.startswith("S-"):
raise ValueError("A SID string must start with S-")
sid_entries = sid_string.split("-")
if len(sid_entries) < 3:
raise ValueError("A SID string must start with S and contain a "
"revision and identifier authority, e.g. S-1-0")
revision = int(sid_entries[1])
id_authority = int(sid_entries[2])
sub_authorities = [int(i) for i in sid_entries[3:]]
self['revision'].set_value(revision)
self['identifier_authority'].set_value(id_authority)
self['sub_authorities'] = sub_authorities | [
"def",
"from_string",
"(",
"self",
",",
"sid_string",
")",
":",
"if",
"not",
"sid_string",
".",
"startswith",
"(",
"\"S-\"",
")",
":",
"raise",
"ValueError",
"(",
"\"A SID string must start with S-\"",
")",
"sid_entries",
"=",
"sid_string",
".",
"split",
"(",
"\"-\"",
")",
"if",
"len",
"(",
"sid_entries",
")",
"<",
"3",
":",
"raise",
"ValueError",
"(",
"\"A SID string must start with S and contain a \"",
"\"revision and identifier authority, e.g. S-1-0\"",
")",
"revision",
"=",
"int",
"(",
"sid_entries",
"[",
"1",
"]",
")",
"id_authority",
"=",
"int",
"(",
"sid_entries",
"[",
"2",
"]",
")",
"sub_authorities",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"sid_entries",
"[",
"3",
":",
"]",
"]",
"self",
"[",
"'revision'",
"]",
".",
"set_value",
"(",
"revision",
")",
"self",
"[",
"'identifier_authority'",
"]",
".",
"set_value",
"(",
"id_authority",
")",
"self",
"[",
"'sub_authorities'",
"]",
"=",
"sub_authorities"
] | Used to set the structure parameters based on the input string
:param sid_string: String of the sid in S-x-x-x-x form | [
"Used",
"to",
"set",
"the",
"structure",
"parameters",
"based",
"on",
"the",
"input",
"string"
] | train | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/security_descriptor.py#L160-L180 |
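To make the parsing rule concrete, here is a minimal stand-alone sketch of the same logic, independent of the SIDPacket field machinery (`parse_sid` is an illustrative name, not part of smbprotocol):

```python
def parse_sid(sid_string):
    # Mirror of from_string above: "S-<revision>-<authority>[-<sub>...]"
    if not sid_string.startswith("S-"):
        raise ValueError("A SID string must start with S-")
    parts = sid_string.split("-")
    if len(parts) < 3:
        raise ValueError("A SID string needs a revision and an "
                         "identifier authority, e.g. S-1-0")
    return int(parts[1]), int(parts[2]), [int(p) for p in parts[3:]]

# Well-known BUILTIN\Administrators SID: revision 1, authority 5,
# sub authorities [32, 544]
assert parse_sid("S-1-5-32-544") == (1, 5, [32, 544])
```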
Thibauth/python-pushover | pushover.py | MessageRequest.poll | python | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L90-L119

```python
def poll(self):
    """If the message request has a priority of 2, Pushover keeps sending
    the same notification until the client acknowledges it. Calling the
    :func:`poll` function fetches the status of the :class:`MessageRequest`
    object until the notification either expires, is acknowledged by the
    client, or the callback url is reached. The status is available in the
    ``status`` dictionary.

    Returns ``True`` when the request has expired or been acknowledged and
    ``False`` otherwise, so that a typical handling of a priority-2
    notification can look like this::

        request = p.message("Urgent!", priority=2, expire=120, retry=60)
        while not request.poll():
            # do something
            time.sleep(5)

        print request.status
    """
    if not self.status["done"]:
        r = Request("get", self.url + ".json", {"token": self.payload["token"]})
        for param, when in MessageRequest.params.iteritems():
            self.status[param] = bool(r.answer[param])
            self.status[when] = int(r.answer[when])
        for param in ["acknowledged_by", "acknowledged_by_device"]:
            self.status[param] = r.answer[param]
        self.status["last_delivered_at"] = int(r.answer["last_delivered_at"])
        if any(self.status[param] for param in MessageRequest.params):
            self.status["done"] = True
    return self.status["done"]
```
"""If the message request has a priority of 2, Pushover keeps sending
the same notification until the client acknowledges it. Calling the
:func:`poll` function fetches the status of the :class:`MessageRequest`
object until the notifications either expires, is acknowledged by the
client, or the callback url is reached. The status is available in the
``status`` dictionary.
Returns ``True`` when the request has expired or been acknowledged and
``False`` otherwise so that a typical handling of a priority-2
notification can look like this::
request = p.message("Urgent!", priority=2, expire=120, retry=60)
while not request.poll():
# do something
time.sleep(5)
print request.status
"""
if not self.status["done"]:
r = Request("get", self.url + ".json", {"token": self.payload["token"]})
for param, when in MessageRequest.params.iteritems():
self.status[param] = bool(r.answer[param])
self.status[when] = int(r.answer[when])
for param in ["acknowledged_by", "acknowledged_by_device"]:
self.status[param] = r.answer[param]
self.status["last_delivered_at"] = int(r.answer["last_delivered_at"])
if any(self.status[param] for param in MessageRequest.params):
self.status["done"] = True
return self.status["done"] | [
"def",
"poll",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"status",
"[",
"\"done\"",
"]",
":",
"r",
"=",
"Request",
"(",
"\"get\"",
",",
"self",
".",
"url",
"+",
"\".json\"",
",",
"{",
"\"token\"",
":",
"self",
".",
"payload",
"[",
"\"token\"",
"]",
"}",
")",
"for",
"param",
",",
"when",
"in",
"MessageRequest",
".",
"params",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"status",
"[",
"param",
"]",
"=",
"bool",
"(",
"r",
".",
"answer",
"[",
"param",
"]",
")",
"self",
".",
"status",
"[",
"when",
"]",
"=",
"int",
"(",
"r",
".",
"answer",
"[",
"when",
"]",
")",
"for",
"param",
"in",
"[",
"\"acknowledged_by\"",
",",
"\"acknowledged_by_device\"",
"]",
":",
"self",
".",
"status",
"[",
"param",
"]",
"=",
"r",
".",
"answer",
"[",
"param",
"]",
"self",
".",
"status",
"[",
"\"last_delivered_at\"",
"]",
"=",
"int",
"(",
"r",
".",
"answer",
"[",
"\"last_delivered_at\"",
"]",
")",
"if",
"any",
"(",
"self",
".",
"status",
"[",
"param",
"]",
"for",
"param",
"in",
"MessageRequest",
".",
"params",
")",
":",
"self",
".",
"status",
"[",
"\"done\"",
"]",
"=",
"True",
"return",
"self",
".",
"status",
"[",
"\"done\"",
"]"
] | If the message request has a priority of 2, Pushover keeps sending
the same notification until the client acknowledges it. Calling the
:func:`poll` function fetches the status of the :class:`MessageRequest`
object until the notifications either expires, is acknowledged by the
client, or the callback url is reached. The status is available in the
``status`` dictionary.
Returns ``True`` when the request has expired or been acknowledged and
``False`` otherwise so that a typical handling of a priority-2
notification can look like this::
request = p.message("Urgent!", priority=2, expire=120, retry=60)
while not request.poll():
# do something
time.sleep(5)
print request.status | [
"If",
"the",
"message",
"request",
"has",
"a",
"priority",
"of",
"2",
"Pushover",
"keeps",
"sending",
"the",
"same",
"notification",
"until",
"the",
"client",
"acknowledges",
"it",
".",
"Calling",
"the",
":",
"func",
":",
"poll",
"function",
"fetches",
"the",
"status",
"of",
"the",
":",
"class",
":",
"MessageRequest",
"object",
"until",
"the",
"notifications",
"either",
"expires",
"is",
"acknowledged",
"by",
"the",
"client",
"or",
"the",
"callback",
"url",
"is",
"reached",
".",
"The",
"status",
"is",
"available",
"in",
"the",
"status",
"dictionary",
"."
] | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L90-L119 |
Thibauth/python-pushover | pushover.py | Pushover.sounds | python | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L163-L170

```python
def sounds(self):
    """Return a dictionary of sounds recognized by Pushover and that can be
    used in a notification message.
    """
    if not Pushover._SOUNDS:
        request = Request("get", SOUND_URL, {"token": self.token})
        Pushover._SOUNDS = request.answer["sounds"]
    return Pushover._SOUNDS
```
"""Return a dictionary of sounds recognized by Pushover and that can be
used in a notification message.
"""
if not Pushover._SOUNDS:
request = Request("get", SOUND_URL, {"token": self.token})
Pushover._SOUNDS = request.answer["sounds"]
return Pushover._SOUNDS | [
"def",
"sounds",
"(",
"self",
")",
":",
"if",
"not",
"Pushover",
".",
"_SOUNDS",
":",
"request",
"=",
"Request",
"(",
"\"get\"",
",",
"SOUND_URL",
",",
"{",
"\"token\"",
":",
"self",
".",
"token",
"}",
")",
"Pushover",
".",
"_SOUNDS",
"=",
"request",
".",
"answer",
"[",
"\"sounds\"",
"]",
"return",
"Pushover",
".",
"_SOUNDS"
] | Return a dictionary of sounds recognized by Pushover and that can be
used in a notification message. | [
"Return",
"a",
"dictionary",
"of",
"sounds",
"recognized",
"by",
"Pushover",
"and",
"that",
"can",
"be",
"used",
"in",
"a",
"notification",
"message",
"."
] | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L163-L170 |
Thibauth/python-pushover | pushover.py | Pushover.verify | python | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L172-L185

```python
def verify(self, user, device=None):
    """Verify that the `user` and optional `device` exist. Returns
    `None` when the user/device does not exist or a list of the user's
    devices otherwise.
    """
    payload = {"user": user, "token": self.token}
    if device:
        payload["device"] = device
    try:
        request = Request("post", USER_URL, payload)
    except RequestError:
        return None
    else:
        return request.answer["devices"]
```
"""Verify that the `user` and optional `device` exist. Returns
`None` when the user/device does not exist or a list of the user's
devices otherwise.
"""
payload = {"user": user, "token": self.token}
if device:
payload["device"] = device
try:
request = Request("post", USER_URL, payload)
except RequestError:
return None
else:
return request.answer["devices"] | [
"def",
"verify",
"(",
"self",
",",
"user",
",",
"device",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"\"user\"",
":",
"user",
",",
"\"token\"",
":",
"self",
".",
"token",
"}",
"if",
"device",
":",
"payload",
"[",
"\"device\"",
"]",
"=",
"device",
"try",
":",
"request",
"=",
"Request",
"(",
"\"post\"",
",",
"USER_URL",
",",
"payload",
")",
"except",
"RequestError",
":",
"return",
"None",
"else",
":",
"return",
"request",
".",
"answer",
"[",
"\"devices\"",
"]"
] | Verify that the `user` and optional `device` exist. Returns
`None` when the user/device does not exist or a list of the user's
devices otherwise. | [
"Verify",
"that",
"the",
"user",
"and",
"optional",
"device",
"exist",
".",
"Returns",
"None",
"when",
"the",
"user",
"/",
"device",
"does",
"not",
"exist",
"or",
"a",
"list",
"of",
"the",
"user",
"s",
"devices",
"otherwise",
"."
] | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L172-L185 |
Thibauth/python-pushover | pushover.py | Pushover.message | python | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L187-L214

```python
def message(self, user, message, **kwargs):
    """Send `message` to the user specified by `user`. It is possible
    to specify additional properties of the message by passing keyword
    arguments. The list of valid keywords is ``title, priority, sound,
    callback, timestamp, url, url_title, device, retry, expire and html``
    which are described in the Pushover API documentation.

    For convenience, you can simply set ``timestamp=True`` to set the
    timestamp to the current time.

    An image can be attached to a message by passing a file-like object
    to the `attachment` keyword argument.

    This method returns a :class:`MessageRequest` object.
    """
    payload = {"message": message, "user": user, "token": self.token}
    for key, value in kwargs.iteritems():
        if key not in Pushover.message_keywords:
            raise ValueError("{0}: invalid message parameter".format(key))
        elif key == "timestamp" and value is True:
            payload[key] = int(time.time())
        elif key == "sound" and value not in self.sounds:
            raise ValueError("{0}: invalid sound".format(value))
        else:
            payload[key] = value
    return MessageRequest(payload)
```
"""Send `message` to the user specified by `user`. It is possible
to specify additional properties of the message by passing keyword
arguments. The list of valid keywords is ``title, priority, sound,
callback, timestamp, url, url_title, device, retry, expire and html``
which are described in the Pushover API documentation.
For convenience, you can simply set ``timestamp=True`` to set the
timestamp to the current timestamp.
An image can be attached to a message by passing a file-like object
to the `attachment` keyword argument.
This method returns a :class:`MessageRequest` object.
"""
payload = {"message": message, "user": user, "token": self.token}
for key, value in kwargs.iteritems():
if key not in Pushover.message_keywords:
raise ValueError("{0}: invalid message parameter".format(key))
elif key == "timestamp" and value is True:
payload[key] = int(time.time())
elif key == "sound" and value not in self.sounds:
raise ValueError("{0}: invalid sound".format(value))
else:
payload[key] = value
return MessageRequest(payload) | [
"def",
"message",
"(",
"self",
",",
"user",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"{",
"\"message\"",
":",
"message",
",",
"\"user\"",
":",
"user",
",",
"\"token\"",
":",
"self",
".",
"token",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"not",
"in",
"Pushover",
".",
"message_keywords",
":",
"raise",
"ValueError",
"(",
"\"{0}: invalid message parameter\"",
".",
"format",
"(",
"key",
")",
")",
"elif",
"key",
"==",
"\"timestamp\"",
"and",
"value",
"is",
"True",
":",
"payload",
"[",
"key",
"]",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"elif",
"key",
"==",
"\"sound\"",
"and",
"value",
"not",
"in",
"self",
".",
"sounds",
":",
"raise",
"ValueError",
"(",
"\"{0}: invalid sound\"",
".",
"format",
"(",
"value",
")",
")",
"else",
":",
"payload",
"[",
"key",
"]",
"=",
"value",
"return",
"MessageRequest",
"(",
"payload",
")"
] | Send `message` to the user specified by `user`. It is possible
to specify additional properties of the message by passing keyword
arguments. The list of valid keywords is ``title, priority, sound,
callback, timestamp, url, url_title, device, retry, expire and html``
which are described in the Pushover API documentation.
For convenience, you can simply set ``timestamp=True`` to set the
timestamp to the current timestamp.
An image can be attached to a message by passing a file-like object
to the `attachment` keyword argument.
This method returns a :class:`MessageRequest` object. | [
"Send",
"message",
"to",
"the",
"user",
"specified",
"by",
"user",
".",
"It",
"is",
"possible",
"to",
"specify",
"additional",
"properties",
"of",
"the",
"message",
"by",
"passing",
"keyword",
"arguments",
".",
"The",
"list",
"of",
"valid",
"keywords",
"is",
"title",
"priority",
"sound",
"callback",
"timestamp",
"url",
"url_title",
"device",
"retry",
"expire",
"and",
"html",
"which",
"are",
"described",
"in",
"the",
"Pushover",
"API",
"documentation",
"."
] | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L187-L214 |
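A hedged end-to-end sketch tying `verify`, `message`, and `poll` together. The import path and constructor signature are assumptions based on the module name and the `self.token` usage above; the token and user key are placeholders:

```python
import time
from pushover import Pushover  # assumed import; the module is pushover.py

po = Pushover("<app-token>")   # assumed: constructor stores the app token
if po.verify("<user-key>") is None:
    raise SystemExit("unknown user")

req = po.message("<user-key>", "Disk almost full", title="monitor",
                 priority=2, retry=60, expire=120, timestamp=True)
while not req.poll():          # only priority-2 requests need polling
    time.sleep(5)
print(req.status)
```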
Thibauth/python-pushover | pushover.py | Pushover.glance | python | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L216-L234

```python
def glance(self, user, **kwargs):
    """Send a glance to the user. The default property is ``text``, as this
    is used on most glances; however, a valid glance does not require text
    and can be constructed using any combination of valid keyword
    properties. The list of valid keywords is ``title, text, subtext,
    count, percent and device`` which are described in the Pushover Glance
    API documentation.

    This method returns a :class:`GlanceRequest` object.
    """
    payload = {"user": user, "token": self.token}
    for key, value in kwargs.iteritems():
        if key not in Pushover.glance_keywords:
            raise ValueError("{0}: invalid glance parameter".format(key))
        else:
            payload[key] = value
    return Request("post", GLANCE_URL, payload)
```
"""Send a glance to the user. The default property is ``text``, as this
is used on most glances, however a valid glance does not need to
require text and can be constructed using any combination of valid
keyword properties. The list of valid keywords is ``title, text,
subtext, count, percent and device`` which are described in the
Pushover Glance API documentation.
This method returns a :class:`GlanceRequest` object.
"""
payload = {"user": user, "token": self.token}
for key, value in kwargs.iteritems():
if key not in Pushover.glance_keywords:
raise ValueError("{0}: invalid glance parameter".format(key))
else:
payload[key] = value
return Request("post", GLANCE_URL, payload) | [
"def",
"glance",
"(",
"self",
",",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"{",
"\"user\"",
":",
"user",
",",
"\"token\"",
":",
"self",
".",
"token",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"not",
"in",
"Pushover",
".",
"glance_keywords",
":",
"raise",
"ValueError",
"(",
"\"{0}: invalid glance parameter\"",
".",
"format",
"(",
"key",
")",
")",
"else",
":",
"payload",
"[",
"key",
"]",
"=",
"value",
"return",
"Request",
"(",
"\"post\"",
",",
"GLANCE_URL",
",",
"payload",
")"
] | Send a glance to the user. The default property is ``text``, as this
is used on most glances, however a valid glance does not need to
require text and can be constructed using any combination of valid
keyword properties. The list of valid keywords is ``title, text,
subtext, count, percent and device`` which are described in the
Pushover Glance API documentation.
This method returns a :class:`GlanceRequest` object. | [
"Send",
"a",
"glance",
"to",
"the",
"user",
".",
"The",
"default",
"property",
"is",
"text",
"as",
"this",
"is",
"used",
"on",
"most",
"glances",
"however",
"a",
"valid",
"glance",
"does",
"not",
"need",
"to",
"require",
"text",
"and",
"can",
"be",
"constructed",
"using",
"any",
"combination",
"of",
"valid",
"keyword",
"properties",
".",
"The",
"list",
"of",
"valid",
"keywords",
"is",
"title",
"text",
"subtext",
"count",
"percent",
"and",
"device",
"which",
"are",
"described",
"in",
"the",
"Pushover",
"Glance",
"API",
"documentation",
"."
] | train | https://github.com/Thibauth/python-pushover/blob/420bde9a2bd7981b5ea8f0c1cb8875d5f676f368/pushover.py#L216-L234 |
Wramberg/adaptfilt | adaptfilt/misc.py | mswe | python | train | https://github.com/Wramberg/adaptfilt/blob/9bb943bb5e4162e10a8aaabfc68339b8fc06c11a/adaptfilt/misc.py#L7-L57

```python
def mswe(w, v):
    """
    Calculate mean squared weight error between estimated and true filter
    coefficients, with respect to iterations.

    Parameters
    ----------
    v : array-like
        True coefficients used to generate desired signal, must be a
        one-dimensional array.
    w : array-like
        Estimated coefficients from adaptive filtering algorithm. Must be an
        N x M matrix where N is the number of iterations, and M is the
        number of filter coefficients.

    Returns
    -------
    mswe : numpy.array
        One-dimensional array containing the mean-squared weight error for
        every iteration.

    Raises
    ------
    TypeError
        If inputs have wrong dimensions.

    Note
    ----
    To use this function with the adaptive filter functions, set the
    optional parameter returnCoeffs to True. This will return a coefficient
    matrix w corresponding with the input-parameter w.
    """
    # Ensure inputs are numpy arrays
    w = np.array(w)
    v = np.array(v)

    # Check dimensions
    if len(w.shape) != 2:
        raise TypeError('Estimated coefficients must be in NxM matrix')
    if len(v.shape) != 1:
        raise TypeError('Real coefficients must be in 1d array')

    # Ensure equal length between estimated and real coeffs
    N, M = w.shape
    L = v.size
    if M < L:
        v = v[:-(L-M)]
    elif M > L:
        v = np.concatenate((v, np.zeros(M-L)))

    # Calculate and return MSWE
    mswe = np.mean((w - v)**2, axis=1)
    return mswe
```
"""
Calculate mean squared weight error between estimated and true filter
coefficients, in respect to iterations.
Parameters
----------
v : array-like
True coefficients used to generate desired signal, must be a
one-dimensional array.
w : array-like
Estimated coefficients from adaptive filtering algorithm. Must be an
N x M matrix where N is the number of iterations, and M is the number
of filter coefficients.
Returns
-------
mswe : numpy.array
One-dimensional array containing the mean-squared weight error for
every iteration.
Raises
------
TypeError
If inputs have wrong dimensions
Note
----
To use this function with the adaptive filter functions set the optional
parameter returnCoeffs to True. This will return a coefficient matrix w
corresponding with the input-parameter w.
"""
# Ensure inputs are numpy arrays
w = np.array(w)
v = np.array(v)
# Check dimensions
if(len(w.shape) != 2):
raise TypeError('Estimated coefficients must be in NxM matrix')
if(len(v.shape) != 1):
raise TypeError('Real coefficients must be in 1d array')
# Ensure equal length between estimated and real coeffs
N, M = w.shape
L = v.size
if(M < L):
v = v[:-(L-M)]
elif(M > L):
v = np.concatenate((v, np.zeros(M-L)))
# Calculate and return MSWE
mswe = np.mean((w - v)**2, axis=1)
return mswe | [
"def",
"mswe",
"(",
"w",
",",
"v",
")",
":",
"# Ensure inputs are numpy arrays",
"w",
"=",
"np",
".",
"array",
"(",
"w",
")",
"v",
"=",
"np",
".",
"array",
"(",
"v",
")",
"# Check dimensions",
"if",
"(",
"len",
"(",
"w",
".",
"shape",
")",
"!=",
"2",
")",
":",
"raise",
"TypeError",
"(",
"'Estimated coefficients must be in NxM matrix'",
")",
"if",
"(",
"len",
"(",
"v",
".",
"shape",
")",
"!=",
"1",
")",
":",
"raise",
"TypeError",
"(",
"'Real coefficients must be in 1d array'",
")",
"# Ensure equal length between estimated and real coeffs",
"N",
",",
"M",
"=",
"w",
".",
"shape",
"L",
"=",
"v",
".",
"size",
"if",
"(",
"M",
"<",
"L",
")",
":",
"v",
"=",
"v",
"[",
":",
"-",
"(",
"L",
"-",
"M",
")",
"]",
"elif",
"(",
"M",
">",
"L",
")",
":",
"v",
"=",
"np",
".",
"concatenate",
"(",
"(",
"v",
",",
"np",
".",
"zeros",
"(",
"M",
"-",
"L",
")",
")",
")",
"# Calculate and return MSWE",
"mswe",
"=",
"np",
".",
"mean",
"(",
"(",
"w",
"-",
"v",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"return",
"mswe"
] | Calculate mean squared weight error between estimated and true filter
coefficients, in respect to iterations.
Parameters
----------
v : array-like
True coefficients used to generate desired signal, must be a
one-dimensional array.
w : array-like
Estimated coefficients from adaptive filtering algorithm. Must be an
N x M matrix where N is the number of iterations, and M is the number
of filter coefficients.
Returns
-------
mswe : numpy.array
One-dimensional array containing the mean-squared weight error for
every iteration.
Raises
------
TypeError
If inputs have wrong dimensions
Note
----
To use this function with the adaptive filter functions set the optional
parameter returnCoeffs to True. This will return a coefficient matrix w
corresponding with the input-parameter w. | [
"Calculate",
"mean",
"squared",
"weight",
"error",
"between",
"estimated",
"and",
"true",
"filter",
"coefficients",
"in",
"respect",
"to",
"iterations",
"."
] | train | https://github.com/Wramberg/adaptfilt/blob/9bb943bb5e4162e10a8aaabfc68339b8fc06c11a/adaptfilt/misc.py#L7-L57 |
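A tiny self-contained check of the trim-and-average logic above (pure NumPy; the values are made up for illustration):

```python
import numpy as np

w = np.array([[0.5, 0.0],      # estimated taps after iteration 1
              [0.9, -0.1]])    # estimated taps after iteration 2
v = np.array([1.0, 0.0, 0.0])  # true taps; L=3 > M=2, so v gets trimmed

v = v[:-(v.size - w.shape[1])]        # -> array([1., 0.])
print(np.mean((w - v) ** 2, axis=1))  # MSWE per iteration: [0.125 0.01 ]
```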
Wramberg/adaptfilt | adaptfilt/nlms.py | nlms | python | train | https://github.com/Wramberg/adaptfilt/blob/9bb943bb5e4162e10a8aaabfc68339b8fc06c11a/adaptfilt/nlms.py#L5-L154

```python
def nlms(u, d, M, step, eps=0.001, leak=0, initCoeffs=None, N=None,
         returnCoeffs=False):
    """
    Perform normalized least-mean-squares (NLMS) adaptive filtering on u to
    minimize error given by e=d-y, where y is the output of the adaptive
    filter.

    Parameters
    ----------
    u : array-like
        One-dimensional filter input.
    d : array-like
        One-dimensional desired signal, i.e., the output of the unknown FIR
        system which the adaptive filter should identify. Must have length
        >= len(u), or N+M-1 if the number of iterations is limited (via the
        N parameter).
    M : int
        Desired number of filter taps (desired filter order + 1), must be
        non-negative.
    step : float
        Step size of the algorithm, must be non-negative.

    Optional Parameters
    -------------------
    eps : float
        Regularization factor to avoid numerical issues when the power of
        the input is close to zero. Defaults to 0.001. Must be
        non-negative.
    leak : float
        Leakage factor, must be equal to or greater than zero and smaller
        than one. When greater than zero a leaky LMS filter is used.
        Defaults to 0, i.e., no leakage.
    initCoeffs : array-like
        Initial filter coefficients to use. Should match desired number of
        filter taps, defaults to zeros.
    N : int
        Number of iterations to run. Must be less than or equal to
        len(u)-M+1. Defaults to len(u)-M+1.
    returnCoeffs : boolean
        If true, will return all filter coefficients for every iteration in
        an N x M matrix. Does not include the initial coefficients. If
        false, only the latest coefficients in a vector of length M are
        returned. Defaults to false.

    Returns
    -------
    y : numpy.array
        Output values of LMS filter, array of length N.
    e : numpy.array
        Error signal, i.e., d-y. Array of length N.
    w : numpy.array
        Final filter coefficients in array of length M if returnCoeffs is
        False. NxM array containing all filter coefficients for all
        iterations otherwise.

    Raises
    ------
    TypeError
        If number of filter taps M is not type integer, number of
        iterations N is not type integer, or leakage leak is not type
        float/int.
    ValueError
        If number of iterations N is greater than len(u)-M, number of
        filter taps M is negative, or if step-size or leakage is outside
        the specified range.

    Minimal Working Example
    -----------------------
    >>> import numpy as np
    >>>
    >>> np.random.seed(1337)
    >>> ulen = 2000
    >>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
    >>> u = np.random.randn(ulen)
    >>> d = np.convolve(u, coeff)
    >>>
    >>> M = 20  # No. of taps
    >>> step = 1  # Step size
    >>> y, e, w = nlms(u, d, M, step)
    >>> print np.allclose(w, coeff)
    True

    Extended Example
    ----------------
    >>> import numpy as np
    >>>
    >>> np.random.seed(1337)
    >>> N = 1000
    >>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
    >>> u = np.random.randn(20000)  # Note len(u) >> N but we limit iterations
    >>> d = np.convolve(u, coeffs)
    >>>
    >>> M = 20  # No. of taps
    >>> step = 1  # Step size
    >>> y, e, w = nlms(u, d, M, step, N=N, returnCoeffs=True)
    >>> y.shape == (N,)
    True
    >>> e.shape == (N,)
    True
    >>> w.shape == (N, M)
    True
    >>> # Calculate mean square weight error
    >>> mswe = np.mean((w - coeffs)**2, axis=1)
    >>> # Should never increase, so the diff taken below should be <= 0
    >>> diff = np.diff(mswe)
    >>> (diff <= 1e-10).all()
    True
    """
    # Check epsilon
    _pchk.checkRegFactor(eps)
    # Num taps check
    _pchk.checkNumTaps(M)
    # Max iteration check
    if N is None:
        N = len(u)-M+1
    _pchk.checkIter(N, len(u)-M+1)
    # Check len(d)
    _pchk.checkDesiredSignal(d, N, M)
    # Step check
    _pchk.checkStep(step)
    # Leakage check
    _pchk.checkLeakage(leak)
    # Init. coeffs check
    if initCoeffs is None:
        initCoeffs = np.zeros(M)
    else:
        _pchk.checkInitCoeffs(initCoeffs, M)

    # Initialization
    y = np.zeros(N)  # Filter output
    e = np.zeros(N)  # Error signal
    w = initCoeffs   # Initial filter coeffs
    leakstep = (1 - step*leak)
    if returnCoeffs:
        W = np.zeros((N, M))  # Matrix to hold coeffs for each iteration

    # Perform filtering
    for n in xrange(N):
        x = np.flipud(u[n:n+M])  # Slice to get view of M latest datapoints
        y[n] = np.dot(x, w)
        e[n] = d[n+M-1] - y[n]

        normFactor = 1./(np.dot(x, x) + eps)
        w = leakstep * w + step * normFactor * x * e[n]
        y[n] = np.dot(x, w)
        if returnCoeffs:
            W[n] = w

    if returnCoeffs:
        w = W

    return y, e, w
```
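Written out, the update performed in the loop above is the leaky NLMS recursion, with $x_n$ the time-reversed input slice, $\mu$ the step size, $\lambda$ the leakage factor, and $\varepsilon$ the regularization factor:

$$
e_n = d_{n+M-1} - x_n^{\mathsf{T}} w_n, \qquad
w_{n+1} = (1 - \mu\lambda)\, w_n + \mu\, \frac{e_n\, x_n}{x_n^{\mathsf{T}} x_n + \varepsilon}
$$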
Wramberg/adaptfilt | adaptfilt/ap.py | ap | python

```python
def ap(u, d, M, step, K, eps=0.001, leak=0, initCoeffs=None, N=None,
       returnCoeffs=False):
    """
    Perform affine projection (AP) adaptive filtering on u to minimize error
    given by e=d-y, where y is the output of the adaptive filter.

    Parameters
    ----------
    u : array-like
        One-dimensional filter input.
    d : array-like
        One-dimensional desired signal, i.e., the output of the unknown FIR
        system which the adaptive filter should identify. Must have length
        >= len(u), or N+M-1 if the number of iterations is limited (via the
        N parameter).
    M : int
        Desired number of filter taps (desired filter order + 1), must be
        non-negative.
    step : float
        Step size of the algorithm, must be non-negative.
    K : int
        Projection order, must be an integer larger than zero.

    Optional Parameters
    -------------------
    eps : float
        Regularization factor to avoid numerical issues when the power of
        the input is close to zero. Defaults to 0.001. Must be
        non-negative.
    leak : float
        Leakage factor, must be equal to or greater than zero and smaller
        than one. When greater than zero a leaky LMS filter is used.
        Defaults to 0, i.e., no leakage.
    initCoeffs : array-like
        Initial filter coefficients to use. Should match desired number of
        filter taps, defaults to zeros.
    N : int
        Number of iterations to run. Must be less than or equal to
        len(u)-M+1. Defaults to len(u)-M-K+1.
    returnCoeffs : boolean
        If true, will return all filter coefficients for every iteration in
        an N x M matrix. Does not include the initial coefficients. If
        false, only the latest coefficients in a vector of length M are
        returned. Defaults to false.

    Returns
    -------
    y : numpy.array
        Output values of LMS filter, array of length N.
    e : numpy.array
        Error signal, i.e., d-y. Array of length N.
    w : numpy.array
        Final filter coefficients in array of length M if returnCoeffs is
        False. NxM array containing all filter coefficients for all
        iterations otherwise.

    Raises
    ------
    TypeError
        If number of filter taps M is not type integer, number of
        iterations N is not type integer, or leakage leak is not type
        float/int.
    ValueError
        If number of iterations N is greater than len(u)-M, number of
        filter taps M is negative, or if step-size or leakage is outside
        the specified range.

    Minimal Working Example
    -----------------------
    >>> import numpy as np
    >>>
    >>> np.random.seed(1337)
    >>> ulen = 2000
    >>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
    >>> u = np.random.randn(ulen)
    >>> d = np.convolve(u, coeff)
    >>>
    >>> M = 20  # No. of taps
    >>> step = 1  # Step size
    >>> K = 5  # Projection order
    >>> y, e, w = ap(u, d, M, step, K)
    >>> print np.allclose(w, coeff)
    True

    Extended Example
    ----------------
    >>> import numpy as np
    >>>
    >>> np.random.seed(1337)
    >>> N = 1000
    >>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
    >>> u = np.random.randn(20000)  # Note: len(u) >> N but we limit iterations
    >>> d = np.convolve(u, coeffs)
    >>>
    >>> M = 20  # No. of taps
    >>> step = 1  # Step size
    >>> K = 5  # Projection order
    >>> y, e, w = ap(u, d, M, step, K, N=N, returnCoeffs=True)
    >>> y.shape == (N,)
    True
    >>> e.shape == (N,)
    True
    >>> w.shape == (N, M)
    True
    >>> # Calculate mean square weight error
    >>> mswe = np.mean((w - coeffs)**2, axis=1)
    >>> # Should never increase, so the diff taken below should be <= 0
    >>> diff = np.diff(mswe)
    >>> (diff <= 1e-10).all()
    True
    """
    # Check epsilon
    _pchk.checkRegFactor(eps)
    # Check projection order
    _pchk.checkProjectOrder(K)
    # Num taps check
    _pchk.checkNumTaps(M)
    # Max iteration check
    if N is None:
        N = len(u)-M-K+1
    _pchk.checkIter(N, len(u)-M+1)
    # Check len(d)
    _pchk.checkDesiredSignal(d, N, M)
    # Step check
    _pchk.checkStep(step)
    # Leakage check
    _pchk.checkLeakage(leak)
    # Init. coeffs check
    if initCoeffs is None:
        initCoeffs = np.zeros(M)
    else:
        _pchk.checkInitCoeffs(initCoeffs, M)

    # Initialization
    y_out = np.zeros(N)  # Filter output
    e_out = np.zeros(N)  # Error signal
    w = initCoeffs       # Initial filter coeffs
    I = np.identity(K)   # Init. identity matrix for faster loop matrix inv.
    epsI = eps * np.identity(K)  # Init. epsilon identity matrix
    leakstep = (1 - step*leak)
    if returnCoeffs:
        W = np.zeros((N, M))  # Matrix to hold coeffs for each iteration

    # Perform filtering
    for n in xrange(N):
        # Generate U matrix and D vector with current data
        U = np.zeros((M, K))
        for k in np.arange(K):
            U[:, (K-k-1)] = u[n+k:n+M+k]
        U = np.flipud(U)
        D = np.flipud(d[n+M-1:n+M+K-1])

        # Filter
        y = np.dot(U.T, w)
        e = D - y
        y_out[n] = y[0]
        e_out[n] = e[0]

        # Normalization factor
        normFactor = np.linalg.solve(epsI + np.dot(U.T, U), I)
        # Naive alternative:
        # normFactor = np.linalg.inv(epsI + np.dot(U.T, U))
        w = leakstep * w + step * np.dot(U, np.dot(normFactor, e))
        if returnCoeffs:
            W[n] = w

    if returnCoeffs:
        w = W

    return y_out, e_out, w
```
returnCoeffs=False):
"""
Perform affine projection (AP) adaptive filtering on u to minimize error
given by e=d-y, where y is the output of the adaptive filter.
Parameters
----------
u : array-like
One-dimensional filter input.
d : array-like
One-dimensional desired signal, i.e., the output of the unknown FIR
system which the adaptive filter should identify. Must have length >=
len(u), or N+M-1 if number of iterations are limited (via the N
parameter).
M : int
Desired number of filter taps (desired filter order + 1), must be
non-negative.
step : float
Step size of the algorithm, must be non-negative.
K : int
Projection order, must be integer larger than zero.
Optional Parameters
-------------------
eps : float
Regularization factor to avoid numerical issues when power of input
is close to zero. Defaults to 0.001. Must be non-negative.
leak : float
Leakage factor, must be equal to or greater than zero and smaller than
one. When greater than zero a leaky LMS filter is used. Defaults to 0,
i.e., no leakage.
initCoeffs : array-like
Initial filter coefficients to use. Should match desired number of
filter taps, defaults to zeros.
N : int
Number of iterations to run. Must be less than or equal to len(u)-M+1.
Defaults to len(u)-M+1.
returnCoeffs : boolean
If true, will return all filter coefficients for every iteration in an
N x M matrix. Does not include the initial coefficients. If false, only
the latest coefficients in a vector of length M is returned. Defaults
to false.
Returns
-------
y : numpy.array
Output values of LMS filter, array of length N.
e : numpy.array
Error signal, i.e, d-y. Array of length N.
w : numpy.array
Final filter coefficients in array of length M if returnCoeffs is
False. NxM array containing all filter coefficients for all iterations
otherwise.
Raises
------
TypeError
If number of filter taps M is not type integer, number of iterations N
is not type integer, or leakage leak is not type float/int.
ValueError
If number of iterations N is greater than len(u)-M, number of filter
taps M is negative, or if step-size or leakage is outside specified
range.
Minimal Working Example
-----------------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> ulen = 2000
>>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
>>> u = np.random.randn(ulen)
>>> d = np.convolve(u, coeff)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> K = 5 # Projection order
>>> y, e, w = ap(u, d, M, step, K)
>>> print np.allclose(w, coeff)
True
Extended Example
----------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> N = 1000
>>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
>>> u = np.random.randn(20000) # Note: len(u) >> N but we limit iterations
>>> d = np.convolve(u, coeffs)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> K = 5 # Projection order
>>> y, e, w = ap(u, d, M, step, K, N=N, returnCoeffs=True)
>>> y.shape == (N,)
True
>>> e.shape == (N,)
True
>>> w.shape == (N, M)
True
>>> # Calculate mean square weight error
>>> mswe = np.mean((w - coeffs)**2, axis=1)
    >>> # MSWE should never increase, so every element of diff should be <= 0
>>> diff = np.diff(mswe)
>>> (diff <= 1e-10).all()
True
"""
# Check epsilon
_pchk.checkRegFactor(eps)
# Check projection order
_pchk.checkProjectOrder(K)
# Num taps check
_pchk.checkNumTaps(M)
# Max iteration check
if N is None:
N = len(u)-M-K+1
_pchk.checkIter(N, len(u)-M+1)
# Check len(d)
_pchk.checkDesiredSignal(d, N, M)
# Step check
_pchk.checkStep(step)
# Leakage check
_pchk.checkLeakage(leak)
# Init. coeffs check
if initCoeffs is None:
initCoeffs = np.zeros(M)
else:
_pchk.checkInitCoeffs(initCoeffs, M)
# Initialization
y_out = np.zeros(N) # Filter output
e_out = np.zeros(N) # Error signal
w = initCoeffs # Initial filter coeffs
I = np.identity(K) # Init. identity matrix for faster loop matrix inv.
    epsI = eps * np.identity(K) # Init. epsilon identity matrix
leakstep = (1 - step*leak)
if returnCoeffs:
W = np.zeros((N, M)) # Matrix to hold coeffs for each iteration
# Perform filtering
    for n in range(N):
# Generate U matrix and D vector with current data
U = np.zeros((M, K))
for k in np.arange(K):
U[:, (K-k-1)] = u[n+k:n+M+k]
U = np.flipud(U)
D = np.flipud(d[n+M-1:n+M+K-1])
# Filter
y = np.dot(U.T, w)
e = D - y
y_out[n] = y[0]
e_out[n] = e[0]
# Normalization factor
normFactor = np.linalg.solve(epsI + np.dot(U.T, U), I)
# Naive alternative
# normFactor = np.linalg.inv(epsI + np.dot(U.T, U))
w = leakstep * w + step * np.dot(U, np.dot(normFactor, e))
if returnCoeffs:
W[n] = w
if returnCoeffs:
w = W
return y_out, e_out, w | [
"def",
"ap",
"(",
"u",
",",
"d",
",",
"M",
",",
"step",
",",
"K",
",",
"eps",
"=",
"0.001",
",",
"leak",
"=",
"0",
",",
"initCoeffs",
"=",
"None",
",",
"N",
"=",
"None",
",",
"returnCoeffs",
"=",
"False",
")",
":",
"# Check epsilon",
"_pchk",
".",
"checkRegFactor",
"(",
"eps",
")",
"# Check projection order",
"_pchk",
".",
"checkProjectOrder",
"(",
"K",
")",
"# Num taps check",
"_pchk",
".",
"checkNumTaps",
"(",
"M",
")",
"# Max iteration check",
"if",
"N",
"is",
"None",
":",
"N",
"=",
"len",
"(",
"u",
")",
"-",
"M",
"-",
"K",
"+",
"1",
"_pchk",
".",
"checkIter",
"(",
"N",
",",
"len",
"(",
"u",
")",
"-",
"M",
"+",
"1",
")",
"# Check len(d)",
"_pchk",
".",
"checkDesiredSignal",
"(",
"d",
",",
"N",
",",
"M",
")",
"# Step check",
"_pchk",
".",
"checkStep",
"(",
"step",
")",
"# Leakage check",
"_pchk",
".",
"checkLeakage",
"(",
"leak",
")",
"# Init. coeffs check",
"if",
"initCoeffs",
"is",
"None",
":",
"initCoeffs",
"=",
"np",
".",
"zeros",
"(",
"M",
")",
"else",
":",
"_pchk",
".",
"checkInitCoeffs",
"(",
"initCoeffs",
",",
"M",
")",
"# Initialization",
"y_out",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"# Filter output",
"e_out",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"# Error signal",
"w",
"=",
"initCoeffs",
"# Initial filter coeffs",
"I",
"=",
"np",
".",
"identity",
"(",
"K",
")",
"# Init. identity matrix for faster loop matrix inv.",
"epsI",
"=",
"eps",
"*",
"np",
".",
"identity",
"(",
"K",
")",
"# Init. epsilon identiy matrix",
"leakstep",
"=",
"(",
"1",
"-",
"step",
"*",
"leak",
")",
"if",
"returnCoeffs",
":",
"W",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"M",
")",
")",
"# Matrix to hold coeffs for each iteration",
"# Perform filtering",
"for",
"n",
"in",
"xrange",
"(",
"N",
")",
":",
"# Generate U matrix and D vector with current data",
"U",
"=",
"np",
".",
"zeros",
"(",
"(",
"M",
",",
"K",
")",
")",
"for",
"k",
"in",
"np",
".",
"arange",
"(",
"K",
")",
":",
"U",
"[",
":",
",",
"(",
"K",
"-",
"k",
"-",
"1",
")",
"]",
"=",
"u",
"[",
"n",
"+",
"k",
":",
"n",
"+",
"M",
"+",
"k",
"]",
"U",
"=",
"np",
".",
"flipud",
"(",
"U",
")",
"D",
"=",
"np",
".",
"flipud",
"(",
"d",
"[",
"n",
"+",
"M",
"-",
"1",
":",
"n",
"+",
"M",
"+",
"K",
"-",
"1",
"]",
")",
"# Filter",
"y",
"=",
"np",
".",
"dot",
"(",
"U",
".",
"T",
",",
"w",
")",
"e",
"=",
"D",
"-",
"y",
"y_out",
"[",
"n",
"]",
"=",
"y",
"[",
"0",
"]",
"e_out",
"[",
"n",
"]",
"=",
"e",
"[",
"0",
"]",
"# Normalization factor",
"normFactor",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"epsI",
"+",
"np",
".",
"dot",
"(",
"U",
".",
"T",
",",
"U",
")",
",",
"I",
")",
"# Naive alternative",
"# normFactor = np.linalg.inv(epsI + np.dot(U.T, U))",
"w",
"=",
"leakstep",
"*",
"w",
"+",
"step",
"*",
"np",
".",
"dot",
"(",
"U",
",",
"np",
".",
"dot",
"(",
"normFactor",
",",
"e",
")",
")",
"if",
"returnCoeffs",
":",
"W",
"[",
"n",
"]",
"=",
"w",
"if",
"returnCoeffs",
":",
"w",
"=",
"W",
"return",
"y_out",
",",
"e_out",
",",
"w"
] | Perform affine projection (AP) adaptive filtering on u to minimize error
given by e=d-y, where y is the output of the adaptive filter.
Parameters
----------
u : array-like
One-dimensional filter input.
d : array-like
One-dimensional desired signal, i.e., the output of the unknown FIR
system which the adaptive filter should identify. Must have length >=
        len(u), or N+M-1 if the number of iterations is limited (via the N
parameter).
M : int
Desired number of filter taps (desired filter order + 1), must be
non-negative.
step : float
Step size of the algorithm, must be non-negative.
K : int
Projection order, must be integer larger than zero.
Optional Parameters
-------------------
eps : float
Regularization factor to avoid numerical issues when power of input
is close to zero. Defaults to 0.001. Must be non-negative.
leak : float
Leakage factor, must be equal to or greater than zero and smaller than
        one. When greater than zero, a leaky AP filter is used. Defaults to 0,
i.e., no leakage.
initCoeffs : array-like
Initial filter coefficients to use. Should match desired number of
filter taps, defaults to zeros.
N : int
Number of iterations to run. Must be less than or equal to len(u)-M+1.
        Defaults to len(u)-M-K+1.
returnCoeffs : boolean
If true, will return all filter coefficients for every iteration in an
N x M matrix. Does not include the initial coefficients. If false, only
        the latest coefficients in a vector of length M are returned. Defaults
to false.
Returns
-------
y : numpy.array
        Output values of the AP filter, array of length N.
e : numpy.array
Error signal, i.e, d-y. Array of length N.
w : numpy.array
Final filter coefficients in array of length M if returnCoeffs is
False. NxM array containing all filter coefficients for all iterations
otherwise.
Raises
------
TypeError
If number of filter taps M is not type integer, number of iterations N
is not type integer, or leakage leak is not type float/int.
ValueError
        If number of iterations N is greater than len(u)-M+1, number of filter
taps M is negative, or if step-size or leakage is outside specified
range.
Minimal Working Example
-----------------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> ulen = 2000
>>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
>>> u = np.random.randn(ulen)
>>> d = np.convolve(u, coeff)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> K = 5 # Projection order
>>> y, e, w = ap(u, d, M, step, K)
    >>> print(np.allclose(w, coeff))
True
Extended Example
----------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> N = 1000
>>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
>>> u = np.random.randn(20000) # Note: len(u) >> N but we limit iterations
>>> d = np.convolve(u, coeffs)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> K = 5 # Projection order
>>> y, e, w = ap(u, d, M, step, K, N=N, returnCoeffs=True)
>>> y.shape == (N,)
True
>>> e.shape == (N,)
True
>>> w.shape == (N, M)
True
>>> # Calculate mean square weight error
>>> mswe = np.mean((w - coeffs)**2, axis=1)
    >>> # MSWE should never increase, so every element of diff should be <= 0
>>> diff = np.diff(mswe)
>>> (diff <= 1e-10).all()
True | [
"Perform",
"affine",
"projection",
"(",
"AP",
")",
"adaptive",
"filtering",
"on",
"u",
"to",
"minimize",
"error",
"given",
"by",
"e",
"=",
"d",
"-",
"y",
"where",
"y",
"is",
"the",
"output",
"of",
"the",
"adaptive",
"filter",
"."
] | train | https://github.com/Wramberg/adaptfilt/blob/9bb943bb5e4162e10a8aaabfc68339b8fc06c11a/adaptfilt/ap.py#L5-L176 |
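In the docstring's notation (step size mu, leakage factor lambda = leak, regularization eps), the loop body of ap computes, at each iteration n, the classical regularized leaky affine projection recursion:

    w_{n+1} = (1 - \mu \lambda)\, w_n + \mu\, U_n \left(\epsilon I + U_n^{\mathsf{T}} U_n\right)^{-1} \left(D_n - U_n^{\mathsf{T}} w_n\right)

where U_n is the M x K input-data matrix and D_n is the K-vector of desired samples assembled at the top of the loop. Solving the K x K system with np.linalg.solve, rather than forming the explicit inverse as in the commented-out "naive alternative", is the numerically preferable way to obtain the normalization factor.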
vmalyi/adb_android | adb_android/adb_android.py | bugreport | def bugreport(dest_file="default.log"):
"""
    Prints dumpsys, dumpstate, and logcat data to the given file, for the purposes of bug reporting
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
# We have to check if device is available or not before executing this command
# as adb bugreport will wait-for-device infinitely and does not come out of
# loop
# Execute only if device is available only
if _isDeviceAvailable():
result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
return (result, "Success: Bug report saved to: " + dest_file)
else:
return (0, "Device Not Found") | python | def bugreport(dest_file="default.log"):
"""
    Prints dumpsys, dumpstate, and logcat data to the given file, for the purposes of bug reporting
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
# We have to check if device is available or not before executing this command
# as adb bugreport will wait-for-device infinitely and does not come out of
# loop
# Execute only if device is available only
if _isDeviceAvailable():
result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
return (result, "Success: Bug report saved to: " + dest_file)
else:
return (0, "Device Not Found") | [
"def",
"bugreport",
"(",
"dest_file",
"=",
"\"default.log\"",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_BUGREPORT",
"]",
"try",
":",
"dest_file_handler",
"=",
"open",
"(",
"dest_file",
",",
"\"w\"",
")",
"except",
"IOError",
":",
"print",
"(",
"\"IOError: Failed to create a log file\"",
")",
"# We have to check if device is available or not before executing this command",
"# as adb bugreport will wait-for-device infinitely and does not come out of ",
"# loop",
"# Execute only if device is available only",
"if",
"_isDeviceAvailable",
"(",
")",
":",
"result",
"=",
"_exec_command_to_file",
"(",
"adb_full_cmd",
",",
"dest_file_handler",
")",
"return",
"(",
"result",
",",
"\"Success: Bug report saved to: \"",
"+",
"dest_file",
")",
"else",
":",
"return",
"(",
"0",
",",
"\"Device Not Found\"",
")"
] | Prints dumpsys, dumpstate, and logcat data to the given file, for the purposes of bug reporting
:return: result of _exec_command() execution | [
"Prints",
"dumpsys",
"dumpstate",
"and",
"logcat",
"data",
"to",
"the",
"screen",
"for",
"the",
"purposes",
"of",
"bug",
"reporting",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L28-L47 |
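Since bugreport returns a (status, message) tuple instead of raising on failure, callers branch on the status code. A minimal usage sketch; the import path is an assumption based on the repository layout:

from adb_android import adb_android as adb  # assumed import path

status, message = adb.bugreport('device_bug.log')
if status == 0:
    print(message)  # e.g. "Success: Bug report saved to: device_bug.log"
else:
    print(message)  # e.g. "Device Not Found"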
vmalyi/adb_android | adb_android/adb_android.py | push | def push(src, dest):
"""
Push object from host to target
:param src: string path to source object on host
:param dest: string destination path on target
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PUSH, src, dest]
return _exec_command(adb_full_cmd) | python | def push(src, dest):
"""
Push object from host to target
:param src: string path to source object on host
:param dest: string destination path on target
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PUSH, src, dest]
return _exec_command(adb_full_cmd) | [
"def",
"push",
"(",
"src",
",",
"dest",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_PUSH",
",",
"src",
",",
"dest",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Push object from host to target
:param src: string path to source object on host
:param dest: string destination path on target
:return: result of _exec_command() execution | [
"Push",
"object",
"from",
"host",
"to",
"target",
":",
"param",
"src",
":",
"string",
"path",
"to",
"source",
"object",
"on",
"host",
":",
"param",
"dest",
":",
"string",
"destination",
"path",
"on",
"target",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L50-L58 |
vmalyi/adb_android | adb_android/adb_android.py | pull | def pull(src, dest):
"""
Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PULL, src, dest]
return _exec_command(adb_full_cmd) | python | def pull(src, dest):
"""
Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PULL, src, dest]
return _exec_command(adb_full_cmd) | [
"def",
"pull",
"(",
"src",
",",
"dest",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_PULL",
",",
"src",
",",
"dest",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution | [
"Pull",
"object",
"from",
"target",
"to",
"host",
":",
"param",
"src",
":",
"string",
"path",
"of",
"object",
"on",
"target",
":",
"param",
"dest",
":",
"string",
"destination",
"path",
"on",
"host",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L61-L69 |
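push and pull mirror each other, and both return the (status, output) pair produced by _exec_command further below. A usage sketch; the file paths are illustrative:

from adb_android import adb_android as adb  # assumed import path

adb.push('config.json', '/sdcard/config.json')  # host -> device
adb.pull('/sdcard/log.txt', 'log.txt')          # device -> host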
vmalyi/adb_android | adb_android/adb_android.py | devices | def devices(opts=[]):
"""
Get list of all available devices including emulators
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_DEVICES, _convert_opts(opts)]
return _exec_command(adb_full_cmd) | python | def devices(opts=[]):
"""
Get list of all available devices including emulators
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_DEVICES, _convert_opts(opts)]
return _exec_command(adb_full_cmd) | [
"def",
"devices",
"(",
"opts",
"=",
"[",
"]",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_DEVICES",
",",
"_convert_opts",
"(",
"opts",
")",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Get list of all available devices including emulators
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution | [
"Get",
"list",
"of",
"all",
"available",
"devices",
"including",
"emulators",
":",
"param",
"opts",
":",
"list",
"command",
"options",
"(",
"e",
".",
"g",
".",
"[",
"-",
"r",
"-",
"a",
"]",
")",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L72-L79 |
vmalyi/adb_android | adb_android/adb_android.py | shell | def shell(cmd):
"""
Execute shell command on target
:param cmd: string shell command to execute
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_SHELL, cmd]
return _exec_command(adb_full_cmd) | python | def shell(cmd):
"""
Execute shell command on target
:param cmd: string shell command to execute
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_SHELL, cmd]
return _exec_command(adb_full_cmd) | [
"def",
"shell",
"(",
"cmd",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_SHELL",
",",
"cmd",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Execute shell command on target
:param cmd: string shell command to execute
:return: result of _exec_command() execution | [
"Execute",
"shell",
"command",
"on",
"target",
":",
"param",
"cmd",
":",
"string",
"shell",
"command",
"to",
"execute",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L82-L89 |
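devices and shell together cover the common interactive workflow: enumerate attached targets, then run a command on one. A sketch; the '-l' flag and the key event code are illustrative but standard adb options:

from adb_android import adb_android as adb  # assumed import path

status, listing = adb.devices(opts=['-l'])  # '-l' adds device descriptions
adb.shell('input keyevent 26')              # e.g. simulate the power button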
vmalyi/adb_android | adb_android/adb_android.py | install | def install(apk, opts=[]):
"""
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_INSTALL, _convert_opts(opts), apk]
return _exec_command(adb_full_cmd) | python | def install(apk, opts=[]):
"""
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_INSTALL, _convert_opts(opts), apk]
return _exec_command(adb_full_cmd) | [
"def",
"install",
"(",
"apk",
",",
"opts",
"=",
"[",
"]",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_INSTALL",
",",
"_convert_opts",
"(",
"opts",
")",
",",
"apk",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution | [
"Install",
"*",
".",
"apk",
"on",
"target",
":",
"param",
"apk",
":",
"string",
"path",
"to",
"apk",
"on",
"host",
"to",
"install",
":",
"param",
"opts",
":",
"list",
"command",
"options",
"(",
"e",
".",
"g",
".",
"[",
"-",
"r",
"-",
"a",
"]",
")",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L92-L100 |
vmalyi/adb_android | adb_android/adb_android.py | uninstall | def uninstall(app, opts=[]):
"""
Uninstall app from target
:param app: app name to uninstall from target (e.g. "com.example.android.valid")
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_UNINSTALL, _convert_opts(opts), app]
return _exec_command(adb_full_cmd) | python | def uninstall(app, opts=[]):
"""
Uninstall app from target
:param app: app name to uninstall from target (e.g. "com.example.android.valid")
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_UNINSTALL, _convert_opts(opts), app]
return _exec_command(adb_full_cmd) | [
"def",
"uninstall",
"(",
"app",
",",
"opts",
"=",
"[",
"]",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_UNINSTALL",
",",
"_convert_opts",
"(",
"opts",
")",
",",
"app",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Uninstall app from target
:param app: app name to uninstall from target (e.g. "com.example.android.valid")
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution | [
"Uninstall",
"app",
"from",
"target",
":",
"param",
"app",
":",
"app",
"name",
"to",
"uninstall",
"from",
"target",
"(",
"e",
".",
"g",
".",
"com",
".",
"example",
".",
"android",
".",
"valid",
")",
":",
"param",
"opts",
":",
"list",
"command",
"options",
"(",
"e",
".",
"g",
".",
"[",
"-",
"r",
"-",
"a",
"]",
")",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L103-L111 |
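Note the asymmetry: install takes a host-side path to an .apk file, while uninstall takes the application's package name. A sketch; the path and package name are illustrative:

from adb_android import adb_android as adb  # assumed import path

adb.install('build/app-debug.apk', opts=['-r'])  # '-r' replaces an existing app, keeping its data
adb.uninstall('com.example.android.valid')       # package name, as in the docstring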
vmalyi/adb_android | adb_android/adb_android.py | sync | def sync():
"""
Copy host->device only if changed
:return: result of _exec_command() execution
"""
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_SHELL, v.ADB_COMMAND_SYNC]
return _exec_command(adb_full_cmd) | python | def sync():
"""
Copy host->device only if changed
:return: result of _exec_command() execution
"""
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_SHELL, v.ADB_COMMAND_SYNC]
return _exec_command(adb_full_cmd) | [
"def",
"sync",
"(",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_SHELL",
",",
"v",
".",
"ADB_COMMAND_SYNC",
"]",
"return",
"_exec_command",
"(",
"adb_full_cmd",
")"
] | Copy host->device only if changed
:return: result of _exec_command() execution | [
"Copy",
"host",
"-",
">",
"device",
"only",
"if",
"changed",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L132-L138 |
vmalyi/adb_android | adb_android/adb_android.py | _exec_command | def _exec_command(adb_cmd):
"""
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: string '0' and shell command output if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = check_output(final_adb_cmd, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = 0, output
print('\n' + result[1])
return result | python | def _exec_command(adb_cmd):
"""
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: string '0' and shell command output if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = check_output(final_adb_cmd, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = 0, output
print('\n' + result[1])
return result | [
"def",
"_exec_command",
"(",
"adb_cmd",
")",
":",
"t",
"=",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"final_adb_cmd",
"=",
"[",
"]",
"for",
"e",
"in",
"adb_cmd",
":",
"if",
"e",
"!=",
"''",
":",
"# avoid items with empty string...",
"final_adb_cmd",
".",
"append",
"(",
"e",
")",
"# ... so that final command doesn't",
"# contain extra spaces",
"print",
"(",
"'\\n*** Executing '",
"+",
"' '",
".",
"join",
"(",
"adb_cmd",
")",
"+",
"' '",
"+",
"'command'",
")",
"try",
":",
"output",
"=",
"check_output",
"(",
"final_adb_cmd",
",",
"stderr",
"=",
"t",
")",
"except",
"CalledProcessError",
"as",
"e",
":",
"t",
".",
"seek",
"(",
"0",
")",
"result",
"=",
"e",
".",
"returncode",
",",
"t",
".",
"read",
"(",
")",
"else",
":",
"result",
"=",
"0",
",",
"output",
"print",
"(",
"'\\n'",
"+",
"result",
"[",
"1",
"]",
")",
"return",
"result"
] | Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: string '0' and shell command output if successful, otherwise
raise CalledProcessError exception and return error code | [
"Format",
"adb",
"command",
"and",
"execute",
"it",
"in",
"shell",
":",
"param",
"adb_cmd",
":",
"list",
"adb",
"command",
"to",
"execute",
":",
"return",
":",
"string",
"0",
"and",
"shell",
"command",
"output",
"if",
"successful",
"otherwise",
"raise",
"CalledProcessError",
"exception",
"and",
"return",
"error",
"code"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L178-L202 |
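Because _exec_command captures stderr into a temporary file and returns it alongside the exit code, callers can branch on the status without a try/except of their own. A caller sketch using the module's own command constants (under Python 3 the second element is bytes, since it comes from check_output):

status, output = _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_DEVICES])
if status != 0:
    raise RuntimeError(output)  # output holds the captured stderr on failure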
vmalyi/adb_android | adb_android/adb_android.py | _exec_command_to_file | def _exec_command_to_file(adb_cmd, dest_file_handler):
"""
Format adb command and execute it in shell and redirects to a file
:param adb_cmd: list adb command to execute
:param dest_file_handler: file handler to which output will be redirected
:return: string '0' and writes shell command output to file if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = call(final_adb_cmd, stdout=dest_file_handler, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = output
dest_file_handler.close()
return result | python | def _exec_command_to_file(adb_cmd, dest_file_handler):
"""
Format adb command and execute it in shell and redirects to a file
:param adb_cmd: list adb command to execute
:param dest_file_handler: file handler to which output will be redirected
:return: string '0' and writes shell command output to file if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = call(final_adb_cmd, stdout=dest_file_handler, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = output
dest_file_handler.close()
return result | [
"def",
"_exec_command_to_file",
"(",
"adb_cmd",
",",
"dest_file_handler",
")",
":",
"t",
"=",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"final_adb_cmd",
"=",
"[",
"]",
"for",
"e",
"in",
"adb_cmd",
":",
"if",
"e",
"!=",
"''",
":",
"# avoid items with empty string...",
"final_adb_cmd",
".",
"append",
"(",
"e",
")",
"# ... so that final command doesn't",
"# contain extra spaces",
"print",
"(",
"'\\n*** Executing '",
"+",
"' '",
".",
"join",
"(",
"adb_cmd",
")",
"+",
"' '",
"+",
"'command'",
")",
"try",
":",
"output",
"=",
"call",
"(",
"final_adb_cmd",
",",
"stdout",
"=",
"dest_file_handler",
",",
"stderr",
"=",
"t",
")",
"except",
"CalledProcessError",
"as",
"e",
":",
"t",
".",
"seek",
"(",
"0",
")",
"result",
"=",
"e",
".",
"returncode",
",",
"t",
".",
"read",
"(",
")",
"else",
":",
"result",
"=",
"output",
"dest_file_handler",
".",
"close",
"(",
")",
"return",
"result"
] | Format adb command and execute it in shell and redirects to a file
:param adb_cmd: list adb command to execute
:param dest_file_handler: file handler to which output will be redirected
:return: string '0' and writes shell command output to file if successful, otherwise
raise CalledProcessError exception and return error code | [
"Format",
"adb",
"command",
"and",
"execute",
"it",
"in",
"shell",
"and",
"redirects",
"to",
"a",
"file",
":",
"param",
"adb_cmd",
":",
"list",
"adb",
"command",
"to",
"execute",
":",
"param",
"dest_file_handler",
":",
"file",
"handler",
"to",
"which",
"output",
"will",
"be",
"redirected",
":",
"return",
":",
"string",
"0",
"and",
"writes",
"shell",
"command",
"output",
"to",
"file",
"if",
"successful",
"otherwise",
"raise",
"CalledProcessError",
"exception",
"and",
"return",
"error",
"code"
] | train | https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L205-L230 |
mlavin/django-selectable | selectable/forms/fields.py | BaseAutoCompleteField.has_changed | def has_changed(self, initial, data):
"Detects if the data was changed. This is added in 1.6."
if initial is None and data is None:
return False
if data and not hasattr(data, '__iter__'):
data = self.widget.decompress(data)
initial = self.to_python(initial)
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
if isinstance(data, Model) and isinstance(initial, Model):
return model_vars(data) != model_vars(initial)
else:
return data != initial | python | def has_changed(self, initial, data):
"Detects if the data was changed. This is added in 1.6."
if initial is None and data is None:
return False
if data and not hasattr(data, '__iter__'):
data = self.widget.decompress(data)
initial = self.to_python(initial)
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
if isinstance(data, Model) and isinstance(initial, Model):
return model_vars(data) != model_vars(initial)
else:
return data != initial | [
"def",
"has_changed",
"(",
"self",
",",
"initial",
",",
"data",
")",
":",
"if",
"initial",
"is",
"None",
"and",
"data",
"is",
"None",
":",
"return",
"False",
"if",
"data",
"and",
"not",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"data",
"=",
"self",
".",
"widget",
".",
"decompress",
"(",
"data",
")",
"initial",
"=",
"self",
".",
"to_python",
"(",
"initial",
")",
"data",
"=",
"self",
".",
"to_python",
"(",
"data",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_coerce'",
")",
":",
"data",
"=",
"self",
".",
"_coerce",
"(",
"data",
")",
"if",
"isinstance",
"(",
"data",
",",
"Model",
")",
"and",
"isinstance",
"(",
"initial",
",",
"Model",
")",
":",
"return",
"model_vars",
"(",
"data",
")",
"!=",
"model_vars",
"(",
"initial",
")",
"else",
":",
"return",
"data",
"!=",
"initial"
] | Detects if the data was changed. This is added in 1.6. | [
"Detects",
"if",
"the",
"data",
"was",
"changed",
".",
"This",
"is",
"added",
"in",
"1",
".",
"6",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/forms/fields.py#L29-L42 |
mlavin/django-selectable | selectable/decorators.py | results_decorator | def results_decorator(func):
"""
Helper for constructing simple decorators around Lookup.results.
func is a function which takes a request as the first parameter. If func
    returns an HttpResponse, it is returned; otherwise the original Lookup.results
is returned.
"""
    # Wrap function to maintain the original doc string, etc
@wraps(func)
def decorator(lookup_cls):
# Construct a class decorator from the original function
original = lookup_cls.results
def inner(self, request):
# Wrap lookup_cls.results by first calling func and checking the result
result = func(request)
if isinstance(result, HttpResponse):
return result
return original(self, request)
# Replace original lookup_cls.results with wrapped version
lookup_cls.results = inner
return lookup_cls
# Return the constructed decorator
return decorator | python | def results_decorator(func):
"""
Helper for constructing simple decorators around Lookup.results.
func is a function which takes a request as the first parameter. If func
    returns an HttpResponse, it is returned; otherwise the original Lookup.results
is returned.
"""
    # Wrap function to maintain the original doc string, etc
@wraps(func)
def decorator(lookup_cls):
# Construct a class decorator from the original function
original = lookup_cls.results
def inner(self, request):
# Wrap lookup_cls.results by first calling func and checking the result
result = func(request)
if isinstance(result, HttpResponse):
return result
return original(self, request)
# Replace original lookup_cls.results with wrapped version
lookup_cls.results = inner
return lookup_cls
# Return the constructed decorator
return decorator | [
"def",
"results_decorator",
"(",
"func",
")",
":",
"# Wrap function to maintian the original doc string, etc",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"lookup_cls",
")",
":",
"# Construct a class decorator from the original function",
"original",
"=",
"lookup_cls",
".",
"results",
"def",
"inner",
"(",
"self",
",",
"request",
")",
":",
"# Wrap lookup_cls.results by first calling func and checking the result",
"result",
"=",
"func",
"(",
"request",
")",
"if",
"isinstance",
"(",
"result",
",",
"HttpResponse",
")",
":",
"return",
"result",
"return",
"original",
"(",
"self",
",",
"request",
")",
"# Replace original lookup_cls.results with wrapped version",
"lookup_cls",
".",
"results",
"=",
"inner",
"return",
"lookup_cls",
"# Return the constructed decorator",
"return",
"decorator"
] | Helper for constructing simple decorators around Lookup.results.
func is a function which takes a request as the first parameter. If func
    returns an HttpResponse, it is returned; otherwise the original Lookup.results
is returned. | [
"Helper",
"for",
"constructing",
"simple",
"decorators",
"around",
"Lookup",
".",
"results",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/decorators.py#L16-L39 |
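results_decorator turns any request-checking function into a class decorator for lookups, which is exactly how login_required and staff_member_required below are built. A custom check follows the same shape; the AJAX-only policy here is an invented example:

from django.http import HttpResponseBadRequest
from selectable.base import LookupBase
from selectable.decorators import results_decorator

@results_decorator
def ajax_required(request):
    # Reject lookup requests that did not come via XMLHttpRequest (invented policy).
    if request.headers.get('x-requested-with') != 'XMLHttpRequest':
        return HttpResponseBadRequest()

@ajax_required
class FruitLookup(LookupBase):  # hypothetical lookup class
    pass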
mlavin/django-selectable | selectable/decorators.py | login_required | def login_required(request):
"Lookup decorator to require the user to be authenticated."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) | python | def login_required(request):
"Lookup decorator to require the user to be authenticated."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) | [
"def",
"login_required",
"(",
"request",
")",
":",
"user",
"=",
"getattr",
"(",
"request",
",",
"'user'",
",",
"None",
")",
"if",
"user",
"is",
"None",
"or",
"not",
"user",
".",
"is_authenticated",
":",
"return",
"HttpResponse",
"(",
"status",
"=",
"401",
")"
] | Lookup decorator to require the user to be authenticated. | [
"Lookup",
"decorator",
"to",
"require",
"the",
"user",
"to",
"be",
"authenticated",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/decorators.py#L50-L54 |
mlavin/django-selectable | selectable/decorators.py | staff_member_required | def staff_member_required(request):
"Lookup decorator to require the user is a staff member."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) # Unauthorized
elif not user.is_staff:
return HttpResponseForbidden() | python | def staff_member_required(request):
"Lookup decorator to require the user is a staff member."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) # Unauthorized
elif not user.is_staff:
return HttpResponseForbidden() | [
"def",
"staff_member_required",
"(",
"request",
")",
":",
"user",
"=",
"getattr",
"(",
"request",
",",
"'user'",
",",
"None",
")",
"if",
"user",
"is",
"None",
"or",
"not",
"user",
".",
"is_authenticated",
":",
"return",
"HttpResponse",
"(",
"status",
"=",
"401",
")",
"# Unauthorized",
"elif",
"not",
"user",
".",
"is_staff",
":",
"return",
"HttpResponseForbidden",
"(",
")"
] | Lookup decorator to require the user is a staff member. | [
"Lookup",
"decorator",
"to",
"require",
"the",
"user",
"is",
"a",
"staff",
"member",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/decorators.py#L58-L64 |
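Both decorators are applied to the lookup class itself, not to a view function. A sketch; the model and search fields are placeholders:

from selectable.base import ModelLookup
from selectable.decorators import login_required
from myapp.models import Fruit  # hypothetical model

@login_required
class FruitLookup(ModelLookup):
    model = Fruit
    search_fields = ('name__icontains',)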
mlavin/django-selectable | selectable/base.py | LookupBase.format_item | def format_item(self, item):
"Construct result dictionary for the match item."
result = {
'id': self.get_item_id(item),
'value': self.get_item_value(item),
'label': self.get_item_label(item),
}
for key in settings.SELECTABLE_ESCAPED_KEYS:
if key in result:
result[key] = conditional_escape(result[key])
return result | python | def format_item(self, item):
"Construct result dictionary for the match item."
result = {
'id': self.get_item_id(item),
'value': self.get_item_value(item),
'label': self.get_item_label(item),
}
for key in settings.SELECTABLE_ESCAPED_KEYS:
if key in result:
result[key] = conditional_escape(result[key])
return result | [
"def",
"format_item",
"(",
"self",
",",
"item",
")",
":",
"result",
"=",
"{",
"'id'",
":",
"self",
".",
"get_item_id",
"(",
"item",
")",
",",
"'value'",
":",
"self",
".",
"get_item_value",
"(",
"item",
")",
",",
"'label'",
":",
"self",
".",
"get_item_label",
"(",
"item",
")",
",",
"}",
"for",
"key",
"in",
"settings",
".",
"SELECTABLE_ESCAPED_KEYS",
":",
"if",
"key",
"in",
"result",
":",
"result",
"[",
"key",
"]",
"=",
"conditional_escape",
"(",
"result",
"[",
"key",
"]",
")",
"return",
"result"
] | Construct result dictionary for the match item. | [
"Construct",
"result",
"dictionary",
"for",
"the",
"match",
"item",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/base.py#L67-L77 |
mlavin/django-selectable | selectable/base.py | LookupBase.paginate_results | def paginate_results(self, results, options):
"Return a django.core.paginator.Page of results."
limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
paginator = Paginator(results, limit)
page = options.get('page', 1)
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
return results | python | def paginate_results(self, results, options):
"Return a django.core.paginator.Page of results."
limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
paginator = Paginator(results, limit)
page = options.get('page', 1)
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
return results | [
"def",
"paginate_results",
"(",
"self",
",",
"results",
",",
"options",
")",
":",
"limit",
"=",
"options",
".",
"get",
"(",
"'limit'",
",",
"settings",
".",
"SELECTABLE_MAX_LIMIT",
")",
"paginator",
"=",
"Paginator",
"(",
"results",
",",
"limit",
")",
"page",
"=",
"options",
".",
"get",
"(",
"'page'",
",",
"1",
")",
"try",
":",
"results",
"=",
"paginator",
".",
"page",
"(",
"page",
")",
"except",
"(",
"EmptyPage",
",",
"InvalidPage",
")",
":",
"results",
"=",
"paginator",
".",
"page",
"(",
"paginator",
".",
"num_pages",
")",
"return",
"results"
] | Return a django.core.paginator.Page of results. | [
"Return",
"a",
"django",
".",
"core",
".",
"paginator",
".",
"Page",
"of",
"results",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/base.py#L79-L88 |
mlavin/django-selectable | selectable/base.py | LookupBase.results | def results(self, request):
"Match results to given term and return the serialized HttpResponse."
results = {}
form = self.form(request.GET)
if form.is_valid():
options = form.cleaned_data
term = options.get('term', '')
raw_data = self.get_query(request, term)
results = self.format_results(raw_data, options)
return self.response(results) | python | def results(self, request):
"Match results to given term and return the serialized HttpResponse."
results = {}
form = self.form(request.GET)
if form.is_valid():
options = form.cleaned_data
term = options.get('term', '')
raw_data = self.get_query(request, term)
results = self.format_results(raw_data, options)
return self.response(results) | [
"def",
"results",
"(",
"self",
",",
"request",
")",
":",
"results",
"=",
"{",
"}",
"form",
"=",
"self",
".",
"form",
"(",
"request",
".",
"GET",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"options",
"=",
"form",
".",
"cleaned_data",
"term",
"=",
"options",
".",
"get",
"(",
"'term'",
",",
"''",
")",
"raw_data",
"=",
"self",
".",
"get_query",
"(",
"request",
",",
"term",
")",
"results",
"=",
"self",
".",
"format_results",
"(",
"raw_data",
",",
"options",
")",
"return",
"self",
".",
"response",
"(",
"results",
")"
] | Match results to given term and return the serialized HttpResponse. | [
"Match",
"results",
"to",
"given",
"term",
"and",
"return",
"the",
"serialized",
"HttpResponse",
"."
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/base.py#L90-L99 |
mlavin/django-selectable | selectable/base.py | LookupBase.format_results | def format_results(self, raw_data, options):
'''
Returns a python structure that later gets serialized.
raw_data
full list of objects matching the search term
options
a dictionary of the given options
'''
page_data = self.paginate_results(raw_data, options)
results = {}
meta = options.copy()
meta['more'] = _('Show more results')
if page_data and page_data.has_next():
meta['next_page'] = page_data.next_page_number()
if page_data and page_data.has_previous():
meta['prev_page'] = page_data.previous_page_number()
results['data'] = [self.format_item(item) for item in page_data.object_list]
results['meta'] = meta
return results | python | def format_results(self, raw_data, options):
'''
Returns a python structure that later gets serialized.
raw_data
full list of objects matching the search term
options
a dictionary of the given options
'''
page_data = self.paginate_results(raw_data, options)
results = {}
meta = options.copy()
meta['more'] = _('Show more results')
if page_data and page_data.has_next():
meta['next_page'] = page_data.next_page_number()
if page_data and page_data.has_previous():
meta['prev_page'] = page_data.previous_page_number()
results['data'] = [self.format_item(item) for item in page_data.object_list]
results['meta'] = meta
return results | [
"def",
"format_results",
"(",
"self",
",",
"raw_data",
",",
"options",
")",
":",
"page_data",
"=",
"self",
".",
"paginate_results",
"(",
"raw_data",
",",
"options",
")",
"results",
"=",
"{",
"}",
"meta",
"=",
"options",
".",
"copy",
"(",
")",
"meta",
"[",
"'more'",
"]",
"=",
"_",
"(",
"'Show more results'",
")",
"if",
"page_data",
"and",
"page_data",
".",
"has_next",
"(",
")",
":",
"meta",
"[",
"'next_page'",
"]",
"=",
"page_data",
".",
"next_page_number",
"(",
")",
"if",
"page_data",
"and",
"page_data",
".",
"has_previous",
"(",
")",
":",
"meta",
"[",
"'prev_page'",
"]",
"=",
"page_data",
".",
"previous_page_number",
"(",
")",
"results",
"[",
"'data'",
"]",
"=",
"[",
"self",
".",
"format_item",
"(",
"item",
")",
"for",
"item",
"in",
"page_data",
".",
"object_list",
"]",
"results",
"[",
"'meta'",
"]",
"=",
"meta",
"return",
"results"
] | Returns a python structure that later gets serialized.
raw_data
full list of objects matching the search term
options
a dictionary of the given options | [
"Returns",
"a",
"python",
"structure",
"that",
"later",
"gets",
"serialized",
".",
"raw_data",
"full",
"list",
"of",
"objects",
"matching",
"the",
"search",
"term",
"options",
"a",
"dictionary",
"of",
"the",
"given",
"options"
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/base.py#L101-L119 |
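Serialized, the structure produced by format_results has the shape below; the per-item keys come from format_item, and the meta dictionary is the cleaned options plus the 'more' label and the optional paging keys (values here are invented):

{
    'data': [
        {'id': '1', 'value': 'apple', 'label': 'apple'},
    ],
    'meta': {'term': 'app', 'limit': 25, 'more': 'Show more results', 'next_page': 2},
}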
mlavin/django-selectable | selectable/forms/base.py | import_lookup_class | def import_lookup_class(lookup_class):
"""
    Import lookup_class as a dotted path and ensure it extends LookupBase
"""
from selectable.base import LookupBase
if isinstance(lookup_class, string_types):
mod_str, cls_str = lookup_class.rsplit('.', 1)
mod = import_module(mod_str)
lookup_class = getattr(mod, cls_str)
if not issubclass(lookup_class, LookupBase):
raise TypeError('lookup_class must extend from selectable.base.LookupBase')
return lookup_class | python | def import_lookup_class(lookup_class):
"""
    Import lookup_class as a dotted path and ensure it extends LookupBase
"""
from selectable.base import LookupBase
if isinstance(lookup_class, string_types):
mod_str, cls_str = lookup_class.rsplit('.', 1)
mod = import_module(mod_str)
lookup_class = getattr(mod, cls_str)
if not issubclass(lookup_class, LookupBase):
raise TypeError('lookup_class must extend from selectable.base.LookupBase')
return lookup_class | [
"def",
"import_lookup_class",
"(",
"lookup_class",
")",
":",
"from",
"selectable",
".",
"base",
"import",
"LookupBase",
"if",
"isinstance",
"(",
"lookup_class",
",",
"string_types",
")",
":",
"mod_str",
",",
"cls_str",
"=",
"lookup_class",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"mod",
"=",
"import_module",
"(",
"mod_str",
")",
"lookup_class",
"=",
"getattr",
"(",
"mod",
",",
"cls_str",
")",
"if",
"not",
"issubclass",
"(",
"lookup_class",
",",
"LookupBase",
")",
":",
"raise",
"TypeError",
"(",
"'lookup_class must extend from selectable.base.LookupBase'",
")",
"return",
"lookup_class"
] | Import lookup_class as a dotted path and ensure it extends LookupBase | [
"Import",
"lookup_class",
"as",
"a",
"dotted",
"base",
"and",
"ensure",
"it",
"extends",
"LookupBase"
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/forms/base.py#L34-L45 |
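Both accepted forms resolve to the same class object; the dotted string is split once from the right and resolved via import_module. A sketch with a hypothetical lookup module:

from myapp.lookups import FruitLookup  # hypothetical module

cls = import_lookup_class('myapp.lookups.FruitLookup')  # dotted path, imported dynamically
assert cls is import_lookup_class(FruitLookup)          # class object, returned unchanged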
mlavin/django-selectable | selectable/forms/base.py | BaseLookupForm.clean_limit | def clean_limit(self):
"Ensure given limit is less than default if defined"
limit = self.cleaned_data.get('limit', None)
if (settings.SELECTABLE_MAX_LIMIT is not None and
(not limit or limit > settings.SELECTABLE_MAX_LIMIT)):
limit = settings.SELECTABLE_MAX_LIMIT
return limit | python | def clean_limit(self):
"Ensure given limit is less than default if defined"
limit = self.cleaned_data.get('limit', None)
if (settings.SELECTABLE_MAX_LIMIT is not None and
(not limit or limit > settings.SELECTABLE_MAX_LIMIT)):
limit = settings.SELECTABLE_MAX_LIMIT
return limit | [
"def",
"clean_limit",
"(",
"self",
")",
":",
"limit",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'limit'",
",",
"None",
")",
"if",
"(",
"settings",
".",
"SELECTABLE_MAX_LIMIT",
"is",
"not",
"None",
"and",
"(",
"not",
"limit",
"or",
"limit",
">",
"settings",
".",
"SELECTABLE_MAX_LIMIT",
")",
")",
":",
"limit",
"=",
"settings",
".",
"SELECTABLE_MAX_LIMIT",
"return",
"limit"
] | Ensure given limit is less than default if defined | [
"Ensure",
"given",
"limit",
"is",
"less",
"than",
"default",
"if",
"defined"
] | train | https://github.com/mlavin/django-selectable/blob/3d7b8db0526dd924a774c599f0c665eff98fb375/selectable/forms/base.py#L21-L27 |
desbma/sacad | sacad/sources/google_images.py | GoogleImagesWebScrapeCoverSource.getSearchUrl | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
# build request url
params = collections.OrderedDict()
params["gbv"] = "2"
params["q"] = "\"%s\" \"%s\" front cover" % (artist, album)
if abs(self.target_size - 500) < 300:
params["tbs"] = "isz:m"
elif self.target_size > 800:
params["tbs"] = "isz:l"
return __class__.assembleUrl(__class__.BASE_URL, params) | python | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
# build request url
params = collections.OrderedDict()
params["gbv"] = "2"
params["q"] = "\"%s\" \"%s\" front cover" % (artist, album)
if abs(self.target_size - 500) < 300:
params["tbs"] = "isz:m"
elif self.target_size > 800:
params["tbs"] = "isz:l"
return __class__.assembleUrl(__class__.BASE_URL, params) | [
"def",
"getSearchUrl",
"(",
"self",
",",
"album",
",",
"artist",
")",
":",
"# build request url",
"params",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"params",
"[",
"\"gbv\"",
"]",
"=",
"\"2\"",
"params",
"[",
"\"q\"",
"]",
"=",
"\"\\\"%s\\\" \\\"%s\\\" front cover\"",
"%",
"(",
"artist",
",",
"album",
")",
"if",
"abs",
"(",
"self",
".",
"target_size",
"-",
"500",
")",
"<",
"300",
":",
"params",
"[",
"\"tbs\"",
"]",
"=",
"\"isz:m\"",
"elif",
"self",
".",
"target_size",
">",
"800",
":",
"params",
"[",
"\"tbs\"",
"]",
"=",
"\"isz:l\"",
"return",
"__class__",
".",
"assembleUrl",
"(",
"__class__",
".",
"BASE_URL",
",",
"params",
")"
] | See CoverSource.getSearchUrl. | [
"See",
"CoverSource",
".",
"getSearchUrl",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/google_images.py#L33-L44 |
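The tbs parameter selects one of Google Images' size buckets, and the two branches above translate the requested cover size into them. The same logic as a standalone sketch, with the thresholds copied from the method:

def size_hint(target_size):
    # 200 < target_size < 800: ask for medium images
    if abs(target_size - 500) < 300:
        return 'isz:m'
    # very large covers: ask for large images
    if target_size > 800:
        return 'isz:l'
    return None  # otherwise leave the query unrestricted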
desbma/sacad | sacad/sources/google_images.py | GoogleImagesWebScrapeCoverSource.parseResults | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse HTML and get results
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("latin-1"), parser)
for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1):
# extract url
metadata_div = result.find("div")
metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text")
metadata_json = json.loads(metadata_json)
google_url = result.find("a").get("href")
if google_url is not None:
query = urllib.parse.urlsplit(google_url).query
else:
query = None
if not query:
img_url = metadata_json["ou"]
else:
query = urllib.parse.parse_qs(query)
img_url = query["imgurl"][0]
# extract format
check_metadata = CoverImageMetadata.NONE
format = metadata_json["ity"].lower()
try:
format = SUPPORTED_IMG_FORMATS[format]
except KeyError:
# format could not be identified or is unknown
format = None
check_metadata = CoverImageMetadata.FORMAT
# extract size
if not query:
size = metadata_json["ow"], metadata_json["oh"]
else:
size = tuple(map(int, (query["w"][0], query["h"][0])))
# extract thumbnail url
thumbnail_url = metadata_json["tu"]
# result
results.append(GoogleImagesCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | python | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse HTML and get results
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("latin-1"), parser)
for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1):
# extract url
metadata_div = result.find("div")
metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text")
metadata_json = json.loads(metadata_json)
google_url = result.find("a").get("href")
if google_url is not None:
query = urllib.parse.urlsplit(google_url).query
else:
query = None
if not query:
img_url = metadata_json["ou"]
else:
query = urllib.parse.parse_qs(query)
img_url = query["imgurl"][0]
# extract format
check_metadata = CoverImageMetadata.NONE
format = metadata_json["ity"].lower()
try:
format = SUPPORTED_IMG_FORMATS[format]
except KeyError:
# format could not be identified or is unknown
format = None
check_metadata = CoverImageMetadata.FORMAT
# extract size
if not query:
size = metadata_json["ow"], metadata_json["oh"]
else:
size = tuple(map(int, (query["w"][0], query["h"][0])))
# extract thumbnail url
thumbnail_url = metadata_json["tu"]
# result
results.append(GoogleImagesCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | [
"async",
"def",
"parseResults",
"(",
"self",
",",
"api_data",
")",
":",
"results",
"=",
"[",
"]",
"# parse HTML and get results",
"parser",
"=",
"lxml",
".",
"etree",
".",
"HTMLParser",
"(",
")",
"html",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"api_data",
".",
"decode",
"(",
"\"latin-1\"",
")",
",",
"parser",
")",
"for",
"rank",
",",
"result",
"in",
"enumerate",
"(",
"__class__",
".",
"RESULTS_SELECTOR",
"(",
"html",
")",
",",
"1",
")",
":",
"# extract url",
"metadata_div",
"=",
"result",
".",
"find",
"(",
"\"div\"",
")",
"metadata_json",
"=",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"metadata_div",
",",
"encoding",
"=",
"\"unicode\"",
",",
"method",
"=",
"\"text\"",
")",
"metadata_json",
"=",
"json",
".",
"loads",
"(",
"metadata_json",
")",
"google_url",
"=",
"result",
".",
"find",
"(",
"\"a\"",
")",
".",
"get",
"(",
"\"href\"",
")",
"if",
"google_url",
"is",
"not",
"None",
":",
"query",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"google_url",
")",
".",
"query",
"else",
":",
"query",
"=",
"None",
"if",
"not",
"query",
":",
"img_url",
"=",
"metadata_json",
"[",
"\"ou\"",
"]",
"else",
":",
"query",
"=",
"urllib",
".",
"parse",
".",
"parse_qs",
"(",
"query",
")",
"img_url",
"=",
"query",
"[",
"\"imgurl\"",
"]",
"[",
"0",
"]",
"# extract format",
"check_metadata",
"=",
"CoverImageMetadata",
".",
"NONE",
"format",
"=",
"metadata_json",
"[",
"\"ity\"",
"]",
".",
"lower",
"(",
")",
"try",
":",
"format",
"=",
"SUPPORTED_IMG_FORMATS",
"[",
"format",
"]",
"except",
"KeyError",
":",
"# format could not be identified or is unknown",
"format",
"=",
"None",
"check_metadata",
"=",
"CoverImageMetadata",
".",
"FORMAT",
"# extract size",
"if",
"not",
"query",
":",
"size",
"=",
"metadata_json",
"[",
"\"ow\"",
"]",
",",
"metadata_json",
"[",
"\"oh\"",
"]",
"else",
":",
"size",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"(",
"query",
"[",
"\"w\"",
"]",
"[",
"0",
"]",
",",
"query",
"[",
"\"h\"",
"]",
"[",
"0",
"]",
")",
")",
")",
"# extract thumbnail url",
"thumbnail_url",
"=",
"metadata_json",
"[",
"\"tu\"",
"]",
"# result",
"results",
".",
"append",
"(",
"GoogleImagesCoverSourceResult",
"(",
"img_url",
",",
"size",
",",
"format",
",",
"thumbnail_url",
"=",
"thumbnail_url",
",",
"source",
"=",
"self",
",",
"rank",
"=",
"rank",
",",
"check_metadata",
"=",
"check_metadata",
")",
")",
"return",
"results"
] | See CoverSource.parseResults. | [
"See",
"CoverSource",
".",
"parseResults",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/google_images.py#L50-L98 |
desbma/sacad | sacad/rate_watcher.py | AccessRateWatcher.waitAccessAsync | async def waitAccessAsync(self):
""" Wait the needed time before sending a request to honor rate limit. """
async with self.lock:
while True:
last_access_ts = self.__getLastAccess()
if last_access_ts is not None:
now = time.time()
last_access_ts = last_access_ts[0]
time_since_last_access = now - last_access_ts
if time_since_last_access < self.min_delay_between_accesses:
time_to_wait = self.min_delay_between_accesses - time_since_last_access
if self.jitter_range_ms is not None:
time_to_wait += random.randint(*self.jitter_range_ms) / 1000
self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000,
self.domain))
await asyncio.sleep(time_to_wait)
access_time = time.time()
self.__access(access_time)
# now we should be good... except if another process did the same query at the same time
# the database serves as an atomic lock, query again to be sure the last row is the one
# we just inserted
last_access_ts = self.__getLastAccess()
if last_access_ts[0] == access_time:
                    break | python | Wait the needed time before sending a request to honor the rate limit. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/rate_watcher.py#L28-L53 |
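
A minimal usage sketch for the rate limiter above. The constructor signature mirrors the call made in Http.query further down in this file set; the database path, delay and jitter values are illustrative assumptions, not sacad defaults:

    import asyncio
    import logging

    from sacad import rate_watcher

    async def main():
      watcher = rate_watcher.AccessRateWatcher("/tmp/sacad_rate.db",  # assumed db path
                                               "https://example.com/some/api",
                                               0.5,  # min delay between accesses, in seconds
                                               jitter_range_ms=(0, 300),
                                               logger=logging.getLogger("demo"))
      await watcher.waitAccessAsync()  # returns once this process may hit the domain
      # ...send the actual request here...

    asyncio.get_event_loop().run_until_complete(main())
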
desbma/sacad | sacad/rate_watcher.py | AccessRateWatcher.__access | def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
                                (ts, self.domain)) | python | Record an API access. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/rate_watcher.py#L62-L66 |
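
INSERT OR REPLACE keeps a single timestamp row per domain only if the table enforces uniqueness on the domain column; the schema is created outside this excerpt, so the sketch below assumes a primary key on domain:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    # assumed schema; the real one is created elsewhere in rate_watcher.py
    conn.execute("CREATE TABLE access_timestamp (timestamp FLOAT, domain TEXT PRIMARY KEY)")
    with conn:
      conn.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)", (1000.0, "example.com"))
    with conn:
      conn.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)", (1001.5, "example.com"))
    print(conn.execute("SELECT * FROM access_timestamp").fetchall())  # [(1001.5, 'example.com')]
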
desbma/sacad | sacad/http_helpers.py | aiohttp_socket_timeout | def aiohttp_socket_timeout(socket_timeout_s):
""" Return a aiohttp.ClientTimeout object with only socket timeouts set. """
return aiohttp.ClientTimeout(total=None,
connect=None,
sock_connect=socket_timeout_s,
                                 sock_read=socket_timeout_s) | python | Return an aiohttp.ClientTimeout object with only socket timeouts set. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/http_helpers.py#L15-L20 |
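
For instance, to build a session whose connect and read-inactivity timeouts are each capped at 10 seconds while the total request duration stays unbounded (a sketch, not sacad's own wiring):

    import asyncio

    import aiohttp

    from sacad.http_helpers import aiohttp_socket_timeout

    async def main():
      async with aiohttp.ClientSession(timeout=aiohttp_socket_timeout(10)) as session:
        async with session.get("https://example.com") as resp:
          print(resp.status)

    asyncio.get_event_loop().run_until_complete(main())
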
desbma/sacad | sacad/http_helpers.py | Http.query | async def query(self, url, *, post_data=None, headers=None, verify=True, cache=None, pre_cache_callback=None):
""" Send a GET/POST request or get data from cache, retry if it fails, and return a tuple of store in cache callback, response content. """
async def store_in_cache_callback():
pass
if cache is not None:
# try from cache first
if post_data is not None:
if (url, post_data) in cache:
self.logger.debug("Got data for URL '%s' %s from cache" % (url, dict(post_data)))
return store_in_cache_callback, cache[(url, post_data)]
elif url in cache:
self.logger.debug("Got data for URL '%s' from cache" % (url))
return store_in_cache_callback, cache[url]
domain_rate_watcher = rate_watcher.AccessRateWatcher(self.watcher_db_filepath,
url,
self.min_delay_between_accesses,
jitter_range_ms=self.jitter_range_ms,
logger=self.logger)
for attempt, time_to_sleep in enumerate(redo.retrier(max_attempts=HTTP_MAX_ATTEMPTS,
sleeptime=1,
max_sleeptime=HTTP_MAX_RETRY_SLEEP_S,
sleepscale=1.5),
1):
await domain_rate_watcher.waitAccessAsync()
try:
if post_data is not None:
async with self.session.post(url,
data=post_data,
headers=self._buildHeaders(headers),
timeout=HTTP_NORMAL_TIMEOUT,
ssl=verify) as response:
content = await response.read()
else:
async with self.session.get(url,
headers=self._buildHeaders(headers),
timeout=HTTP_NORMAL_TIMEOUT,
ssl=verify) as response:
content = await response.read()
if cache is not None:
async def store_in_cache_callback():
if pre_cache_callback is not None:
# process
try:
data = await pre_cache_callback(content)
except Exception:
data = content
else:
data = content
# add to cache
if post_data is not None:
cache[(url, post_data)] = data
else:
cache[url] = data
except (asyncio.TimeoutError, aiohttp.ClientError) as e:
self.logger.warning("Querying '%s' failed (attempt %u/%u): %s %s" % (url,
attempt,
HTTP_MAX_ATTEMPTS,
e.__class__.__qualname__,
e))
if attempt == HTTP_MAX_ATTEMPTS:
raise
else:
self.logger.debug("Retrying in %.3fs" % (time_to_sleep))
await asyncio.sleep(time_to_sleep)
else:
break # http retry loop
response.raise_for_status()
    return store_in_cache_callback, content | python | Send a GET/POST request or get data from cache, retry if it fails, and return a (store-in-cache callback, response content) tuple. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/http_helpers.py#L52-L128 |
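
A sketch of the intended call pattern for query, assuming an already-constructed Http instance (its constructor is outside this excerpt). When no cache mapping is passed, the returned callback is the no-op defined at the top of the method:

    async def fetch(http, url):
      store_in_cache, data = await http.query(url, verify=True)
      await store_in_cache()  # no-op here, since no cache was passed
      return data
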
desbma/sacad | sacad/http_helpers.py | Http.isReachable | async def isReachable(self, url, *, headers=None, verify=True, response_headers=None, cache=None):
""" Send a HEAD request with short timeout or get data from cache, return True if ressource has 2xx status code, False instead. """
if (cache is not None) and (url in cache):
# try from cache first
self.logger.debug("Got headers for URL '%s' from cache" % (url))
resp_ok, response_headers = pickle.loads(cache[url])
return resp_ok
domain_rate_watcher = rate_watcher.AccessRateWatcher(self.watcher_db_filepath,
url,
self.min_delay_between_accesses,
jitter_range_ms=self.jitter_range_ms,
logger=self.logger)
resp_ok = True
try:
for attempt, time_to_sleep in enumerate(redo.retrier(max_attempts=HTTP_MAX_ATTEMPTS,
sleeptime=0.5,
max_sleeptime=HTTP_MAX_RETRY_SLEEP_SHORT_S,
sleepscale=1.5),
1):
await domain_rate_watcher.waitAccessAsync()
try:
async with self.session.head(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify) as response:
pass
except (asyncio.TimeoutError, aiohttp.ClientError) as e:
self.logger.warning("Probing '%s' failed (attempt %u/%u): %s %s" % (url,
attempt,
HTTP_MAX_ATTEMPTS,
e.__class__.__qualname__,
e))
if attempt == HTTP_MAX_ATTEMPTS:
resp_ok = False
else:
self.logger.debug("Retrying in %.3fs" % (time_to_sleep))
await asyncio.sleep(time_to_sleep)
else:
response.raise_for_status()
if response_headers is not None:
response_headers.update(response.headers)
break # http retry loop
except aiohttp.ClientResponseError as e:
self.logger.debug("Probing '%s' failed: %s %s" % (url, e.__class__.__qualname__, e))
resp_ok = False
if cache is not None:
# store in cache
cache[url] = pickle.dumps((resp_ok, response_headers))
    return resp_ok | python | Send a HEAD request with a short timeout or get data from cache, return True if the resource has a 2xx status code, False otherwise. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/http_helpers.py#L130-L187 |
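
Typical use of isReachable, again assuming a constructed Http instance; the response_headers dict is filled in place when the probe succeeds:

    async def probe_content_type(http, url):
      headers_out = {}
      if await http.isReachable(url, response_headers=headers_out):
        return headers_out.get("Content-Type")
      return None
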
desbma/sacad | sacad/http_helpers.py | Http.fastStreamedQuery | async def fastStreamedQuery(self, url, *, headers=None, verify=True):
""" Send a GET request with short timeout, do not retry, and return streamed response. """
response = await self.session.get(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify)
response.raise_for_status()
    return response | python | Send a GET request with a short timeout, do not retry, and return the streamed response. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/http_helpers.py#L189-L198 |
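
A sketch of consuming the streamed response; the release() call mirrors the cleanup done by updateImageMetadata further down:

    async def download_to_file(http, url, out_path):
      response = await http.fastStreamedQuery(url)
      try:
        with open(out_path, "wb") as f:
          while True:
            chunk = await response.content.read(2 ** 14)
            if not chunk:
              break
            f.write(chunk)
      finally:
        response.release()
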
desbma/sacad | sacad/sources/lastfm.py | LastFmCoverSource.getSearchUrl | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
# build request url
params = collections.OrderedDict()
params["method"] = "album.getinfo"
params["api_key"] = __class__.API_KEY
params["album"] = album
params["artist"] = artist
    return __class__.assembleUrl(__class__.BASE_URL, params) | python | See CoverSource.getSearchUrl. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/lastfm.py#L36-L45 |
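
Assuming assembleUrl URL-encodes the ordered params (its definition is outside this excerpt), a call like the one below should produce a query string with the parameters in the order set above; the constructor arguments follow the (size, size_tolerance_prct) convention seen in search_and_download:

    from sacad import sources

    src = sources.LastFmCoverSource(600, 25)
    url = src.getSearchUrl("Nevermind", "Nirvana")
    # expected shape: <BASE_URL>?method=album.getinfo&api_key=<API_KEY>&album=Nevermind&artist=Nirvana
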
desbma/sacad | sacad/sources/lastfm.py | LastFmCoverSource.processQueryString | def processQueryString(self, s):
""" See CoverSource.processQueryString. """
char_blacklist = set(string.punctuation)
char_blacklist.remove("'")
char_blacklist.remove("&")
char_blacklist = frozenset(char_blacklist)
    return __class__.unpunctuate(s.lower(), char_blacklist=char_blacklist) | python | See CoverSource.processQueryString. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/lastfm.py#L47-L53 |
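
The net effect is to lowercase the query and strip punctuation except apostrophes and ampersands, which Last.fm presumably treats as significant. A hedged sketch of the expected behavior (the exact output depends on unpunctuate, defined outside this excerpt):

    from sacad import sources

    src = sources.LastFmCoverSource(600, 25)
    src.processQueryString("Guns N' Roses")       # expected: "guns n' roses" (apostrophe kept)
    src.processQueryString("Rock & Roll, Pt. 2")  # expected: comma and period stripped, "&" kept
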
desbma/sacad | sacad/sources/lastfm.py | LastFmCoverSource.parseResults | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# get xml results list
xml_text = api_data.decode("utf-8")
xml_root = xml.etree.ElementTree.fromstring(xml_text)
status = xml_root.get("status")
if status != "ok":
raise Exception("Unexpected Last.fm response status: %s" % (status))
img_elements = xml_root.findall("album/image")
# build results from xml
thumbnail_url = None
thumbnail_size = None
for img_element in img_elements:
img_url = img_element.text
if not img_url:
        # last.fm returns an empty image tag for sizes it does not have
continue
lfm_size = img_element.get("size")
if lfm_size == "mega":
check_metadata = CoverImageMetadata.SIZE
else:
check_metadata = CoverImageMetadata.NONE
try:
size = __class__.SIZES[lfm_size]
except KeyError:
continue
if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
thumbnail_url = img_url
thumbnail_size = size[0]
format = os.path.splitext(img_url)[1][1:].lower()
format = SUPPORTED_IMG_FORMATS[format]
results.append(LastFmCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
check_metadata=check_metadata))
    return results | python | See CoverSource.parseResults. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/lastfm.py#L55-L96 |
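
A hedged sketch of the XML shape parseResults expects, inferred from the element paths used above (the real API response may carry more fields):

    import xml.etree.ElementTree

    xml_text = """<lfm status="ok">
      <album>
        <image size="large">http://example.com/cover-174.jpg</image>
        <image size="mega">http://example.com/cover-600.jpg</image>
      </album>
    </lfm>"""
    root = xml.etree.ElementTree.fromstring(xml_text)
    assert root.get("status") == "ok"
    print([(e.get("size"), e.text) for e in root.findall("album/image")])
    # [('large', 'http://example.com/cover-174.jpg'), ('mega', 'http://example.com/cover-600.jpg')]
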
desbma/sacad | sacad/__init__.py | search_and_download | async def search_and_download(album, artist, format, size, out_filepath, *, size_tolerance_prct, amazon_tlds, no_lq_sources,
async_loop):
""" Search and download a cover, return True if success, False instead. """
# register sources
source_args = (size, size_tolerance_prct)
cover_sources = [sources.LastFmCoverSource(*source_args),
sources.AmazonCdCoverSource(*source_args),
sources.AmazonDigitalCoverSource(*source_args)]
for tld in amazon_tlds:
cover_sources.append(sources.AmazonCdCoverSource(*source_args, tld=tld))
if not no_lq_sources:
cover_sources.append(sources.GoogleImagesWebScrapeCoverSource(*source_args))
# schedule search work
search_futures = []
for cover_source in cover_sources:
coroutine = cover_source.search(album, artist)
future = asyncio.ensure_future(coroutine, loop=async_loop)
search_futures.append(future)
# wait for it
await asyncio.wait(search_futures, loop=async_loop)
# get results
results = []
for future in search_futures:
source_results = future.result()
results.extend(source_results)
# sort results
results = await CoverSourceResult.preProcessForComparison(results, size, size_tolerance_prct)
results.sort(reverse=True,
key=functools.cmp_to_key(functools.partial(CoverSourceResult.compare,
target_size=size,
size_tolerance_prct=size_tolerance_prct)))
if not results:
logging.getLogger("Main").info("No results")
# download
for result in results:
try:
await result.get(format, size, size_tolerance_prct, out_filepath)
except Exception as e:
logging.getLogger("Main").warning("Download of %s failed: %s %s" % (result,
e.__class__.__qualname__,
e))
continue
else:
return True
  return False | python | Search and download a cover, return True on success, False otherwise. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/__init__.py#L20-L70 |
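
A plausible top-level invocation; the argument values are illustrative, and CoverImageFormat comes from sacad.cover, as used by the Amazon source below:

    import asyncio

    import sacad
    from sacad.cover import CoverImageFormat

    loop = asyncio.get_event_loop()
    ok = loop.run_until_complete(sacad.search_and_download("Nevermind", "Nirvana",
                                                           CoverImageFormat.JPEG,
                                                           600,          # target cover size in pixels
                                                           "cover.jpg",
                                                           size_tolerance_prct=25,
                                                           amazon_tlds=(),
                                                           no_lq_sources=False,
                                                           async_loop=loop))
    print("downloaded" if ok else "no suitable cover found")
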
desbma/sacad | sacad/sources/amazondigital.py | AmazonDigitalCoverSource.getSearchUrl | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
url = "%s/search" % (__class__.BASE_URL)
params = collections.OrderedDict()
params["search-alias"] = "digital-music"
params["field-keywords"] = " ".join((artist, album))
params["sort"] = "relevancerank"
    return __class__.assembleUrl(url, params) | python | See CoverSource.getSearchUrl. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/amazondigital.py#L58-L65 |
desbma/sacad | sacad/sources/amazondigital.py | AmazonDigitalCoverSource.parseResults | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse page
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("utf-8"), parser)
for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS):
result_nodes = result_selector(html)
if result_nodes:
break
for rank, result_node in enumerate(result_nodes, 1):
# get thumbnail & full image url
img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0]
thumbnail_url = img_node.get("src")
thumbnail_url = thumbnail_url.replace("Stripe-Prime-Only", "")
url_parts = thumbnail_url.rsplit(".", 2)
img_url = ".".join((url_parts[0], url_parts[2]))
# assume size is fixed
size = (500, 500)
# try to get higher res image...
if self.target_size > size[0]: # ...but only if needed
self.logger.debug("Looking for optimal subimages configuration...")
product_url = __class__.LINK_SELECTOR(result_node)[0].get("href")
product_url = urllib.parse.urlsplit(product_url)
product_id = product_url.path.split("/")[3]
      # TODO don't pick the highest-res image if the user asked for less?
for amazon_img_format in AMAZON_DIGITAL_IMAGE_FORMATS:
        # TODO review this, it seems to always fail now
self.logger.debug("Trying %u subimages..." % (amazon_img_format.slice_count ** 2))
urls = tuple(self.generateImgUrls(product_id,
__class__.DYNAPI_KEY,
amazon_img_format.id,
amazon_img_format.slice_count))
url_ok = await self.probeUrl(urls[-1])
if not url_ok:
# images at this size are not available
continue
# images at this size are available
img_url = urls
size = (amazon_img_format.total_res,) * 2
break
# assume format is always jpg
format = CoverImageFormat.JPEG
# add result
results.append(AmazonDigitalCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=CoverImageMetadata.SIZE))
    return results | python | See CoverSource.parseResults. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/amazondigital.py#L71-L132 |
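
The rsplit call above strips the size-modifier segment that Amazon inserts between the image id and the extension, recovering the full-size image URL. On a hypothetical thumbnail URL it behaves like this:

    thumb = "http://ecx.images-amazon.com/images/I/51abcdef._SL160_.jpg"  # hypothetical URL
    parts = thumb.rsplit(".", 2)
    print(".".join((parts[0], parts[2])))
    # -> http://ecx.images-amazon.com/images/I/51abcdef.jpg
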
desbma/sacad | sacad/sources/amazondigital.py | AmazonDigitalCoverSource.generateImgUrls | def generateImgUrls(self, product_id, dynapi_key, format_id, slice_count):
""" Generate URLs for slice_count^2 subimages of a product. """
for x in range(slice_count):
for y in range(slice_count):
yield ("http://z2-ec2.images-amazon.com/R/1/a=" + product_id +
"+c=" + dynapi_key +
"+d=_SCR%28" + str(format_id) + "," + str(x) + "," + str(y) + "%29_=.jpg") | python | def generateImgUrls(self, product_id, dynapi_key, format_id, slice_count):
""" Generate URLs for slice_count^2 subimages of a product. """
for x in range(slice_count):
for y in range(slice_count):
yield ("http://z2-ec2.images-amazon.com/R/1/a=" + product_id +
"+c=" + dynapi_key +
"+d=_SCR%28" + str(format_id) + "," + str(x) + "," + str(y) + "%29_=.jpg") | [
"def",
"generateImgUrls",
"(",
"self",
",",
"product_id",
",",
"dynapi_key",
",",
"format_id",
",",
"slice_count",
")",
":",
"for",
"x",
"in",
"range",
"(",
"slice_count",
")",
":",
"for",
"y",
"in",
"range",
"(",
"slice_count",
")",
":",
"yield",
"(",
"\"http://z2-ec2.images-amazon.com/R/1/a=\"",
"+",
"product_id",
"+",
"\"+c=\"",
"+",
"dynapi_key",
"+",
"\"+d=_SCR%28\"",
"+",
"str",
"(",
"format_id",
")",
"+",
"\",\"",
"+",
"str",
"(",
"x",
")",
"+",
"\",\"",
"+",
"str",
"(",
"y",
")",
"+",
"\"%29_=.jpg\"",
")"
] | Generate URLs for slice_count^2 subimages of a product. | [
"Generate",
"URLs",
"for",
"slice_count^2",
"subimages",
"of",
"a",
"product",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/amazondigital.py#L134-L140 |
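
The output is fully determined by the code above; with a hypothetical product id and API key and a 2x2 grid, the generator yields four URLs, with y varying fastest (the method never touches self, hence the None below):

    from sacad import sources

    urls = list(sources.AmazonDigitalCoverSource.generateImgUrls(None, "B00EXAMPLE", "0123456789", 6, 2))
    len(urls)  # 4 (slice_count ** 2)
    urls[0]    # 'http://z2-ec2.images-amazon.com/R/1/a=B00EXAMPLE+c=0123456789+d=_SCR%286,0,0%29_=.jpg'
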
desbma/sacad | sacad/redo.py | retrier | def retrier(*, max_attempts, sleeptime, max_sleeptime, sleepscale=1.5, jitter=0.2):
""" Generator yielding time to wait for, after the attempt, if it failed. """
assert(max_attempts > 1)
assert(sleeptime >= 0)
assert(0 <= jitter <= sleeptime)
assert(sleepscale >= 1)
cur_sleeptime = min(max_sleeptime, sleeptime)
for attempt in range(max_attempts):
cur_jitter = random.randint(int(-jitter * 1000), int(jitter * 1000)) / 1000
yield max(0, cur_sleeptime + cur_jitter)
    cur_sleeptime = min(max_sleeptime, cur_sleeptime * sleepscale) | python | Generator yielding the time to wait after each attempt, if it failed. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/redo.py#L6-L18 |
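
For example, with sleeptime=1 and the default sleepscale of 1.5, the yielded delays grow geometrically, each offset by up to +/-0.2 s of jitter and capped at max_sleeptime; the enumerate(..., 1) pattern mirrors how Http.query consumes this generator:

    from sacad.redo import retrier

    for attempt, sleep_s in enumerate(retrier(max_attempts=5, sleeptime=1,
                                              max_sleeptime=10), 1):
      print("attempt %u -> would sleep %.3fs on failure" % (attempt, sleep_s))
    # roughly 1, 1.5, 2.25, 3.375, 5.062 seconds, each +/- 0.2 s
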
desbma/sacad | sacad/cover.py | CoverSourceResult.get | async def get(self, target_format, target_size, size_tolerance_prct, out_filepath):
""" Download cover and process it. """
if self.source_quality.value <= CoverSourceQuality.LOW.value:
logging.getLogger("Cover").warning("Cover is from a potentially unreliable source and may be unrelated to the search")
images_data = []
for i, url in enumerate(self.urls):
# download
logging.getLogger("Cover").info("Downloading cover '%s' (part %u/%u)..." % (url, i + 1, len(self.urls)))
headers = {}
self.source.updateHttpHeaders(headers)
async def pre_cache_callback(img_data):
return await __class__.crunch(img_data, self.format)
store_in_cache_callback, image_data = await self.source.http.query(url,
headers=headers,
verify=False,
cache=__class__.image_cache,
pre_cache_callback=pre_cache_callback)
# store immediately in cache
await store_in_cache_callback()
# append for multi images
images_data.append(image_data)
need_format_change = (self.format != target_format)
need_size_change = ((max(self.size) > target_size) and
(abs(max(self.size) - target_size) >
target_size * size_tolerance_prct / 100))
need_join = len(images_data) > 1
if need_join or need_format_change or need_size_change:
# post process
image_data = self.postProcess(images_data,
target_format if need_format_change else None,
target_size if need_size_change else None)
# crunch image again
image_data = await __class__.crunch(image_data, target_format)
# write it
with open(out_filepath, "wb") as file:
      file.write(image_data) | python | Download cover and process it. | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L114-L157 |
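
The size-change predicate above only downscales when the cover exceeds the target by more than the tolerance band; a quick deterministic check of that arithmetic, with a 600 px target and 25 % tolerance:

    target_size, size_tolerance_prct = 600, 25
    for width in (600, 700, 800):
      need = (width > target_size) and (abs(width - target_size) > target_size * size_tolerance_prct / 100)
      print(width, need)
    # 600 False, 700 False (within the 150 px tolerance), 800 True
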
desbma/sacad | sacad/cover.py | CoverSourceResult.postProcess | def postProcess(self, images_data, new_format, new_size):
""" Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data. """
if len(images_data) == 1:
in_bytes = io.BytesIO(images_data[0])
img = PIL.Image.open(in_bytes)
if img.mode != "RGB":
img = img.convert("RGB")
else:
# images need to be joined before further processing
logging.getLogger("Cover").info("Joining %u images..." % (len(images_data)))
# TODO find a way to do this losslessly for JPEG
new_img = PIL.Image.new("RGB", self.size)
assert(is_square(len(images_data)))
sq = int(math.sqrt(len(images_data)))
images_data_it = iter(images_data)
img_sizes = {}
for x in range(sq):
for y in range(sq):
current_image_data = next(images_data_it)
img_stream = io.BytesIO(current_image_data)
img = PIL.Image.open(img_stream)
img_sizes[(x, y)] = img.size
box = [0, 0]
if x > 0:
for px in range(x):
box[0] += img_sizes[(px, y)][0]
if y > 0:
for py in range(y):
box[1] += img_sizes[(x, py)][1]
box.extend((box[0] + img.size[0], box[1] + img.size[1]))
new_img.paste(img, box=tuple(box))
img = new_img
out_bytes = io.BytesIO()
if new_size is not None:
logging.getLogger("Cover").info("Resizing from %ux%u to %ux%u..." % (self.size[0], self.size[1], new_size, new_size))
img = img.resize((new_size, new_size), PIL.Image.LANCZOS)
# apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02)
# we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over
# parameters
unsharper = PIL.ImageFilter.UnsharpMask(radius=1.5, percent=70, threshold=5)
img = img.filter(unsharper)
if new_format is not None:
logging.getLogger("Cover").info("Converting to %s..." % (new_format.name.upper()))
target_format = new_format
else:
target_format = self.format
img.save(out_bytes,
format=target_format.name,
quality=90,
optimize=True)
return out_bytes.getvalue() | python | def postProcess(self, images_data, new_format, new_size):
""" Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data. """
if len(images_data) == 1:
in_bytes = io.BytesIO(images_data[0])
img = PIL.Image.open(in_bytes)
if img.mode != "RGB":
img = img.convert("RGB")
else:
# images need to be joined before further processing
logging.getLogger("Cover").info("Joining %u images..." % (len(images_data)))
# TODO find a way to do this losslessly for JPEG
new_img = PIL.Image.new("RGB", self.size)
assert(is_square(len(images_data)))
sq = int(math.sqrt(len(images_data)))
images_data_it = iter(images_data)
img_sizes = {}
for x in range(sq):
for y in range(sq):
current_image_data = next(images_data_it)
img_stream = io.BytesIO(current_image_data)
img = PIL.Image.open(img_stream)
img_sizes[(x, y)] = img.size
box = [0, 0]
if x > 0:
for px in range(x):
box[0] += img_sizes[(px, y)][0]
if y > 0:
for py in range(y):
box[1] += img_sizes[(x, py)][1]
box.extend((box[0] + img.size[0], box[1] + img.size[1]))
new_img.paste(img, box=tuple(box))
img = new_img
out_bytes = io.BytesIO()
if new_size is not None:
logging.getLogger("Cover").info("Resizing from %ux%u to %ux%u..." % (self.size[0], self.size[1], new_size, new_size))
img = img.resize((new_size, new_size), PIL.Image.LANCZOS)
# apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02)
# we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over
# parameters
unsharper = PIL.ImageFilter.UnsharpMask(radius=1.5, percent=70, threshold=5)
img = img.filter(unsharper)
if new_format is not None:
logging.getLogger("Cover").info("Converting to %s..." % (new_format.name.upper()))
target_format = new_format
else:
target_format = self.format
img.save(out_bytes,
format=target_format.name,
quality=90,
optimize=True)
return out_bytes.getvalue() | [
"def",
"postProcess",
"(",
"self",
",",
"images_data",
",",
"new_format",
",",
"new_size",
")",
":",
"if",
"len",
"(",
"images_data",
")",
"==",
"1",
":",
"in_bytes",
"=",
"io",
".",
"BytesIO",
"(",
"images_data",
"[",
"0",
"]",
")",
"img",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"in_bytes",
")",
"if",
"img",
".",
"mode",
"!=",
"\"RGB\"",
":",
"img",
"=",
"img",
".",
"convert",
"(",
"\"RGB\"",
")",
"else",
":",
"# images need to be joined before further processing",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Joining %u images...\"",
"%",
"(",
"len",
"(",
"images_data",
")",
")",
")",
"# TODO find a way to do this losslessly for JPEG",
"new_img",
"=",
"PIL",
".",
"Image",
".",
"new",
"(",
"\"RGB\"",
",",
"self",
".",
"size",
")",
"assert",
"(",
"is_square",
"(",
"len",
"(",
"images_data",
")",
")",
")",
"sq",
"=",
"int",
"(",
"math",
".",
"sqrt",
"(",
"len",
"(",
"images_data",
")",
")",
")",
"images_data_it",
"=",
"iter",
"(",
"images_data",
")",
"img_sizes",
"=",
"{",
"}",
"for",
"x",
"in",
"range",
"(",
"sq",
")",
":",
"for",
"y",
"in",
"range",
"(",
"sq",
")",
":",
"current_image_data",
"=",
"next",
"(",
"images_data_it",
")",
"img_stream",
"=",
"io",
".",
"BytesIO",
"(",
"current_image_data",
")",
"img",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"img_stream",
")",
"img_sizes",
"[",
"(",
"x",
",",
"y",
")",
"]",
"=",
"img",
".",
"size",
"box",
"=",
"[",
"0",
",",
"0",
"]",
"if",
"x",
">",
"0",
":",
"for",
"px",
"in",
"range",
"(",
"x",
")",
":",
"box",
"[",
"0",
"]",
"+=",
"img_sizes",
"[",
"(",
"px",
",",
"y",
")",
"]",
"[",
"0",
"]",
"if",
"y",
">",
"0",
":",
"for",
"py",
"in",
"range",
"(",
"y",
")",
":",
"box",
"[",
"1",
"]",
"+=",
"img_sizes",
"[",
"(",
"x",
",",
"py",
")",
"]",
"[",
"1",
"]",
"box",
".",
"extend",
"(",
"(",
"box",
"[",
"0",
"]",
"+",
"img",
".",
"size",
"[",
"0",
"]",
",",
"box",
"[",
"1",
"]",
"+",
"img",
".",
"size",
"[",
"1",
"]",
")",
")",
"new_img",
".",
"paste",
"(",
"img",
",",
"box",
"=",
"tuple",
"(",
"box",
")",
")",
"img",
"=",
"new_img",
"out_bytes",
"=",
"io",
".",
"BytesIO",
"(",
")",
"if",
"new_size",
"is",
"not",
"None",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Resizing from %ux%u to %ux%u...\"",
"%",
"(",
"self",
".",
"size",
"[",
"0",
"]",
",",
"self",
".",
"size",
"[",
"1",
"]",
",",
"new_size",
",",
"new_size",
")",
")",
"img",
"=",
"img",
".",
"resize",
"(",
"(",
"new_size",
",",
"new_size",
")",
",",
"PIL",
".",
"Image",
".",
"LANCZOS",
")",
"# apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02)",
"# we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over",
"# parameters",
"unsharper",
"=",
"PIL",
".",
"ImageFilter",
".",
"UnsharpMask",
"(",
"radius",
"=",
"1.5",
",",
"percent",
"=",
"70",
",",
"threshold",
"=",
"5",
")",
"img",
"=",
"img",
".",
"filter",
"(",
"unsharper",
")",
"if",
"new_format",
"is",
"not",
"None",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Converting to %s...\"",
"%",
"(",
"new_format",
".",
"name",
".",
"upper",
"(",
")",
")",
")",
"target_format",
"=",
"new_format",
"else",
":",
"target_format",
"=",
"self",
".",
"format",
"img",
".",
"save",
"(",
"out_bytes",
",",
"format",
"=",
"target_format",
".",
"name",
",",
"quality",
"=",
"90",
",",
"optimize",
"=",
"True",
")",
"return",
"out_bytes",
".",
"getvalue",
"(",
")"
] | Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data. | [
"Convert",
"image",
"binary",
"data",
"to",
"a",
"target",
"format",
"and",
"/",
"or",
"size",
"(",
"None",
"if",
"no",
"conversion",
"needed",
")",
"and",
"return",
"the",
"processed",
"data",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L159-L212 |
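The grid join in `postProcess` pastes a perfect-square number of tiles row by row, offsetting each paste box by the accumulated widths and heights of the tiles already placed. A minimal standalone sketch of the same idea, assuming an `is_square` helper like the repository's and a fixed, hypothetical `tile_px` tile size (the real code keeps each tile's native size):

```python
import io
import math

import PIL.Image


def is_square(n):
    """Sketch of the repo helper: True if n is a perfect square."""
    return math.isqrt(n) ** 2 == n


def join_tiles(images_data, tile_px=300):
    """Paste n = k*k equally sized tiles into one (k*tile_px) square image."""
    assert is_square(len(images_data))
    sq = math.isqrt(len(images_data))
    joined = PIL.Image.new("RGB", (sq * tile_px, sq * tile_px))
    tiles = iter(images_data)
    for x in range(sq):
        for y in range(sq):
            tile = PIL.Image.open(io.BytesIO(next(tiles))).convert("RGB")
            tile = tile.resize((tile_px, tile_px), PIL.Image.LANCZOS)
            joined.paste(tile, box=(x * tile_px, y * tile_px))
    return joined
```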
desbma/sacad | sacad/cover.py | CoverSourceResult.updateImageMetadata | async def updateImageMetadata(self):
""" Partially download image file(s) to get its real metadata, or get it from cache. """
assert(self.needMetadataUpdate())
width_sum, height_sum = 0, 0
# only download metadata for the needed images to get full size
idxs = []
assert(is_square(len(self.urls)))
sq = int(math.sqrt(len(self.urls)))
for x in range(sq):
for y in range(sq):
if x == y:
idxs.append((x * sq + y, x, y))
for idx, x, y in idxs:
url = self.urls[idx]
format, width, height = None, None, None
try:
format, width, height = pickle.loads(__class__.metadata_cache[url])
except KeyError:
# cache miss
pass
except Exception as e:
logging.getLogger("Cover").warning("Unable to load metadata for URL '%s' from cache: %s %s" % (url,
e.__class__.__qualname__,
e))
else:
# cache hit
logging.getLogger("Cover").debug("Got metadata for URL '%s' from cache" % (url))
if format is not None:
self.setFormatMetadata(format)
if (self.needMetadataUpdate(CoverImageMetadata.FORMAT) or
(self.needMetadataUpdate(CoverImageMetadata.SIZE) and ((width is None) or (height is None)))):
# download
logging.getLogger("Cover").debug("Downloading file header for URL '%s'..." % (url))
try:
headers = {}
self.source.updateHttpHeaders(headers)
response = await self.source.http.fastStreamedQuery(url,
headers=headers,
verify=False)
try:
if self.needMetadataUpdate(CoverImageMetadata.FORMAT):
# try to get format from response
format = __class__.guessImageFormatFromHttpResponse(response)
if format is not None:
self.setFormatMetadata(format)
if self.needMetadataUpdate():
# try to get metadata from HTTP data
metadata = await __class__.guessImageMetadataFromHttpData(response)
if metadata is not None:
format, width, height = metadata
if format is not None:
self.setFormatMetadata(format)
finally:
response.release()
except Exception as e:
logging.getLogger("Cover").warning("Failed to get file metadata for URL '%s' "
"(%s %s)" % (url,
e.__class__.__qualname__,
e))
if self.needMetadataUpdate(): # did we fail to get needed metadata at this point?
if ((self.format is None) or
((self.size is None) and ((width is None) or (height is None)))):
# if we get here, file is probably not reachable, or not even an image
logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', "
"skipping this result" % (url))
return
if ((self.format is not None) and
((self.size is not None) and (width is None) and (height is None))):
logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', "
"falling back to API data" % (url))
self.check_metadata = CoverImageMetadata.NONE
self.reliable_metadata = False
return
# save it to cache
__class__.metadata_cache[url] = pickle.dumps((format, width, height))
# sum sizes
if (width is not None) and (height is not None):
width_sum += width
height_sum += height
if self.needMetadataUpdate(CoverImageMetadata.SIZE) and (width_sum > 0) and (height_sum > 0):
self.setSizeMetadata((width_sum, height_sum)) | python | async def updateImageMetadata(self):
""" Partially download image file(s) to get its real metadata, or get it from cache. """
assert(self.needMetadataUpdate())
width_sum, height_sum = 0, 0
# only download metadata for the needed images to get full size
idxs = []
assert(is_square(len(self.urls)))
sq = int(math.sqrt(len(self.urls)))
for x in range(sq):
for y in range(sq):
if x == y:
idxs.append((x * sq + y, x, y))
for idx, x, y in idxs:
url = self.urls[idx]
format, width, height = None, None, None
try:
format, width, height = pickle.loads(__class__.metadata_cache[url])
except KeyError:
# cache miss
pass
except Exception as e:
logging.getLogger("Cover").warning("Unable to load metadata for URL '%s' from cache: %s %s" % (url,
e.__class__.__qualname__,
e))
else:
# cache hit
logging.getLogger("Cover").debug("Got metadata for URL '%s' from cache" % (url))
if format is not None:
self.setFormatMetadata(format)
if (self.needMetadataUpdate(CoverImageMetadata.FORMAT) or
(self.needMetadataUpdate(CoverImageMetadata.SIZE) and ((width is None) or (height is None)))):
# download
logging.getLogger("Cover").debug("Downloading file header for URL '%s'..." % (url))
try:
headers = {}
self.source.updateHttpHeaders(headers)
response = await self.source.http.fastStreamedQuery(url,
headers=headers,
verify=False)
try:
if self.needMetadataUpdate(CoverImageMetadata.FORMAT):
# try to get format from response
format = __class__.guessImageFormatFromHttpResponse(response)
if format is not None:
self.setFormatMetadata(format)
if self.needMetadataUpdate():
# try to get metadata from HTTP data
metadata = await __class__.guessImageMetadataFromHttpData(response)
if metadata is not None:
format, width, height = metadata
if format is not None:
self.setFormatMetadata(format)
finally:
response.release()
except Exception as e:
logging.getLogger("Cover").warning("Failed to get file metadata for URL '%s' "
"(%s %s)" % (url,
e.__class__.__qualname__,
e))
if self.needMetadataUpdate(): # did we fail to get needed metadata at this point?
if ((self.format is None) or
((self.size is None) and ((width is None) or (height is None)))):
# if we get here, file is probably not reachable, or not even an image
logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', "
"skipping this result" % (url))
return
if ((self.format is not None) and
((self.size is not None) and (width is None) and (height is None))):
logging.getLogger("Cover").debug("Unable to get file metadata from file or HTTP headers for URL '%s', "
"falling back to API data" % (url))
self.check_metadata = CoverImageMetadata.NONE
self.reliable_metadata = False
return
# save it to cache
__class__.metadata_cache[url] = pickle.dumps((format, width, height))
# sum sizes
if (width is not None) and (height is not None):
width_sum += width
height_sum += height
if self.needMetadataUpdate(CoverImageMetadata.SIZE) and (width_sum > 0) and (height_sum > 0):
self.setSizeMetadata((width_sum, height_sum)) | [
"async",
"def",
"updateImageMetadata",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"needMetadataUpdate",
"(",
")",
")",
"width_sum",
",",
"height_sum",
"=",
"0",
",",
"0",
"# only download metadata for the needed images to get full size",
"idxs",
"=",
"[",
"]",
"assert",
"(",
"is_square",
"(",
"len",
"(",
"self",
".",
"urls",
")",
")",
")",
"sq",
"=",
"int",
"(",
"math",
".",
"sqrt",
"(",
"len",
"(",
"self",
".",
"urls",
")",
")",
")",
"for",
"x",
"in",
"range",
"(",
"sq",
")",
":",
"for",
"y",
"in",
"range",
"(",
"sq",
")",
":",
"if",
"x",
"==",
"y",
":",
"idxs",
".",
"append",
"(",
"(",
"x",
"*",
"sq",
"+",
"y",
",",
"x",
",",
"y",
")",
")",
"for",
"idx",
",",
"x",
",",
"y",
"in",
"idxs",
":",
"url",
"=",
"self",
".",
"urls",
"[",
"idx",
"]",
"format",
",",
"width",
",",
"height",
"=",
"None",
",",
"None",
",",
"None",
"try",
":",
"format",
",",
"width",
",",
"height",
"=",
"pickle",
".",
"loads",
"(",
"__class__",
".",
"metadata_cache",
"[",
"url",
"]",
")",
"except",
"KeyError",
":",
"# cache miss",
"pass",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"Unable to load metadata for URL '%s' from cache: %s %s\"",
"%",
"(",
"url",
",",
"e",
".",
"__class__",
".",
"__qualname__",
",",
"e",
")",
")",
"else",
":",
"# cache hit",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Got metadata for URL '%s' from cache\"",
"%",
"(",
"url",
")",
")",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"setFormatMetadata",
"(",
"format",
")",
"if",
"(",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"FORMAT",
")",
"or",
"(",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"SIZE",
")",
"and",
"(",
"(",
"width",
"is",
"None",
")",
"or",
"(",
"height",
"is",
"None",
")",
")",
")",
")",
":",
"# download",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Downloading file header for URL '%s'...\"",
"%",
"(",
"url",
")",
")",
"try",
":",
"headers",
"=",
"{",
"}",
"self",
".",
"source",
".",
"updateHttpHeaders",
"(",
"headers",
")",
"response",
"=",
"await",
"self",
".",
"source",
".",
"http",
".",
"fastStreamedQuery",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"verify",
"=",
"False",
")",
"try",
":",
"if",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"FORMAT",
")",
":",
"# try to get format from response",
"format",
"=",
"__class__",
".",
"guessImageFormatFromHttpResponse",
"(",
"response",
")",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"setFormatMetadata",
"(",
"format",
")",
"if",
"self",
".",
"needMetadataUpdate",
"(",
")",
":",
"# try to get metadata from HTTP data",
"metadata",
"=",
"await",
"__class__",
".",
"guessImageMetadataFromHttpData",
"(",
"response",
")",
"if",
"metadata",
"is",
"not",
"None",
":",
"format",
",",
"width",
",",
"height",
"=",
"metadata",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"setFormatMetadata",
"(",
"format",
")",
"finally",
":",
"response",
".",
"release",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"Failed to get file metadata for URL '%s' \"",
"\"(%s %s)\"",
"%",
"(",
"url",
",",
"e",
".",
"__class__",
".",
"__qualname__",
",",
"e",
")",
")",
"if",
"self",
".",
"needMetadataUpdate",
"(",
")",
":",
"# did we fail to get needed metadata at this point?",
"if",
"(",
"(",
"self",
".",
"format",
"is",
"None",
")",
"or",
"(",
"(",
"self",
".",
"size",
"is",
"None",
")",
"and",
"(",
"(",
"width",
"is",
"None",
")",
"or",
"(",
"height",
"is",
"None",
")",
")",
")",
")",
":",
"# if we get here, file is probably not reachable, or not even an image",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Unable to get file metadata from file or HTTP headers for URL '%s', \"",
"\"skipping this result\"",
"%",
"(",
"url",
")",
")",
"return",
"if",
"(",
"(",
"self",
".",
"format",
"is",
"not",
"None",
")",
"and",
"(",
"(",
"self",
".",
"size",
"is",
"not",
"None",
")",
"and",
"(",
"width",
"is",
"None",
")",
"and",
"(",
"height",
"is",
"None",
")",
")",
")",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Unable to get file metadata from file or HTTP headers for URL '%s', \"",
"\"falling back to API data\"",
"%",
"(",
"url",
")",
")",
"self",
".",
"check_metadata",
"=",
"CoverImageMetadata",
".",
"NONE",
"self",
".",
"reliable_metadata",
"=",
"False",
"return",
"# save it to cache",
"__class__",
".",
"metadata_cache",
"[",
"url",
"]",
"=",
"pickle",
".",
"dumps",
"(",
"(",
"format",
",",
"width",
",",
"height",
")",
")",
"# sum sizes",
"if",
"(",
"width",
"is",
"not",
"None",
")",
"and",
"(",
"height",
"is",
"not",
"None",
")",
":",
"width_sum",
"+=",
"width",
"height_sum",
"+=",
"height",
"if",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"SIZE",
")",
"and",
"(",
"width_sum",
">",
"0",
")",
"and",
"(",
"height_sum",
">",
"0",
")",
":",
"self",
".",
"setSizeMetadata",
"(",
"(",
"width_sum",
",",
"height_sum",
")",
")"
] | Partially download image file(s) to get its real metadata, or get it from cache. | [
"Partially",
"download",
"image",
"file",
"(",
"s",
")",
"to",
"get",
"its",
"real",
"metadata",
"or",
"get",
"it",
"from",
"cache",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L214-L307 |
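`updateImageMetadata` only probes the diagonal tiles of the grid (`x == y`): for a k-by-k layout, summing the widths and heights of tiles (0,0), (1,1), ..., (k-1,k-1) already yields the size of the joined image. A tiny sketch of that index computation, assuming the same row-major flattening as in `postProcess`:

```python
import math

def diagonal_indexes(n_tiles):
    """Flattened indexes of the diagonal tiles of a square grid of n_tiles."""
    sq = math.isqrt(n_tiles)
    assert sq * sq == n_tiles  # grid must be a perfect square
    return [x * sq + x for x in range(sq)]

print(diagonal_indexes(9))  # [0, 4, 8]
```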
desbma/sacad | sacad/cover.py | CoverSourceResult.setFormatMetadata | def setFormatMetadata(self, format):
""" Set format image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.FORMAT)) or
(self.format is format))
self.format = format
self.check_metadata &= ~CoverImageMetadata.FORMAT | python | def setFormatMetadata(self, format):
""" Set format image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.FORMAT)) or
(self.format is format))
self.format = format
self.check_metadata &= ~CoverImageMetadata.FORMAT | [
"def",
"setFormatMetadata",
"(",
"self",
",",
"format",
")",
":",
"assert",
"(",
"(",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"FORMAT",
")",
")",
"or",
"(",
"self",
".",
"format",
"is",
"format",
")",
")",
"self",
".",
"format",
"=",
"format",
"self",
".",
"check_metadata",
"&=",
"~",
"CoverImageMetadata",
".",
"FORMAT"
] | Set format image metadata to what has been reliably identified. | [
"Set",
"format",
"image",
"metadata",
"to",
"what",
"has",
"been",
"reliably",
"identified",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L313-L318 |
desbma/sacad | sacad/cover.py | CoverSourceResult.setSizeMetadata | def setSizeMetadata(self, size):
""" Set size image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.SIZE)) or
(self.size == size))
self.size = size
self.check_metadata &= ~CoverImageMetadata.SIZE | python | def setSizeMetadata(self, size):
""" Set size image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.SIZE)) or
(self.size == size))
self.size = size
self.check_metadata &= ~CoverImageMetadata.SIZE | [
"def",
"setSizeMetadata",
"(",
"self",
",",
"size",
")",
":",
"assert",
"(",
"(",
"self",
".",
"needMetadataUpdate",
"(",
"CoverImageMetadata",
".",
"SIZE",
")",
")",
"or",
"(",
"self",
".",
"size",
"==",
"size",
")",
")",
"self",
".",
"size",
"=",
"size",
"self",
".",
"check_metadata",
"&=",
"~",
"CoverImageMetadata",
".",
"SIZE"
] | Set size image metadata to what has been reliably identified. | [
"Set",
"size",
"image",
"metadata",
"to",
"what",
"has",
"been",
"reliably",
"identified",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L320-L325 |
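Both setters clear a single bit from a pending-checks mask via `check_metadata &= ~CoverImageMetadata.FORMAT` / `~CoverImageMetadata.SIZE`. `CoverImageMetadata` itself is not part of this excerpt; presumably it is an `enum.Flag`. A hypothetical `Metadata` flag mirroring the pattern:

```python
import enum

class Metadata(enum.Flag):  # hypothetical stand-in for CoverImageMetadata
    NONE = 0
    FORMAT = enum.auto()
    SIZE = enum.auto()
    ALL = FORMAT | SIZE

pending = Metadata.ALL
pending &= ~Metadata.FORMAT      # format has been reliably identified
assert pending == Metadata.SIZE  # only the size still needs checking
```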
desbma/sacad | sacad/cover.py | CoverSourceResult.updateSignature | async def updateSignature(self):
""" Calculate a cover's "signature" using its thumbnail url. """
assert(self.thumbnail_sig is None)
if self.thumbnail_url is None:
logging.getLogger("Cover").warning("No thumbnail available for %s" % (self))
return
# download
logging.getLogger("Cover").debug("Downloading cover thumbnail '%s'..." % (self.thumbnail_url))
headers = {}
self.source.updateHttpHeaders(headers)
async def pre_cache_callback(img_data):
return await __class__.crunch(img_data, CoverImageFormat.JPEG, silent=True)
try:
store_in_cache_callback, image_data = await self.source.http.query(self.thumbnail_url,
cache=__class__.image_cache,
headers=headers,
pre_cache_callback=pre_cache_callback)
except Exception as e:
logging.getLogger("Cover").warning("Download of '%s' failed: %s %s" % (self.thumbnail_url,
e.__class__.__qualname__,
e))
return
# compute sig
logging.getLogger("Cover").debug("Computing signature of %s..." % (self))
try:
self.thumbnail_sig = __class__.computeImgSignature(image_data)
except Exception as e:
logging.getLogger("Cover").warning("Failed to compute signature of '%s': %s %s" % (self,
e.__class__.__qualname__,
e))
else:
await store_in_cache_callback() | python | async def updateSignature(self):
""" Calculate a cover's "signature" using its thumbnail url. """
assert(self.thumbnail_sig is None)
if self.thumbnail_url is None:
logging.getLogger("Cover").warning("No thumbnail available for %s" % (self))
return
# download
logging.getLogger("Cover").debug("Downloading cover thumbnail '%s'..." % (self.thumbnail_url))
headers = {}
self.source.updateHttpHeaders(headers)
async def pre_cache_callback(img_data):
return await __class__.crunch(img_data, CoverImageFormat.JPEG, silent=True)
try:
store_in_cache_callback, image_data = await self.source.http.query(self.thumbnail_url,
cache=__class__.image_cache,
headers=headers,
pre_cache_callback=pre_cache_callback)
except Exception as e:
logging.getLogger("Cover").warning("Download of '%s' failed: %s %s" % (self.thumbnail_url,
e.__class__.__qualname__,
e))
return
# compute sig
logging.getLogger("Cover").debug("Computing signature of %s..." % (self))
try:
self.thumbnail_sig = __class__.computeImgSignature(image_data)
except Exception as e:
logging.getLogger("Cover").warning("Failed to compute signature of '%s': %s %s" % (self,
e.__class__.__qualname__,
e))
else:
await store_in_cache_callback() | [
"async",
"def",
"updateSignature",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"thumbnail_sig",
"is",
"None",
")",
"if",
"self",
".",
"thumbnail_url",
"is",
"None",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"No thumbnail available for %s\"",
"%",
"(",
"self",
")",
")",
"return",
"# download",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Downloading cover thumbnail '%s'...\"",
"%",
"(",
"self",
".",
"thumbnail_url",
")",
")",
"headers",
"=",
"{",
"}",
"self",
".",
"source",
".",
"updateHttpHeaders",
"(",
"headers",
")",
"async",
"def",
"pre_cache_callback",
"(",
"img_data",
")",
":",
"return",
"await",
"__class__",
".",
"crunch",
"(",
"img_data",
",",
"CoverImageFormat",
".",
"JPEG",
",",
"silent",
"=",
"True",
")",
"try",
":",
"store_in_cache_callback",
",",
"image_data",
"=",
"await",
"self",
".",
"source",
".",
"http",
".",
"query",
"(",
"self",
".",
"thumbnail_url",
",",
"cache",
"=",
"__class__",
".",
"image_cache",
",",
"headers",
"=",
"headers",
",",
"pre_cache_callback",
"=",
"pre_cache_callback",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"Download of '%s' failed: %s %s\"",
"%",
"(",
"self",
".",
"thumbnail_url",
",",
"e",
".",
"__class__",
".",
"__qualname__",
",",
"e",
")",
")",
"return",
"# compute sig",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Computing signature of %s...\"",
"%",
"(",
"self",
")",
")",
"try",
":",
"self",
".",
"thumbnail_sig",
"=",
"__class__",
".",
"computeImgSignature",
"(",
"image_data",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"Failed to compute signature of '%s': %s %s\"",
"%",
"(",
"self",
",",
"e",
".",
"__class__",
".",
"__qualname__",
",",
"e",
")",
")",
"else",
":",
"await",
"store_in_cache_callback",
"(",
")"
] | Calculate a cover's "signature" using its thumbnail url. | [
"Calculate",
"a",
"cover",
"s",
"signature",
"using",
"its",
"thumbnail",
"url",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L327-L363 |
desbma/sacad | sacad/cover.py | CoverSourceResult.compare | def compare(first, second, *, target_size, size_tolerance_prct):
"""
Compare cover relevance/quality.
Return -1 if first is a worse match than second, 1 otherwise, or 0 if the covers can't be discriminated.
This code is responsible for comparing two cover results to identify the best one, and is used to sort all results.
It is probably the most important piece of code of this tool.
Covers with sizes under the target size (+- configured tolerance) are excluded before comparison.
The following factors are used in order:
1. Prefer approximately square covers
2. Prefer covers similar to the reference cover
3. Prefer size above target size
4. If both below target size, prefer closest
5. Prefer covers of most reliable source
6. Prefer best ranked cover
7. Prefer covers with reliable metadata
If all previous factors do not allow sorting of two results (very unlikely):
8. Prefer covers with fewer images to join
9. Prefer covers having the target size
10. Prefer PNG covers
11. Prefer exactly square covers
We don't overload the __lt__ operator because we need to pass the target_size parameter.
"""
for c in (first, second):
assert(c.format is not None)
assert(isinstance(c.size[0], int) and isinstance(c.size[1], int))
# prefer square covers #1
delta_ratio1 = abs(first.size[0] / first.size[1] - 1)
delta_ratio2 = abs(second.size[0] / second.size[1] - 1)
if abs(delta_ratio1 - delta_ratio2) > 0.15:
return -1 if (delta_ratio1 > delta_ratio2) else 1
# prefer similar to reference
sr1 = first.is_similar_to_reference
sr2 = second.is_similar_to_reference
if sr1 and (not sr2):
return 1
if (not sr1) and sr2:
return -1
# prefer size above preferred
delta_size1 = ((first.size[0] + first.size[1]) / 2) - target_size
delta_size2 = ((second.size[0] + second.size[1]) / 2) - target_size
if (((delta_size1 < 0) and (delta_size2 >= 0)) or
(delta_size1 >= 0) and (delta_size2 < 0)):
return -1 if (delta_size1 < delta_size2) else 1
# if both below target size, prefer closest
if (delta_size1 < 0) and (delta_size2 < 0) and (delta_size1 != delta_size2):
return -1 if (delta_size1 < delta_size2) else 1
# prefer covers of most reliable source
qs1 = first.source_quality.value
qs2 = second.source_quality.value
if qs1 != qs2:
return -1 if (qs1 < qs2) else 1
# prefer best ranked
if ((first.rank is not None) and
(second.rank is not None) and
(first.__class__ is second.__class__) and
(first.rank != second.rank)):
return -1 if (first.rank > second.rank) else 1
# prefer reliable metadata
if first.reliable_metadata != second.reliable_metadata:
return 1 if first.reliable_metadata else -1
# prefer covers with fewer images to join
ic1 = len(first.urls)
ic2 = len(second.urls)
if ic1 != ic2:
return -1 if (ic1 > ic2) else 1
# prefer the preferred size
if abs(delta_size1) != abs(delta_size2):
return -1 if (abs(delta_size1) > abs(delta_size2)) else 1
# prefer png
if first.format != second.format:
return -1 if (second.format is CoverImageFormat.PNG) else 1
# prefer square covers #2
if (delta_ratio1 != delta_ratio2):
return -1 if (delta_ratio1 > delta_ratio2) else 1
# fuck, they are the same!
return 0 | python | def compare(first, second, *, target_size, size_tolerance_prct):
"""
Compare cover relevance/quality.
Return -1 if first is a worse match than second, 1 otherwise, or 0 if the covers can't be discriminated.
This code is responsible for comparing two cover results to identify the best one, and is used to sort all results.
It is probably the most important piece of code of this tool.
Covers with sizes under the target size (+- configured tolerance) are excluded before comparison.
The following factors are used in order:
1. Prefer approximately square covers
2. Prefer covers similar to the reference cover
3. Prefer size above target size
4. If both below target size, prefer closest
5. Prefer covers of most reliable source
6. Prefer best ranked cover
7. Prefer covers with reliable metadata
If all previous factors do not allow sorting of two results (very unlikely):
8. Prefer covers with fewer images to join
9. Prefer covers having the target size
10. Prefer PNG covers
11. Prefer exactly square covers
We don't overload the __lt__ operator because we need to pass the target_size parameter.
"""
for c in (first, second):
assert(c.format is not None)
assert(isinstance(c.size[0], int) and isinstance(c.size[1], int))
# prefer square covers #1
delta_ratio1 = abs(first.size[0] / first.size[1] - 1)
delta_ratio2 = abs(second.size[0] / second.size[1] - 1)
if abs(delta_ratio1 - delta_ratio2) > 0.15:
return -1 if (delta_ratio1 > delta_ratio2) else 1
# prefer similar to reference
sr1 = first.is_similar_to_reference
sr2 = second.is_similar_to_reference
if sr1 and (not sr2):
return 1
if (not sr1) and sr2:
return -1
# prefer size above preferred
delta_size1 = ((first.size[0] + first.size[1]) / 2) - target_size
delta_size2 = ((second.size[0] + second.size[1]) / 2) - target_size
if (((delta_size1 < 0) and (delta_size2 >= 0)) or
(delta_size1 >= 0) and (delta_size2 < 0)):
return -1 if (delta_size1 < delta_size2) else 1
# if both below target size, prefer closest
if (delta_size1 < 0) and (delta_size2 < 0) and (delta_size1 != delta_size2):
return -1 if (delta_size1 < delta_size2) else 1
# prefer covers of most reliable source
qs1 = first.source_quality.value
qs2 = second.source_quality.value
if qs1 != qs2:
return -1 if (qs1 < qs2) else 1
# prefer best ranked
if ((first.rank is not None) and
(second.rank is not None) and
(first.__class__ is second.__class__) and
(first.rank != second.rank)):
return -1 if (first.rank > second.rank) else 1
# prefer reliable metadata
if first.reliable_metadata != second.reliable_metadata:
return 1 if first.reliable_metadata else -1
# prefer covers with fewer images to join
ic1 = len(first.urls)
ic2 = len(second.urls)
if ic1 != ic2:
return -1 if (ic1 > ic2) else 1
# prefer the preferred size
if abs(delta_size1) != abs(delta_size2):
return -1 if (abs(delta_size1) > abs(delta_size2)) else 1
# prefer png
if first.format != second.format:
return -1 if (second.format is CoverImageFormat.PNG) else 1
# prefer square covers #2
if (delta_ratio1 != delta_ratio2):
return -1 if (delta_ratio1 > delta_ratio2) else 1
# fuck, they are the same!
return 0 | [
"def",
"compare",
"(",
"first",
",",
"second",
",",
"*",
",",
"target_size",
",",
"size_tolerance_prct",
")",
":",
"for",
"c",
"in",
"(",
"first",
",",
"second",
")",
":",
"assert",
"(",
"c",
".",
"format",
"is",
"not",
"None",
")",
"assert",
"(",
"isinstance",
"(",
"c",
".",
"size",
"[",
"0",
"]",
",",
"int",
")",
"and",
"isinstance",
"(",
"c",
".",
"size",
"[",
"1",
"]",
",",
"int",
")",
")",
"# prefer square covers #1",
"delta_ratio1",
"=",
"abs",
"(",
"first",
".",
"size",
"[",
"0",
"]",
"/",
"first",
".",
"size",
"[",
"1",
"]",
"-",
"1",
")",
"delta_ratio2",
"=",
"abs",
"(",
"second",
".",
"size",
"[",
"0",
"]",
"/",
"second",
".",
"size",
"[",
"1",
"]",
"-",
"1",
")",
"if",
"abs",
"(",
"delta_ratio1",
"-",
"delta_ratio2",
")",
">",
"0.15",
":",
"return",
"-",
"1",
"if",
"(",
"delta_ratio1",
">",
"delta_ratio2",
")",
"else",
"1",
"# prefer similar to reference",
"sr1",
"=",
"first",
".",
"is_similar_to_reference",
"sr2",
"=",
"second",
".",
"is_similar_to_reference",
"if",
"sr1",
"and",
"(",
"not",
"sr2",
")",
":",
"return",
"1",
"if",
"(",
"not",
"sr1",
")",
"and",
"sr2",
":",
"return",
"-",
"1",
"# prefer size above preferred",
"delta_size1",
"=",
"(",
"(",
"first",
".",
"size",
"[",
"0",
"]",
"+",
"first",
".",
"size",
"[",
"1",
"]",
")",
"/",
"2",
")",
"-",
"target_size",
"delta_size2",
"=",
"(",
"(",
"second",
".",
"size",
"[",
"0",
"]",
"+",
"second",
".",
"size",
"[",
"1",
"]",
")",
"/",
"2",
")",
"-",
"target_size",
"if",
"(",
"(",
"(",
"delta_size1",
"<",
"0",
")",
"and",
"(",
"delta_size2",
">=",
"0",
")",
")",
"or",
"(",
"delta_size1",
">=",
"0",
")",
"and",
"(",
"delta_size2",
"<",
"0",
")",
")",
":",
"return",
"-",
"1",
"if",
"(",
"delta_size1",
"<",
"delta_size2",
")",
"else",
"1",
"# if both below target size, prefer closest",
"if",
"(",
"delta_size1",
"<",
"0",
")",
"and",
"(",
"delta_size2",
"<",
"0",
")",
"and",
"(",
"delta_size1",
"!=",
"delta_size2",
")",
":",
"return",
"-",
"1",
"if",
"(",
"delta_size1",
"<",
"delta_size2",
")",
"else",
"1",
"# prefer covers of most reliable source",
"qs1",
"=",
"first",
".",
"source_quality",
".",
"value",
"qs2",
"=",
"second",
".",
"source_quality",
".",
"value",
"if",
"qs1",
"!=",
"qs2",
":",
"return",
"-",
"1",
"if",
"(",
"qs1",
"<",
"qs2",
")",
"else",
"1",
"# prefer best ranked",
"if",
"(",
"(",
"first",
".",
"rank",
"is",
"not",
"None",
")",
"and",
"(",
"second",
".",
"rank",
"is",
"not",
"None",
")",
"and",
"(",
"first",
".",
"__class__",
"is",
"second",
".",
"__class__",
")",
"and",
"(",
"first",
".",
"rank",
"!=",
"second",
".",
"rank",
")",
")",
":",
"return",
"-",
"1",
"if",
"(",
"first",
".",
"rank",
">",
"second",
".",
"rank",
")",
"else",
"1",
"# prefer reliable metadata",
"if",
"first",
".",
"reliable_metadata",
"!=",
"second",
".",
"reliable_metadata",
":",
"return",
"1",
"if",
"first",
".",
"reliable_metadata",
"else",
"-",
"1",
"# prefer covers with less images to join",
"ic1",
"=",
"len",
"(",
"first",
".",
"urls",
")",
"ic2",
"=",
"len",
"(",
"second",
".",
"urls",
")",
"if",
"ic1",
"!=",
"ic2",
":",
"return",
"-",
"1",
"if",
"(",
"ic1",
">",
"ic2",
")",
"else",
"1",
"# prefer the preferred size",
"if",
"abs",
"(",
"delta_size1",
")",
"!=",
"abs",
"(",
"delta_size2",
")",
":",
"return",
"-",
"1",
"if",
"(",
"abs",
"(",
"delta_size1",
")",
">",
"abs",
"(",
"delta_size2",
")",
")",
"else",
"1",
"# prefer png",
"if",
"first",
".",
"format",
"!=",
"second",
".",
"format",
":",
"return",
"-",
"1",
"if",
"(",
"second",
".",
"format",
"is",
"CoverImageFormat",
".",
"PNG",
")",
"else",
"1",
"# prefer square covers #2",
"if",
"(",
"delta_ratio1",
"!=",
"delta_ratio2",
")",
":",
"return",
"-",
"1",
"if",
"(",
"delta_ratio1",
">",
"delta_ratio2",
")",
"else",
"1",
"# fuck, they are the same!",
"return",
"0"
] | Compare cover relevance/quality.
Return -1 if first is a worse match than second, 1 otherwise, or 0 if the covers can't be discriminated.
This code is responsible for comparing two cover results to identify the best one, and is used to sort all results.
It is probably the most important piece of code of this tool.
Covers with sizes under the target size (+- configured tolerance) are excluded before comparison.
The following factors are used in order:
1. Prefer approximately square covers
2. Prefer covers similar to the reference cover
3. Prefer size above target size
4. If both below target size, prefer closest
5. Prefer covers of most reliable source
6. Prefer best ranked cover
7. Prefer covers with reliable metadata
If all previous factors do not allow sorting of two results (very unlikely):
8. Prefer covers with less images to join
9. Prefer covers having the target size
10. Prefer PNG covers
11. Prefer exactly square covers
We don't overload the __lt__ operator because we need to pass the target_size parameter. | [
"Compare",
"cover",
"relevance",
"/",
"quality",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L366-L457 |
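As the docstring notes, `compare` cannot be an `__lt__` overload because it needs `target_size`, so sorting presumably goes through `functools.cmp_to_key` over a partially applied comparator. A sketch with a hypothetical one-factor comparator over plain `(width, height)` tuples (the real function weighs the eleven factors listed above):

```python
import functools

def compare(first, second, *, target_size, size_tolerance_prct):
    """Hypothetical stand-in: closer mean size to target == better match."""
    d1 = abs((first[0] + first[1]) / 2 - target_size)
    d2 = abs((second[0] + second[1]) / 2 - target_size)
    return (d2 > d1) - (d2 < d1)  # 1: first is better, -1: worse, 0: tie

sizes = [(300, 300), (1200, 1200), (600, 600)]
key = functools.cmp_to_key(functools.partial(compare, target_size=600,
                                             size_tolerance_prct=25))
print(sorted(sizes, key=key, reverse=True))  # best match first: (600, 600)
```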
desbma/sacad | sacad/cover.py | CoverSourceResult.crunch | async def crunch(image_data, format, silent=False):
""" Crunch image data, and return the processed data, or orignal data if operation failed. """
if (((format is CoverImageFormat.PNG) and (not HAS_OPTIPNG)) or
((format is CoverImageFormat.JPEG) and (not HAS_JPEGOPTIM))):
return image_data
with mkstemp_ctx.mkstemp(suffix=".%s" % (format.name.lower())) as tmp_out_filepath:
if not silent:
logging.getLogger("Cover").info("Crunching %s image..." % (format.name.upper()))
with open(tmp_out_filepath, "wb") as tmp_out_file:
tmp_out_file.write(image_data)
size_before = len(image_data)
if format is CoverImageFormat.PNG:
cmd = ["optipng", "-quiet", "-o1"]
elif format is CoverImageFormat.JPEG:
cmd = ["jpegoptim", "-q", "--strip-all"]
cmd.append(tmp_out_filepath)
p = await asyncio.create_subprocess_exec(*cmd,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL)
await p.wait()
if p.returncode != 0:
if not silent:
logging.getLogger("Cover").warning("Crunching image failed")
return image_data
with open(tmp_out_filepath, "rb") as tmp_out_file:
crunched_image_data = tmp_out_file.read()
size_after = len(crunched_image_data)
pct_saved = 100 * (size_before - size_after) / size_before
if not silent:
logging.getLogger("Cover").debug("Crunching image saved %.2f%% filesize" % (pct_saved))
return crunched_image_data | python | async def crunch(image_data, format, silent=False):
""" Crunch image data, and return the processed data, or orignal data if operation failed. """
if (((format is CoverImageFormat.PNG) and (not HAS_OPTIPNG)) or
((format is CoverImageFormat.JPEG) and (not HAS_JPEGOPTIM))):
return image_data
with mkstemp_ctx.mkstemp(suffix=".%s" % (format.name.lower())) as tmp_out_filepath:
if not silent:
logging.getLogger("Cover").info("Crunching %s image..." % (format.name.upper()))
with open(tmp_out_filepath, "wb") as tmp_out_file:
tmp_out_file.write(image_data)
size_before = len(image_data)
if format is CoverImageFormat.PNG:
cmd = ["optipng", "-quiet", "-o1"]
elif format is CoverImageFormat.JPEG:
cmd = ["jpegoptim", "-q", "--strip-all"]
cmd.append(tmp_out_filepath)
p = await asyncio.create_subprocess_exec(*cmd,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL)
await p.wait()
if p.returncode != 0:
if not silent:
logging.getLogger("Cover").warning("Crunching image failed")
return image_data
with open(tmp_out_filepath, "rb") as tmp_out_file:
crunched_image_data = tmp_out_file.read()
size_after = len(crunched_image_data)
pct_saved = 100 * (size_before - size_after) / size_before
if not silent:
logging.getLogger("Cover").debug("Crunching image saved %.2f%% filesize" % (pct_saved))
return crunched_image_data | [
"async",
"def",
"crunch",
"(",
"image_data",
",",
"format",
",",
"silent",
"=",
"False",
")",
":",
"if",
"(",
"(",
"(",
"format",
"is",
"CoverImageFormat",
".",
"PNG",
")",
"and",
"(",
"not",
"HAS_OPTIPNG",
")",
")",
"or",
"(",
"(",
"format",
"is",
"CoverImageFormat",
".",
"JPEG",
")",
"and",
"(",
"not",
"HAS_JPEGOPTIM",
")",
")",
")",
":",
"return",
"image_data",
"with",
"mkstemp_ctx",
".",
"mkstemp",
"(",
"suffix",
"=",
"\".%s\"",
"%",
"(",
"format",
".",
"name",
".",
"lower",
"(",
")",
")",
")",
"as",
"tmp_out_filepath",
":",
"if",
"not",
"silent",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Crunching %s image...\"",
"%",
"(",
"format",
".",
"name",
".",
"upper",
"(",
")",
")",
")",
"with",
"open",
"(",
"tmp_out_filepath",
",",
"\"wb\"",
")",
"as",
"tmp_out_file",
":",
"tmp_out_file",
".",
"write",
"(",
"image_data",
")",
"size_before",
"=",
"len",
"(",
"image_data",
")",
"if",
"format",
"is",
"CoverImageFormat",
".",
"PNG",
":",
"cmd",
"=",
"[",
"\"optipng\"",
",",
"\"-quiet\"",
",",
"\"-o1\"",
"]",
"elif",
"format",
"is",
"CoverImageFormat",
".",
"JPEG",
":",
"cmd",
"=",
"[",
"\"jpegoptim\"",
",",
"\"-q\"",
",",
"\"--strip-all\"",
"]",
"cmd",
".",
"append",
"(",
"tmp_out_filepath",
")",
"p",
"=",
"await",
"asyncio",
".",
"create_subprocess_exec",
"(",
"*",
"cmd",
",",
"stdin",
"=",
"asyncio",
".",
"subprocess",
".",
"DEVNULL",
",",
"stdout",
"=",
"asyncio",
".",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"asyncio",
".",
"subprocess",
".",
"DEVNULL",
")",
"await",
"p",
".",
"wait",
"(",
")",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"if",
"not",
"silent",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"Crunching image failed\"",
")",
"return",
"image_data",
"with",
"open",
"(",
"tmp_out_filepath",
",",
"\"rb\"",
")",
"as",
"tmp_out_file",
":",
"crunched_image_data",
"=",
"tmp_out_file",
".",
"read",
"(",
")",
"size_after",
"=",
"len",
"(",
"crunched_image_data",
")",
"pct_saved",
"=",
"100",
"*",
"(",
"size_before",
"-",
"size_after",
")",
"/",
"size_before",
"if",
"not",
"silent",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Crunching image saved %.2f%% filesize\"",
"%",
"(",
"pct_saved",
")",
")",
"return",
"crunched_image_data"
] | Crunch image data, and return the processed data, or orignal data if operation failed. | [
"Crunch",
"image",
"data",
"and",
"return",
"the",
"processed",
"data",
"or",
"orignal",
"data",
"if",
"operation",
"failed",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L460-L491 |
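`HAS_OPTIPNG` / `HAS_JPEGOPTIM` are defined elsewhere in the module; presumably they probe the PATH for the external crunchers, for example with `shutil.which`. A sketch of that probe plus the silenced-subprocess pattern used by `crunch` (the `cover.png` path is a placeholder):

```python
import asyncio
import shutil

HAS_OPTIPNG = shutil.which("optipng") is not None  # assumption: PATH probe

async def run_quiet(*cmd):
    """Run an external tool with all standard streams silenced; return exit code."""
    p = await asyncio.create_subprocess_exec(
        *cmd,
        stdin=asyncio.subprocess.DEVNULL,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL)
    return await p.wait()

if HAS_OPTIPNG:
    # placeholder file path for illustration
    print(asyncio.run(run_quiet("optipng", "-quiet", "-o1", "cover.png")))
```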
desbma/sacad | sacad/cover.py | CoverSourceResult.guessImageMetadataFromData | def guessImageMetadataFromData(img_data):
""" Identify an image format and size from its first bytes. """
format, width, height = None, None, None
img_stream = io.BytesIO(img_data)
try:
img = PIL.Image.open(img_stream)
except IOError:
format = imghdr.what(None, h=img_data)
format = SUPPORTED_IMG_FORMATS.get(format, None)
else:
format = img.format.lower()
format = SUPPORTED_IMG_FORMATS.get(format, None)
width, height = img.size
return format, width, height | python | def guessImageMetadataFromData(img_data):
""" Identify an image format and size from its first bytes. """
format, width, height = None, None, None
img_stream = io.BytesIO(img_data)
try:
img = PIL.Image.open(img_stream)
except IOError:
format = imghdr.what(None, h=img_data)
format = SUPPORTED_IMG_FORMATS.get(format, None)
else:
format = img.format.lower()
format = SUPPORTED_IMG_FORMATS.get(format, None)
width, height = img.size
return format, width, height | [
"def",
"guessImageMetadataFromData",
"(",
"img_data",
")",
":",
"format",
",",
"width",
",",
"height",
"=",
"None",
",",
"None",
",",
"None",
"img_stream",
"=",
"io",
".",
"BytesIO",
"(",
"img_data",
")",
"try",
":",
"img",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"img_stream",
")",
"except",
"IOError",
":",
"format",
"=",
"imghdr",
".",
"what",
"(",
"None",
",",
"h",
"=",
"img_data",
")",
"format",
"=",
"SUPPORTED_IMG_FORMATS",
".",
"get",
"(",
"format",
",",
"None",
")",
"else",
":",
"format",
"=",
"img",
".",
"format",
".",
"lower",
"(",
")",
"format",
"=",
"SUPPORTED_IMG_FORMATS",
".",
"get",
"(",
"format",
",",
"None",
")",
"width",
",",
"height",
"=",
"img",
".",
"size",
"return",
"format",
",",
"width",
",",
"height"
] | Identify an image format and size from its first bytes. | [
"Identify",
"an",
"image",
"format",
"and",
"size",
"from",
"its",
"first",
"bytes",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L494-L507 |
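An example round trip for the header probe above, using an in-memory JPEG; the repository then maps the lowercased PIL format name through its `SUPPORTED_IMG_FORMATS` table:

```python
import io

import PIL.Image

buf = io.BytesIO()
PIL.Image.new("RGB", (600, 400)).save(buf, format="JPEG")

img = PIL.Image.open(io.BytesIO(buf.getvalue()))  # identifies from bytes alone
print(img.format.lower(), img.size)               # jpeg (600, 400)
```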
desbma/sacad | sacad/cover.py | CoverSourceResult.guessImageMetadataFromHttpData | async def guessImageMetadataFromHttpData(response):
""" Identify an image format and size from the beginning of its HTTP data. """
metadata = None
img_data = bytearray()
while len(img_data) < CoverSourceResult.MAX_FILE_METADATA_PEEK_SIZE:
new_img_data = await response.content.read(__class__.METADATA_PEEK_SIZE_INCREMENT)
if not new_img_data:
break
img_data.extend(new_img_data)
metadata = __class__.guessImageMetadataFromData(img_data)
if (metadata is not None) and all(metadata):
return metadata
return metadata | python | async def guessImageMetadataFromHttpData(response):
""" Identify an image format and size from the beginning of its HTTP data. """
metadata = None
img_data = bytearray()
while len(img_data) < CoverSourceResult.MAX_FILE_METADATA_PEEK_SIZE:
new_img_data = await response.content.read(__class__.METADATA_PEEK_SIZE_INCREMENT)
if not new_img_data:
break
img_data.extend(new_img_data)
metadata = __class__.guessImageMetadataFromData(img_data)
if (metadata is not None) and all(metadata):
return metadata
return metadata | [
"async",
"def",
"guessImageMetadataFromHttpData",
"(",
"response",
")",
":",
"metadata",
"=",
"None",
"img_data",
"=",
"bytearray",
"(",
")",
"while",
"len",
"(",
"img_data",
")",
"<",
"CoverSourceResult",
".",
"MAX_FILE_METADATA_PEEK_SIZE",
":",
"new_img_data",
"=",
"await",
"response",
".",
"content",
".",
"read",
"(",
"__class__",
".",
"METADATA_PEEK_SIZE_INCREMENT",
")",
"if",
"not",
"new_img_data",
":",
"break",
"img_data",
".",
"extend",
"(",
"new_img_data",
")",
"metadata",
"=",
"__class__",
".",
"guessImageMetadataFromData",
"(",
"img_data",
")",
"if",
"(",
"metadata",
"is",
"not",
"None",
")",
"and",
"all",
"(",
"metadata",
")",
":",
"return",
"metadata",
"return",
"metadata"
] | Identify an image format and size from the beginning of its HTTP data. | [
"Identify",
"an",
"image",
"format",
"and",
"size",
"from",
"the",
"beginning",
"of",
"its",
"HTTP",
"data",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L510-L525 |
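The loop above grows a byte buffer in fixed increments until PIL can parse a complete header, so only the first few kilobytes of a remote file are fetched. A local-bytes sketch of the same incremental peek; the step and cap values are stand-ins for `METADATA_PEEK_SIZE_INCREMENT` and `MAX_FILE_METADATA_PEEK_SIZE`:

```python
import io

import PIL.Image

def peek_metadata(data, step=1024, max_peek=32 * 1024):
    """Identify (format, width, height) from progressively longer prefixes."""
    buf = bytearray()
    for offset in range(0, min(len(data), max_peek), step):
        buf.extend(data[offset:offset + step])
        try:
            img = PIL.Image.open(io.BytesIO(buf))
        except OSError:
            continue  # header still incomplete, read another chunk
        return img.format.lower(), img.size[0], img.size[1]
    return None
```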
desbma/sacad | sacad/cover.py | CoverSourceResult.guessImageFormatFromHttpResponse | def guessImageFormatFromHttpResponse(response):
""" Guess file format from HTTP response, return format or None. """
extensions = []
# try to guess extension from response content-type header
try:
content_type = response.headers["Content-Type"]
except KeyError:
pass
else:
ext = mimetypes.guess_extension(content_type, strict=False)
if ext is not None:
extensions.append(ext)
# try to extract extension from URL
urls = list(response.history) + [response.url]
for url in map(str, urls):
ext = os.path.splitext(urllib.parse.urlsplit(url).path)[-1]
if (ext is not None) and (ext not in extensions):
extensions.append(ext)
# now guess from the extensions
for ext in extensions:
try:
return SUPPORTED_IMG_FORMATS[ext[1:]]
except KeyError:
pass | python | def guessImageFormatFromHttpResponse(response):
""" Guess file format from HTTP response, return format or None. """
extensions = []
# try to guess extension from response content-type header
try:
content_type = response.headers["Content-Type"]
except KeyError:
pass
else:
ext = mimetypes.guess_extension(content_type, strict=False)
if ext is not None:
extensions.append(ext)
# try to extract extension from URL
urls = list(response.history) + [response.url]
for url in map(str, urls):
ext = os.path.splitext(urllib.parse.urlsplit(url).path)[-1]
if (ext is not None) and (ext not in extensions):
extensions.append(ext)
# now guess from the extensions
for ext in extensions:
try:
return SUPPORTED_IMG_FORMATS[ext[1:]]
except KeyError:
pass | [
"def",
"guessImageFormatFromHttpResponse",
"(",
"response",
")",
":",
"extensions",
"=",
"[",
"]",
"# try to guess extension from response content-type header",
"try",
":",
"content_type",
"=",
"response",
".",
"headers",
"[",
"\"Content-Type\"",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"ext",
"=",
"mimetypes",
".",
"guess_extension",
"(",
"content_type",
",",
"strict",
"=",
"False",
")",
"if",
"ext",
"is",
"not",
"None",
":",
"extensions",
".",
"append",
"(",
"ext",
")",
"# try to extract extension from URL",
"urls",
"=",
"list",
"(",
"response",
".",
"history",
")",
"+",
"[",
"response",
".",
"url",
"]",
"for",
"url",
"in",
"map",
"(",
"str",
",",
"urls",
")",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"url",
")",
".",
"path",
")",
"[",
"-",
"1",
"]",
"if",
"(",
"ext",
"is",
"not",
"None",
")",
"and",
"(",
"ext",
"not",
"in",
"extensions",
")",
":",
"extensions",
".",
"append",
"(",
"ext",
")",
"# now guess from the extensions",
"for",
"ext",
"in",
"extensions",
":",
"try",
":",
"return",
"SUPPORTED_IMG_FORMATS",
"[",
"ext",
"[",
"1",
":",
"]",
"]",
"except",
"KeyError",
":",
"pass"
] | Guess file format from HTTP response, return format or None. | [
"Guess",
"file",
"format",
"from",
"HTTP",
"response",
"return",
"format",
"or",
"None",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L528-L554 |
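Both extension sources used above, shown in isolation with a hypothetical URL; note that `mimetypes.guess_extension` consults a platform table, so the exact extension returned for some types can vary:

```python
import mimetypes
import os
import urllib.parse

print(mimetypes.guess_extension("image/png", strict=False))  # .png

url = "https://img.example.com/covers/front.jpg?size=large"
print(os.path.splitext(urllib.parse.urlsplit(url).path)[-1])  # .jpg (query ignored)
```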
desbma/sacad | sacad/cover.py | CoverSourceResult.preProcessForComparison | async def preProcessForComparison(results, target_size, size_tolerance_prct):
""" Process results to prepare them for future comparison and sorting. """
# find reference (=image most likely to match target cover ignoring factors like size and format)
reference = None
for result in results:
if result.source_quality is CoverSourceQuality.REFERENCE:
if ((reference is None) or
(CoverSourceResult.compare(result,
reference,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) > 0)):
reference = result
# remove results that are only refs
results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results))
# remove duplicates
no_dup_results = []
for result in results:
is_dup = False
for result_comp in results:
if ((result_comp is not result) and
(result_comp.urls == result.urls) and
(__class__.compare(result,
result_comp,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) < 0)):
is_dup = True
break
if not is_dup:
no_dup_results.append(result)
dup_count = len(results) - len(no_dup_results)
if dup_count > 0:
logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count))
results = no_dup_results
if reference is not None:
logging.getLogger("Cover").info("Reference is: %s" % (reference))
reference.is_similar_to_reference = True
# calculate sigs
futures = []
for result in results:
coroutine = result.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if reference.is_only_reference:
assert(reference not in results)
coroutine = reference.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if futures:
await asyncio.wait(futures)
for future in futures:
future.result() # raise pending exception if any
# compare other results to reference
for result in results:
if ((result is not reference) and
(result.thumbnail_sig is not None) and
(reference.thumbnail_sig is not None)):
result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig,
reference.thumbnail_sig)
if result.is_similar_to_reference:
logging.getLogger("Cover").debug("%s is similar to reference" % (result))
else:
logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result))
else:
logging.getLogger("Cover").warning("No reference result found")
return results | python | async def preProcessForComparison(results, target_size, size_tolerance_prct):
""" Process results to prepare them for future comparison and sorting. """
# find reference (=image most likely to match target cover ignoring factors like size and format)
reference = None
for result in results:
if result.source_quality is CoverSourceQuality.REFERENCE:
if ((reference is None) or
(CoverSourceResult.compare(result,
reference,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) > 0)):
reference = result
# remove results that are only refs
results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results))
# remove duplicates
no_dup_results = []
for result in results:
is_dup = False
for result_comp in results:
if ((result_comp is not result) and
(result_comp.urls == result.urls) and
(__class__.compare(result,
result_comp,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) < 0)):
is_dup = True
break
if not is_dup:
no_dup_results.append(result)
dup_count = len(results) - len(no_dup_results)
if dup_count > 0:
logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count))
results = no_dup_results
if reference is not None:
logging.getLogger("Cover").info("Reference is: %s" % (reference))
reference.is_similar_to_reference = True
# calculate sigs
futures = []
for result in results:
coroutine = result.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if reference.is_only_reference:
assert(reference not in results)
coroutine = reference.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if futures:
await asyncio.wait(futures)
for future in futures:
future.result() # raise pending exception if any
# compare other results to reference
for result in results:
if ((result is not reference) and
(result.thumbnail_sig is not None) and
(reference.thumbnail_sig is not None)):
result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig,
reference.thumbnail_sig)
if result.is_similar_to_reference:
logging.getLogger("Cover").debug("%s is similar to reference" % (result))
else:
logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result))
else:
logging.getLogger("Cover").warning("No reference result found")
return results | [
"async",
"def",
"preProcessForComparison",
"(",
"results",
",",
"target_size",
",",
"size_tolerance_prct",
")",
":",
"# find reference (=image most likely to match target cover ignoring factors like size and format)",
"reference",
"=",
"None",
"for",
"result",
"in",
"results",
":",
"if",
"result",
".",
"source_quality",
"is",
"CoverSourceQuality",
".",
"REFERENCE",
":",
"if",
"(",
"(",
"reference",
"is",
"None",
")",
"or",
"(",
"CoverSourceResult",
".",
"compare",
"(",
"result",
",",
"reference",
",",
"target_size",
"=",
"target_size",
",",
"size_tolerance_prct",
"=",
"size_tolerance_prct",
")",
">",
"0",
")",
")",
":",
"reference",
"=",
"result",
"# remove results that are only refs",
"results",
"=",
"list",
"(",
"itertools",
".",
"filterfalse",
"(",
"operator",
".",
"attrgetter",
"(",
"\"is_only_reference\"",
")",
",",
"results",
")",
")",
"# remove duplicates",
"no_dup_results",
"=",
"[",
"]",
"for",
"result",
"in",
"results",
":",
"is_dup",
"=",
"False",
"for",
"result_comp",
"in",
"results",
":",
"if",
"(",
"(",
"result_comp",
"is",
"not",
"result",
")",
"and",
"(",
"result_comp",
".",
"urls",
"==",
"result",
".",
"urls",
")",
"and",
"(",
"__class__",
".",
"compare",
"(",
"result",
",",
"result_comp",
",",
"target_size",
"=",
"target_size",
",",
"size_tolerance_prct",
"=",
"size_tolerance_prct",
")",
"<",
"0",
")",
")",
":",
"is_dup",
"=",
"True",
"break",
"if",
"not",
"is_dup",
":",
"no_dup_results",
".",
"append",
"(",
"result",
")",
"dup_count",
"=",
"len",
"(",
"results",
")",
"-",
"len",
"(",
"no_dup_results",
")",
"if",
"dup_count",
">",
"0",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Removed %u duplicate results\"",
"%",
"(",
"dup_count",
")",
")",
"results",
"=",
"no_dup_results",
"if",
"reference",
"is",
"not",
"None",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"info",
"(",
"\"Reference is: %s\"",
"%",
"(",
"reference",
")",
")",
"reference",
".",
"is_similar_to_reference",
"=",
"True",
"# calculate sigs",
"futures",
"=",
"[",
"]",
"for",
"result",
"in",
"results",
":",
"coroutine",
"=",
"result",
".",
"updateSignature",
"(",
")",
"future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"coroutine",
")",
"futures",
".",
"append",
"(",
"future",
")",
"if",
"reference",
".",
"is_only_reference",
":",
"assert",
"(",
"reference",
"not",
"in",
"results",
")",
"coroutine",
"=",
"reference",
".",
"updateSignature",
"(",
")",
"future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"coroutine",
")",
"futures",
".",
"append",
"(",
"future",
")",
"if",
"futures",
":",
"await",
"asyncio",
".",
"wait",
"(",
"futures",
")",
"for",
"future",
"in",
"futures",
":",
"future",
".",
"result",
"(",
")",
"# raise pending exception if any",
"# compare other results to reference",
"for",
"result",
"in",
"results",
":",
"if",
"(",
"(",
"result",
"is",
"not",
"reference",
")",
"and",
"(",
"result",
".",
"thumbnail_sig",
"is",
"not",
"None",
")",
"and",
"(",
"reference",
".",
"thumbnail_sig",
"is",
"not",
"None",
")",
")",
":",
"result",
".",
"is_similar_to_reference",
"=",
"__class__",
".",
"areImageSigsSimilar",
"(",
"result",
".",
"thumbnail_sig",
",",
"reference",
".",
"thumbnail_sig",
")",
"if",
"result",
".",
"is_similar_to_reference",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"%s is similar to reference\"",
"%",
"(",
"result",
")",
")",
"else",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"%s is NOT similar to reference\"",
"%",
"(",
"result",
")",
")",
"else",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"warning",
"(",
"\"No reference result found\"",
")",
"return",
"results"
] | Process results to prepare them for future comparison and sorting. | [
"Process",
"results",
"to",
"prepare",
"them",
"for",
"future",
"comparison",
"and",
"sorting",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L557-L627 |
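The reference-only results are dropped with `itertools.filterfalse` keyed on an attribute getter, i.e. keep only the items whose flag is falsy. The same idiom in isolation:

```python
import itertools
import operator
from types import SimpleNamespace

results = [SimpleNamespace(name="ref", is_only_reference=True),
           SimpleNamespace(name="candidate", is_only_reference=False)]
kept = list(itertools.filterfalse(operator.attrgetter("is_only_reference"),
                                  results))
print([r.name for r in kept])  # ['candidate']
```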
desbma/sacad | sacad/cover.py | CoverSourceResult.computeImgSignature | def computeImgSignature(image_data):
"""
Calculate an image signature.
This is similar to ahash but uses 3 color components
See: https://github.com/JohannesBuchner/imagehash/blob/4.0/imagehash/__init__.py#L125
"""
parser = PIL.ImageFile.Parser()
parser.feed(image_data)
img = parser.close()
target_size = (__class__.IMG_SIG_SIZE, __class__.IMG_SIG_SIZE)
img.thumbnail(target_size, PIL.Image.BICUBIC)
if img.size != target_size:
logging.getLogger("Cover").debug("Non square thumbnail after resize to %ux%u, unable to compute signature" % target_size)
return None
img = img.convert(mode="RGB")
pixels = img.getdata()
pixel_count = target_size[0] * target_size[1]
color_count = 3
r = bitarray.bitarray(pixel_count * color_count)
r.setall(False)
for ic in range(color_count):
mean = sum(p[ic] for p in pixels) // pixel_count
for ip, p in enumerate(pixels):
if p[ic] > mean:
r[pixel_count * ic + ip] = True
return r | python | def computeImgSignature(image_data):
"""
Calculate an image signature.
This is similar to ahash but uses 3 color components
See: https://github.com/JohannesBuchner/imagehash/blob/4.0/imagehash/__init__.py#L125
"""
parser = PIL.ImageFile.Parser()
parser.feed(image_data)
img = parser.close()
target_size = (__class__.IMG_SIG_SIZE, __class__.IMG_SIG_SIZE)
img.thumbnail(target_size, PIL.Image.BICUBIC)
if img.size != target_size:
logging.getLogger("Cover").debug("Non square thumbnail after resize to %ux%u, unable to compute signature" % target_size)
return None
img = img.convert(mode="RGB")
pixels = img.getdata()
pixel_count = target_size[0] * target_size[1]
color_count = 3
r = bitarray.bitarray(pixel_count * color_count)
r.setall(False)
for ic in range(color_count):
mean = sum(p[ic] for p in pixels) // pixel_count
for ip, p in enumerate(pixels):
if p[ic] > mean:
r[pixel_count * ic + ip] = True
return r | [
"def",
"computeImgSignature",
"(",
"image_data",
")",
":",
"parser",
"=",
"PIL",
".",
"ImageFile",
".",
"Parser",
"(",
")",
"parser",
".",
"feed",
"(",
"image_data",
")",
"img",
"=",
"parser",
".",
"close",
"(",
")",
"target_size",
"=",
"(",
"__class__",
".",
"IMG_SIG_SIZE",
",",
"__class__",
".",
"IMG_SIG_SIZE",
")",
"img",
".",
"thumbnail",
"(",
"target_size",
",",
"PIL",
".",
"Image",
".",
"BICUBIC",
")",
"if",
"img",
".",
"size",
"!=",
"target_size",
":",
"logging",
".",
"getLogger",
"(",
"\"Cover\"",
")",
".",
"debug",
"(",
"\"Non square thumbnail after resize to %ux%u, unable to compute signature\"",
"%",
"target_size",
")",
"return",
"None",
"img",
"=",
"img",
".",
"convert",
"(",
"mode",
"=",
"\"RGB\"",
")",
"pixels",
"=",
"img",
".",
"getdata",
"(",
")",
"pixel_count",
"=",
"target_size",
"[",
"0",
"]",
"*",
"target_size",
"[",
"1",
"]",
"color_count",
"=",
"3",
"r",
"=",
"bitarray",
".",
"bitarray",
"(",
"pixel_count",
"*",
"color_count",
")",
"r",
".",
"setall",
"(",
"False",
")",
"for",
"ic",
"in",
"range",
"(",
"color_count",
")",
":",
"mean",
"=",
"sum",
"(",
"p",
"[",
"ic",
"]",
"for",
"p",
"in",
"pixels",
")",
"//",
"pixel_count",
"for",
"ip",
",",
"p",
"in",
"enumerate",
"(",
"pixels",
")",
":",
"if",
"p",
"[",
"ic",
"]",
">",
"mean",
":",
"r",
"[",
"pixel_count",
"*",
"ic",
"+",
"ip",
"]",
"=",
"True",
"return",
"r"
] | Calculate an image signature.
This is similar to ahash but uses 3 color components
See: https://github.com/JohannesBuchner/imagehash/blob/4.0/imagehash/__init__.py#L125 | [
"Calculate",
"an",
"image",
"signature",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L630-L657 |
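`areImageSigsSimilar` is not part of this excerpt; presumably it thresholds a Hamming-style distance between the two bit vectors produced above. One way to do that with `bitarray` (the 25% threshold is a hypothetical value):

```python
import bitarray
import bitarray.util

a = bitarray.bitarray("1100110011")
b = bitarray.bitarray("1100110010")

# Normalized Hamming distance: fraction of differing bits.
distance = bitarray.util.count_xor(a, b) / len(a)
print(distance <= 0.25)  # True -> "similar" under the hypothetical threshold
```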
desbma/sacad | sacad/recurse.py | analyze_lib | def analyze_lib(lib_dir, cover_filename, *, ignore_existing=False):
""" Recursively analyze library, and return a dict of path -> (artist, album). """
work = {}
stats = collections.OrderedDict(((k, 0) for k in ("files", "albums", "missing covers", "errors")))
with tqdm.tqdm(desc="Analyzing library",
unit="dir",
postfix=stats) as progress, \
tqdm_logging.redirect_logging(progress):
for rootpath, rel_dirpaths, rel_filepaths in os.walk(lib_dir):
metadata = analyze_dir(stats,
rootpath,
rel_filepaths,
cover_filename,
ignore_existing=ignore_existing)
progress.set_postfix(stats, refresh=False)
progress.update(1)
if all(metadata[:-1]):
work[rootpath] = metadata[:-1]
return work | python | def analyze_lib(lib_dir, cover_filename, *, ignore_existing=False):
""" Recursively analyze library, and return a dict of path -> (artist, album). """
work = {}
stats = collections.OrderedDict(((k, 0) for k in ("files", "albums", "missing covers", "errors")))
with tqdm.tqdm(desc="Analyzing library",
unit="dir",
postfix=stats) as progress, \
tqdm_logging.redirect_logging(progress):
for rootpath, rel_dirpaths, rel_filepaths in os.walk(lib_dir):
metadata = analyze_dir(stats,
rootpath,
rel_filepaths,
cover_filename,
ignore_existing=ignore_existing)
progress.set_postfix(stats, refresh=False)
progress.update(1)
if all(metadata[:-1]):
work[rootpath] = metadata[:-1]
return work | [
"def",
"analyze_lib",
"(",
"lib_dir",
",",
"cover_filename",
",",
"*",
",",
"ignore_existing",
"=",
"False",
")",
":",
"work",
"=",
"{",
"}",
"stats",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"(",
"k",
",",
"0",
")",
"for",
"k",
"in",
"(",
"\"files\"",
",",
"\"albums\"",
",",
"\"missing covers\"",
",",
"\"errors\"",
")",
")",
")",
"with",
"tqdm",
".",
"tqdm",
"(",
"desc",
"=",
"\"Analyzing library\"",
",",
"unit",
"=",
"\"dir\"",
",",
"postfix",
"=",
"stats",
")",
"as",
"progress",
",",
"tqdm_logging",
".",
"redirect_logging",
"(",
"progress",
")",
":",
"for",
"rootpath",
",",
"rel_dirpaths",
",",
"rel_filepaths",
"in",
"os",
".",
"walk",
"(",
"lib_dir",
")",
":",
"metadata",
"=",
"analyze_dir",
"(",
"stats",
",",
"rootpath",
",",
"rel_filepaths",
",",
"cover_filename",
",",
"ignore_existing",
"=",
"ignore_existing",
")",
"progress",
".",
"set_postfix",
"(",
"stats",
",",
"refresh",
"=",
"False",
")",
"progress",
".",
"update",
"(",
"1",
")",
"if",
"all",
"(",
"metadata",
"[",
":",
"-",
"1",
"]",
")",
":",
"work",
"[",
"rootpath",
"]",
"=",
"metadata",
"[",
":",
"-",
"1",
"]",
"return",
"work"
] | Recursively analyze library, and return a dict of path -> (artist, album). | [
"Recursively",
"analyze",
"library",
"and",
"return",
"a",
"dict",
"of",
"path",
"-",
">",
"(",
"artist",
"album",
")",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L38-L56 |
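
A minimal driver sketch for analyze_lib, assuming the function is importable from sacad.recurse; "~/Music" and "cover.jpg" are placeholder values for a library root and cover file name.

import os
from sacad.recurse import analyze_lib

work = analyze_lib(os.path.expanduser("~/Music"), "cover.jpg")
for path, (artist, album) in work.items():
  # each entry is an album directory whose cover is missing
  print("%s - %s: %s" % (artist, album, path))
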
desbma/sacad | sacad/recurse.py | get_metadata | def get_metadata(audio_filepaths):
""" Return a tuple of album, artist, has_embedded_album_art from a list of audio files. """
artist, album, has_embedded_album_art = None, None, None
for audio_filepath in audio_filepaths:
try:
mf = mutagen.File(audio_filepath)
except Exception:
continue
if mf is None:
continue
# artist
for key in ("albumartist", "artist", # ogg
"TPE1", "TPE2", # mp3
"aART", "\xa9ART"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
artist = val[-1]
break
# album
for key in ("_album", "album", # ogg
"TALB", # mp3
"\xa9alb"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
album = val[-1]
break
if artist and album:
# album art
if isinstance(mf, mutagen.ogg.OggFileType):
has_embedded_album_art = "metadata_block_picture" in mf
elif isinstance(mf, mutagen.mp3.MP3):
has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"), mf.keys()))
elif isinstance(mf, mutagen.mp4.MP4):
has_embedded_album_art = "covr" in mf
# stop at the first file that succeeds (for performance)
break
return artist, album, has_embedded_album_art | python | def get_metadata(audio_filepaths):
""" Return a tuple of album, artist, has_embedded_album_art from a list of audio files. """
artist, album, has_embedded_album_art = None, None, None
for audio_filepath in audio_filepaths:
try:
mf = mutagen.File(audio_filepath)
except Exception:
continue
if mf is None:
continue
# artist
for key in ("albumartist", "artist", # ogg
"TPE1", "TPE2", # mp3
"aART", "\xa9ART"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
artist = val[-1]
break
# album
for key in ("_album", "album", # ogg
"TALB", # mp3
"\xa9alb"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
album = val[-1]
break
if artist and album:
# album art
if isinstance(mf, mutagen.ogg.OggFileType):
has_embedded_album_art = "metadata_block_picture" in mf
elif isinstance(mf, mutagen.mp3.MP3):
has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"), mf.keys()))
elif isinstance(mf, mutagen.mp4.MP4):
has_embedded_album_art = "covr" in mf
# stop at the first file that succeeds (for performance)
break
return artist, album, has_embedded_album_art | [
"def",
"get_metadata",
"(",
"audio_filepaths",
")",
":",
"artist",
",",
"album",
",",
"has_embedded_album_art",
"=",
"None",
",",
"None",
",",
"None",
"for",
"audio_filepath",
"in",
"audio_filepaths",
":",
"try",
":",
"mf",
"=",
"mutagen",
".",
"File",
"(",
"audio_filepath",
")",
"except",
"Exception",
":",
"continue",
"if",
"mf",
"is",
"None",
":",
"continue",
"# artist",
"for",
"key",
"in",
"(",
"\"albumartist\"",
",",
"\"artist\"",
",",
"# ogg",
"\"TPE1\"",
",",
"\"TPE2\"",
",",
"# mp3",
"\"aART\"",
",",
"\"\\xa9ART\"",
")",
":",
"# mp4",
"try",
":",
"val",
"=",
"mf",
".",
"get",
"(",
"key",
",",
"None",
")",
"except",
"ValueError",
":",
"val",
"=",
"None",
"if",
"val",
"is",
"not",
"None",
":",
"artist",
"=",
"val",
"[",
"-",
"1",
"]",
"break",
"# album",
"for",
"key",
"in",
"(",
"\"_album\"",
",",
"\"album\"",
",",
"# ogg",
"\"TALB\"",
",",
"# mp3",
"\"\\xa9alb\"",
")",
":",
"# mp4",
"try",
":",
"val",
"=",
"mf",
".",
"get",
"(",
"key",
",",
"None",
")",
"except",
"ValueError",
":",
"val",
"=",
"None",
"if",
"val",
"is",
"not",
"None",
":",
"album",
"=",
"val",
"[",
"-",
"1",
"]",
"break",
"if",
"artist",
"and",
"album",
":",
"# album art",
"if",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"ogg",
".",
"OggFileType",
")",
":",
"has_embedded_album_art",
"=",
"\"metadata_block_picture\"",
"in",
"mf",
"elif",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"mp3",
".",
"MP3",
")",
":",
"has_embedded_album_art",
"=",
"any",
"(",
"map",
"(",
"operator",
".",
"methodcaller",
"(",
"\"startswith\"",
",",
"\"APIC:\"",
")",
",",
"mf",
".",
"keys",
"(",
")",
")",
")",
"elif",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"mp4",
".",
"MP4",
")",
":",
"has_embedded_album_art",
"=",
"\"covr\"",
"in",
"mf",
"# stop at the first file that succeeds (for performance)",
"break",
"return",
"artist",
",",
"album",
",",
"has_embedded_album_art"
] | Return a tuple of album, artist, has_embedded_album_art from a list of audio files. | [
"Return",
"a",
"tuple",
"of",
"album",
"artist",
"has_embedded_album_art",
"from",
"a",
"list",
"of",
"audio",
"files",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L59-L106 |
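
A usage sketch with hypothetical file paths. Because the loop breaks at the first file whose tags can be read, passing every track of the album costs little.

paths = ["/music/Artist/Album/01 - Intro.mp3",  # hypothetical paths
         "/music/Artist/Album/02 - Song.mp3"]
artist, album, has_art = get_metadata(paths)
if artist and album:
  print("%s / %s (embedded art: %s)" % (artist, album, has_art))
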
desbma/sacad | sacad/recurse.py | analyze_dir | def analyze_dir(stats, parent_dir, rel_filepaths, cover_filename, *, ignore_existing=False):
""" Analyze a directory (non recursively) to get its album metadata if it is one. """
no_metadata = None, None, None
metadata = no_metadata
audio_filepaths = []
for rel_filepath in rel_filepaths:
stats["files"] += 1
try:
ext = os.path.splitext(rel_filepath)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
audio_filepaths.append(os.path.join(parent_dir, rel_filepath))
if audio_filepaths:
stats["albums"] += 1
if (cover_filename != EMBEDDED_ALBUM_ART_SYMBOL):
missing = (not os.path.isfile(os.path.join(parent_dir, cover_filename))) or ignore_existing
if missing:
metadata = get_metadata(audio_filepaths)
else:
metadata = get_metadata(audio_filepaths)
missing = (not metadata[2]) or ignore_existing
if missing:
stats["missing covers"] += 1
if not all(metadata[:-1]):
# failed to get metadata for this album
stats["errors"] += 1
logging.getLogger("sacad_r").error("Unable to read metadata for album directory '%s'" % (parent_dir))
else:
metadata = no_metadata
return metadata | python | def analyze_dir(stats, parent_dir, rel_filepaths, cover_filename, *, ignore_existing=False):
""" Analyze a directory (non recursively) to get its album metadata if it is one. """
no_metadata = None, None, None
metadata = no_metadata
audio_filepaths = []
for rel_filepath in rel_filepaths:
stats["files"] += 1
try:
ext = os.path.splitext(rel_filepath)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
audio_filepaths.append(os.path.join(parent_dir, rel_filepath))
if audio_filepaths:
stats["albums"] += 1
if (cover_filename != EMBEDDED_ALBUM_ART_SYMBOL):
missing = (not os.path.isfile(os.path.join(parent_dir, cover_filename))) or ignore_existing
if missing:
metadata = get_metadata(audio_filepaths)
else:
metadata = get_metadata(audio_filepaths)
missing = (not metadata[2]) or ignore_existing
if missing:
stats["missing covers"] += 1
if not all(metadata[:-1]):
# failed to get metadata for this album
stats["errors"] += 1
logging.getLogger("sacad_r").error("Unable to read metadata for album directory '%s'" % (parent_dir))
else:
metadata = no_metadata
return metadata | [
"def",
"analyze_dir",
"(",
"stats",
",",
"parent_dir",
",",
"rel_filepaths",
",",
"cover_filename",
",",
"*",
",",
"ignore_existing",
"=",
"False",
")",
":",
"no_metadata",
"=",
"None",
",",
"None",
",",
"None",
"metadata",
"=",
"no_metadata",
"audio_filepaths",
"=",
"[",
"]",
"for",
"rel_filepath",
"in",
"rel_filepaths",
":",
"stats",
"[",
"\"files\"",
"]",
"+=",
"1",
"try",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"rel_filepath",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
".",
"lower",
"(",
")",
"except",
"IndexError",
":",
"continue",
"if",
"ext",
"in",
"AUDIO_EXTENSIONS",
":",
"audio_filepaths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"rel_filepath",
")",
")",
"if",
"audio_filepaths",
":",
"stats",
"[",
"\"albums\"",
"]",
"+=",
"1",
"if",
"(",
"cover_filename",
"!=",
"EMBEDDED_ALBUM_ART_SYMBOL",
")",
":",
"missing",
"=",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"cover_filename",
")",
")",
")",
"or",
"ignore_existing",
"if",
"missing",
":",
"metadata",
"=",
"get_metadata",
"(",
"audio_filepaths",
")",
"else",
":",
"metadata",
"=",
"get_metadata",
"(",
"audio_filepaths",
")",
"missing",
"=",
"(",
"not",
"metadata",
"[",
"2",
"]",
")",
"or",
"ignore_existing",
"if",
"missing",
":",
"stats",
"[",
"\"missing covers\"",
"]",
"+=",
"1",
"if",
"not",
"all",
"(",
"metadata",
"[",
":",
"-",
"1",
"]",
")",
":",
"# failed to get metadata for this album",
"stats",
"[",
"\"errors\"",
"]",
"+=",
"1",
"logging",
".",
"getLogger",
"(",
"\"sacad_r\"",
")",
".",
"error",
"(",
"\"Unable to read metadata for album directory '%s'\"",
"%",
"(",
"parent_dir",
")",
")",
"else",
":",
"metadata",
"=",
"no_metadata",
"return",
"metadata"
] | Analyze a directory (non recursively) to get its album metadata if it is one. | [
"Analyze",
"a",
"directory",
"(",
"non",
"recursively",
")",
"to",
"get",
"its",
"album",
"metadata",
"if",
"it",
"is",
"one",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L109-L139 |
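
A standalone-call sketch (the directory path is hypothetical). The stats argument must be a mutable counter mapping pre-seeded with the keys the function increments; the keys below mirror the ones analyze_lib sets up.

import collections
import os

stats = collections.OrderedDict((k, 0) for k in ("files", "albums", "missing covers", "errors"))
album_dir = "/music/Artist/Album"  # hypothetical
artist, album, has_art = analyze_dir(stats, album_dir, os.listdir(album_dir), "cover.jpg")
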
desbma/sacad | sacad/recurse.py | embed_album_art | def embed_album_art(cover_filepath, path):
""" Embed album art into audio files. """
with open(cover_filepath, "rb") as f:
cover_data = f.read()
for filename in os.listdir(path):
try:
ext = os.path.splitext(filename)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
filepath = os.path.join(path, filename)
mf = mutagen.File(filepath)
if (isinstance(mf.tags, mutagen._vorbis.VComment) or
isinstance(mf, mutagen.ogg.OggFileType)):
picture = mutagen.flac.Picture()
picture.data = cover_data
picture.type = mutagen.id3.PictureType.COVER_FRONT
picture.mime = "image/jpeg"
encoded_data = base64.b64encode(picture.write())
mf["metadata_block_picture"] = encoded_data.decode("ascii")
elif (isinstance(mf.tags, mutagen.id3.ID3) or
isinstance(mf, mutagen.id3.ID3FileType)):
mf.tags.add(mutagen.id3.APIC(mime="image/jpeg",
type=mutagen.id3.PictureType.COVER_FRONT,
data=cover_data))
elif (isinstance(mf.tags, mutagen.mp4.MP4Tags) or
isinstance(mf, mutagen.mp4.MP4)):
mf["covr"] = [mutagen.mp4.MP4Cover(cover_data,
imageformat=mutagen.mp4.AtomDataType.JPEG)]
mf.save() | python | def embed_album_art(cover_filepath, path):
""" Embed album art into audio files. """
with open(cover_filepath, "rb") as f:
cover_data = f.read()
for filename in os.listdir(path):
try:
ext = os.path.splitext(filename)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
filepath = os.path.join(path, filename)
mf = mutagen.File(filepath)
if (isinstance(mf.tags, mutagen._vorbis.VComment) or
isinstance(mf, mutagen.ogg.OggFileType)):
picture = mutagen.flac.Picture()
picture.data = cover_data
picture.type = mutagen.id3.PictureType.COVER_FRONT
picture.mime = "image/jpeg"
encoded_data = base64.b64encode(picture.write())
mf["metadata_block_picture"] = encoded_data.decode("ascii")
elif (isinstance(mf.tags, mutagen.id3.ID3) or
isinstance(mf, mutagen.id3.ID3FileType)):
mf.tags.add(mutagen.id3.APIC(mime="image/jpeg",
type=mutagen.id3.PictureType.COVER_FRONT,
data=cover_data))
elif (isinstance(mf.tags, mutagen.mp4.MP4Tags) or
isinstance(mf, mutagen.mp4.MP4)):
mf["covr"] = [mutagen.mp4.MP4Cover(cover_data,
imageformat=mutagen.mp4.AtomDataType.JPEG)]
mf.save() | [
"def",
"embed_album_art",
"(",
"cover_filepath",
",",
"path",
")",
":",
"with",
"open",
"(",
"cover_filepath",
",",
"\"rb\"",
")",
"as",
"f",
":",
"cover_data",
"=",
"f",
".",
"read",
"(",
")",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"try",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
".",
"lower",
"(",
")",
"except",
"IndexError",
":",
"continue",
"if",
"ext",
"in",
"AUDIO_EXTENSIONS",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"mf",
"=",
"mutagen",
".",
"File",
"(",
"filepath",
")",
"if",
"(",
"isinstance",
"(",
"mf",
".",
"tags",
",",
"mutagen",
".",
"_vorbis",
".",
"VComment",
")",
"or",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"ogg",
".",
"OggFileType",
")",
")",
":",
"picture",
"=",
"mutagen",
".",
"flac",
".",
"Picture",
"(",
")",
"picture",
".",
"data",
"=",
"cover_data",
"picture",
".",
"type",
"=",
"mutagen",
".",
"id3",
".",
"PictureType",
".",
"COVER_FRONT",
"picture",
".",
"mime",
"=",
"\"image/jpeg\"",
"encoded_data",
"=",
"base64",
".",
"b64encode",
"(",
"picture",
".",
"write",
"(",
")",
")",
"mf",
"[",
"\"metadata_block_picture\"",
"]",
"=",
"encoded_data",
".",
"decode",
"(",
"\"ascii\"",
")",
"elif",
"(",
"isinstance",
"(",
"mf",
".",
"tags",
",",
"mutagen",
".",
"id3",
".",
"ID3",
")",
"or",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"id3",
".",
"ID3FileType",
")",
")",
":",
"mf",
".",
"tags",
".",
"add",
"(",
"mutagen",
".",
"id3",
".",
"APIC",
"(",
"mime",
"=",
"\"image/jpeg\"",
",",
"type",
"=",
"mutagen",
".",
"id3",
".",
"PictureType",
".",
"COVER_FRONT",
",",
"data",
"=",
"cover_data",
")",
")",
"elif",
"(",
"isinstance",
"(",
"mf",
".",
"tags",
",",
"mutagen",
".",
"mp4",
".",
"MP4Tags",
")",
"or",
"isinstance",
"(",
"mf",
",",
"mutagen",
".",
"mp4",
".",
"MP4",
")",
")",
":",
"mf",
"[",
"\"covr\"",
"]",
"=",
"[",
"mutagen",
".",
"mp4",
".",
"MP4Cover",
"(",
"cover_data",
",",
"imageformat",
"=",
"mutagen",
".",
"mp4",
".",
"AtomDataType",
".",
"JPEG",
")",
"]",
"mf",
".",
"save",
"(",
")"
] | Embed album art into audio files. | [
"Embed",
"album",
"art",
"into",
"audio",
"files",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L142-L173 |
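
Usage sketch with placeholder paths. Note that the function hardcodes an image/jpeg MIME type for Vorbis, ID3 and MP4 tags, so the cover file it is given should actually be a JPEG.

embed_album_art("/music/Artist/Album/cover.jpg",  # JPEG cover to embed
                "/music/Artist/Album")            # directory holding the audio files
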
desbma/sacad | sacad/recurse.py | ichunk | def ichunk(iterable, n):
""" Split an iterable into n-sized chunks. """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk | python | def ichunk(iterable, n):
""" Split an iterable into n-sized chunks. """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk | [
"def",
"ichunk",
"(",
"iterable",
",",
"n",
")",
":",
"it",
"=",
"iter",
"(",
"iterable",
")",
"while",
"True",
":",
"chunk",
"=",
"tuple",
"(",
"itertools",
".",
"islice",
"(",
"it",
",",
"n",
")",
")",
"if",
"not",
"chunk",
":",
"return",
"yield",
"chunk"
] | Split an iterable into n-sized chunks. | [
"Split",
"an",
"iterable",
"into",
"n",
"-",
"sized",
"chunks",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L176-L183 |
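
A tiny demonstration of the output shape: chunks are tuples, the last one is simply shorter, and nothing is padded.

assert list(ichunk(range(7), 3)) == [(0, 1, 2), (3, 4, 5), (6,)]
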
desbma/sacad | sacad/recurse.py | get_covers | def get_covers(work, args):
""" Get missing covers. """
with contextlib.ExitStack() as cm:
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
tmp_prefix = "%s_" % (os.path.splitext(os.path.basename(inspect.getfile(inspect.currentframe())))[0])
tmp_dir = cm.enter_context(tempfile.TemporaryDirectory(prefix=tmp_prefix))
# setup progress report
    stats = collections.OrderedDict(((k, 0) for k in ("ok", "errors", "no result found")))
progress = cm.enter_context(tqdm.tqdm(total=len(work),
miniters=1,
desc="Searching covers",
unit="cover",
postfix=stats))
cm.enter_context(tqdm_logging.redirect_logging(progress))
def update_progress(future):
path, cover_filepath, artist, album = futures[future]
try:
status = future.result()
except Exception as exception:
stats["errors"] += 1
logging.getLogger("sacad_r").error("Error occured while searching cover for "
"'%s' by '%s' from '%s': %s %s" % (album,
artist,
path,
exception.__class__.__qualname__,
exception))
else:
if status:
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
try:
embed_album_art(cover_filepath, path)
except Exception as exception:
stats["errors"] += 1
logging.getLogger("sacad_r").error("Error occured while embedding cover for "
"'%s' by '%s' from '%s': %s %s" % (album,
artist,
path,
exception.__class__.__qualname__,
exception))
else:
stats["ok"] += 1
finally:
os.remove(cover_filepath)
else:
stats["ok"] += 1
else:
stats["no result found"] += 1
logging.getLogger("sacad_r").warning("Unable to find cover for '%s' by '%s' from '%s'" % (album, artist, path))
progress.set_postfix(stats, refresh=False)
progress.update(1)
# post work
async_loop = asyncio.get_event_loop()
i = 0
# default event loop on Windows has a 512 fd limit, see https://docs.python.org/3/library/asyncio-eventloops.html#windows
# also on Linux default max open fd limit is 1024 (ulimit -n)
# so work in smaller chunks to avoid hitting fd limit
# this also updates the progress faster (instead of working on all searches, work on finishing the chunk before
# getting to the next one)
work_chunk_length = 16
for work_chunk in ichunk(work.items(), work_chunk_length):
futures = {}
for i, (path, (artist, album)) in enumerate(work_chunk, i):
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
cover_filepath = os.path.join(tmp_dir, "%00u.%s" % (i, args.format.name.lower()))
else:
cover_filepath = os.path.join(path, args.filename)
coroutine = sacad.search_and_download(album,
artist,
args.format,
args.size,
cover_filepath,
size_tolerance_prct=args.size_tolerance_prct,
amazon_tlds=args.amazon_tlds,
no_lq_sources=args.no_lq_sources,
async_loop=async_loop)
future = asyncio.ensure_future(coroutine, loop=async_loop)
futures[future] = (path, cover_filepath, artist, album)
for future in futures:
future.add_done_callback(update_progress)
# wait for end of work
root_future = asyncio.gather(*futures.keys(), loop=async_loop)
async_loop.run_until_complete(root_future) | python | def get_covers(work, args):
""" Get missing covers. """
with contextlib.ExitStack() as cm:
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
tmp_prefix = "%s_" % (os.path.splitext(os.path.basename(inspect.getfile(inspect.currentframe())))[0])
tmp_dir = cm.enter_context(tempfile.TemporaryDirectory(prefix=tmp_prefix))
# setup progress report
    stats = collections.OrderedDict(((k, 0) for k in ("ok", "errors", "no result found")))
progress = cm.enter_context(tqdm.tqdm(total=len(work),
miniters=1,
desc="Searching covers",
unit="cover",
postfix=stats))
cm.enter_context(tqdm_logging.redirect_logging(progress))
def update_progress(future):
path, cover_filepath, artist, album = futures[future]
try:
status = future.result()
except Exception as exception:
stats["errors"] += 1
logging.getLogger("sacad_r").error("Error occured while searching cover for "
"'%s' by '%s' from '%s': %s %s" % (album,
artist,
path,
exception.__class__.__qualname__,
exception))
else:
if status:
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
try:
embed_album_art(cover_filepath, path)
except Exception as exception:
stats["errors"] += 1
logging.getLogger("sacad_r").error("Error occured while embedding cover for "
"'%s' by '%s' from '%s': %s %s" % (album,
artist,
path,
exception.__class__.__qualname__,
exception))
else:
stats["ok"] += 1
finally:
os.remove(cover_filepath)
else:
stats["ok"] += 1
else:
stats["no result found"] += 1
logging.getLogger("sacad_r").warning("Unable to find cover for '%s' by '%s' from '%s'" % (album, artist, path))
progress.set_postfix(stats, refresh=False)
progress.update(1)
# post work
async_loop = asyncio.get_event_loop()
i = 0
# default event loop on Windows has a 512 fd limit, see https://docs.python.org/3/library/asyncio-eventloops.html#windows
# also on Linux default max open fd limit is 1024 (ulimit -n)
# so work in smaller chunks to avoid hitting fd limit
# this also updates the progress faster (instead of working on all searches, work on finishing the chunk before
# getting to the next one)
work_chunk_length = 16
for work_chunk in ichunk(work.items(), work_chunk_length):
futures = {}
for i, (path, (artist, album)) in enumerate(work_chunk, i):
if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
cover_filepath = os.path.join(tmp_dir, "%00u.%s" % (i, args.format.name.lower()))
else:
cover_filepath = os.path.join(path, args.filename)
coroutine = sacad.search_and_download(album,
artist,
args.format,
args.size,
cover_filepath,
size_tolerance_prct=args.size_tolerance_prct,
amazon_tlds=args.amazon_tlds,
no_lq_sources=args.no_lq_sources,
async_loop=async_loop)
future = asyncio.ensure_future(coroutine, loop=async_loop)
futures[future] = (path, cover_filepath, artist, album)
for future in futures:
future.add_done_callback(update_progress)
# wait for end of work
root_future = asyncio.gather(*futures.keys(), loop=async_loop)
async_loop.run_until_complete(root_future) | [
"def",
"get_covers",
"(",
"work",
",",
"args",
")",
":",
"with",
"contextlib",
".",
"ExitStack",
"(",
")",
"as",
"cm",
":",
"if",
"args",
".",
"filename",
"==",
"EMBEDDED_ALBUM_ART_SYMBOL",
":",
"tmp_prefix",
"=",
"\"%s_\"",
"%",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"inspect",
".",
"getfile",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
")",
")",
"[",
"0",
"]",
")",
"tmp_dir",
"=",
"cm",
".",
"enter_context",
"(",
"tempfile",
".",
"TemporaryDirectory",
"(",
"prefix",
"=",
"tmp_prefix",
")",
")",
"# setup progress report",
"stats",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"(",
"k",
",",
"0",
")",
"for",
"k",
"in",
"(",
"\"ok\"",
",",
"\"errors\"",
",",
"\"no result found\"",
")",
")",
")",
"progress",
"=",
"cm",
".",
"enter_context",
"(",
"tqdm",
".",
"tqdm",
"(",
"total",
"=",
"len",
"(",
"work",
")",
",",
"miniters",
"=",
"1",
",",
"desc",
"=",
"\"Searching covers\"",
",",
"unit",
"=",
"\"cover\"",
",",
"postfix",
"=",
"stats",
")",
")",
"cm",
".",
"enter_context",
"(",
"tqdm_logging",
".",
"redirect_logging",
"(",
"progress",
")",
")",
"def",
"update_progress",
"(",
"future",
")",
":",
"path",
",",
"cover_filepath",
",",
"artist",
",",
"album",
"=",
"futures",
"[",
"future",
"]",
"try",
":",
"status",
"=",
"future",
".",
"result",
"(",
")",
"except",
"Exception",
"as",
"exception",
":",
"stats",
"[",
"\"errors\"",
"]",
"+=",
"1",
"logging",
".",
"getLogger",
"(",
"\"sacad_r\"",
")",
".",
"error",
"(",
"\"Error occured while searching cover for \"",
"\"'%s' by '%s' from '%s': %s %s\"",
"%",
"(",
"album",
",",
"artist",
",",
"path",
",",
"exception",
".",
"__class__",
".",
"__qualname__",
",",
"exception",
")",
")",
"else",
":",
"if",
"status",
":",
"if",
"args",
".",
"filename",
"==",
"EMBEDDED_ALBUM_ART_SYMBOL",
":",
"try",
":",
"embed_album_art",
"(",
"cover_filepath",
",",
"path",
")",
"except",
"Exception",
"as",
"exception",
":",
"stats",
"[",
"\"errors\"",
"]",
"+=",
"1",
"logging",
".",
"getLogger",
"(",
"\"sacad_r\"",
")",
".",
"error",
"(",
"\"Error occured while embedding cover for \"",
"\"'%s' by '%s' from '%s': %s %s\"",
"%",
"(",
"album",
",",
"artist",
",",
"path",
",",
"exception",
".",
"__class__",
".",
"__qualname__",
",",
"exception",
")",
")",
"else",
":",
"stats",
"[",
"\"ok\"",
"]",
"+=",
"1",
"finally",
":",
"os",
".",
"remove",
"(",
"cover_filepath",
")",
"else",
":",
"stats",
"[",
"\"ok\"",
"]",
"+=",
"1",
"else",
":",
"stats",
"[",
"\"no result found\"",
"]",
"+=",
"1",
"logging",
".",
"getLogger",
"(",
"\"sacad_r\"",
")",
".",
"warning",
"(",
"\"Unable to find cover for '%s' by '%s' from '%s'\"",
"%",
"(",
"album",
",",
"artist",
",",
"path",
")",
")",
"progress",
".",
"set_postfix",
"(",
"stats",
",",
"refresh",
"=",
"False",
")",
"progress",
".",
"update",
"(",
"1",
")",
"# post work",
"async_loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"i",
"=",
"0",
"# default event loop on Windows has a 512 fd limit, see https://docs.python.org/3/library/asyncio-eventloops.html#windows",
"# also on Linux default max open fd limit is 1024 (ulimit -n)",
"# so work in smaller chunks to avoid hitting fd limit",
"# this also updates the progress faster (instead of working on all searches, work on finishing the chunk before",
"# getting to the next one)",
"work_chunk_length",
"=",
"16",
"for",
"work_chunk",
"in",
"ichunk",
"(",
"work",
".",
"items",
"(",
")",
",",
"work_chunk_length",
")",
":",
"futures",
"=",
"{",
"}",
"for",
"i",
",",
"(",
"path",
",",
"(",
"artist",
",",
"album",
")",
")",
"in",
"enumerate",
"(",
"work_chunk",
",",
"i",
")",
":",
"if",
"args",
".",
"filename",
"==",
"EMBEDDED_ALBUM_ART_SYMBOL",
":",
"cover_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"%00u.%s\"",
"%",
"(",
"i",
",",
"args",
".",
"format",
".",
"name",
".",
"lower",
"(",
")",
")",
")",
"else",
":",
"cover_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"args",
".",
"filename",
")",
"coroutine",
"=",
"sacad",
".",
"search_and_download",
"(",
"album",
",",
"artist",
",",
"args",
".",
"format",
",",
"args",
".",
"size",
",",
"cover_filepath",
",",
"size_tolerance_prct",
"=",
"args",
".",
"size_tolerance_prct",
",",
"amazon_tlds",
"=",
"args",
".",
"amazon_tlds",
",",
"no_lq_sources",
"=",
"args",
".",
"no_lq_sources",
",",
"async_loop",
"=",
"async_loop",
")",
"future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"coroutine",
",",
"loop",
"=",
"async_loop",
")",
"futures",
"[",
"future",
"]",
"=",
"(",
"path",
",",
"cover_filepath",
",",
"artist",
",",
"album",
")",
"for",
"future",
"in",
"futures",
":",
"future",
".",
"add_done_callback",
"(",
"update_progress",
")",
"# wait for end of work",
"root_future",
"=",
"asyncio",
".",
"gather",
"(",
"*",
"futures",
".",
"keys",
"(",
")",
",",
"loop",
"=",
"async_loop",
")",
"async_loop",
".",
"run_until_complete",
"(",
"root_future",
")"
] | Get missing covers. | [
"Get",
"missing",
"covers",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/recurse.py#L186-L273 |
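
The file-descriptor-limit workaround described in the comments above, isolated into a sketch. make_coroutine stands for any coroutine factory (an assumed name, not from the source), the ichunk helper from the same module is reused, and the loop= arguments mirror the pre-3.10 asyncio style used by the original code.

import asyncio

def run_in_chunks(items, make_coroutine, chunk_size=16):
  loop = asyncio.get_event_loop()
  for chunk in ichunk(items, chunk_size):
    # at most chunk_size sockets/files are open at any one time
    futures = [asyncio.ensure_future(make_coroutine(item), loop=loop)
               for item in chunk]
    loop.run_until_complete(asyncio.gather(*futures, loop=loop))
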
desbma/sacad | sacad/mkstemp_ctx.py | mkstemp | def mkstemp(*args, **kwargs):
"""
Context manager similar to tempfile.NamedTemporaryFile except the file is not deleted on close, and only the filepath
is returned
  .. warning:: Unlike tempfile.mkstemp, this is not secure
"""
fd, filename = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
try:
yield filename
finally:
os.remove(filename) | python | def mkstemp(*args, **kwargs):
"""
Context manager similar to tempfile.NamedTemporaryFile except the file is not deleted on close, and only the filepath
is returned
  .. warning:: Unlike tempfile.mkstemp, this is not secure
"""
fd, filename = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
try:
yield filename
finally:
os.remove(filename) | [
"def",
"mkstemp",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"fd",
",",
"filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"os",
".",
"close",
"(",
"fd",
")",
"try",
":",
"yield",
"filename",
"finally",
":",
"os",
".",
"remove",
"(",
"filename",
")"
] | Context manager similar to tempfile.NamedTemporaryFile except the file is not deleted on close, and only the filepath
is returned
.. warning:: Unlike tempfile.mkstemp, this is not secure | [
"Context",
"manager",
"similar",
"to",
"tempfile",
".",
"NamedTemporaryFile",
"except",
"the",
"file",
"is",
"not",
"deleted",
"on",
"close",
"and",
"only",
"the",
"filepath",
"is",
"returned",
"..",
"warnings",
"::",
"Unlike",
"tempfile",
".",
"mkstemp",
"this",
"is",
"not",
"secure"
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/mkstemp_ctx.py#L7-L18 |
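
Usage sketch. Unlike tempfile.NamedTemporaryFile, the path can be freely reopened by name (which matters on Windows, where an open NamedTemporaryFile cannot be opened a second time); the file is removed when the block exits.

with mkstemp(suffix=".jpg") as tmp_filepath:
  with open(tmp_filepath, "wb") as f:
    f.write(b"placeholder bytes")
  # tmp_filepath still exists here; it is deleted on context exit
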
desbma/sacad | sacad/tqdm_logging.py | redirect_logging | def redirect_logging(tqdm_obj, logger=logging.getLogger()):
""" Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original. """
# remove current handler
assert(len(logger.handlers) == 1)
prev_handler = logger.handlers[0]
logger.removeHandler(prev_handler)
# add tqdm handler
tqdm_handler = TqdmLoggingHandler(tqdm_obj)
if prev_handler.formatter is not None:
tqdm_handler.setFormatter(prev_handler.formatter)
logger.addHandler(tqdm_handler)
try:
yield
finally:
# restore handler
logger.removeHandler(tqdm_handler)
logger.addHandler(prev_handler) | python | def redirect_logging(tqdm_obj, logger=logging.getLogger()):
""" Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original. """
# remove current handler
assert(len(logger.handlers) == 1)
prev_handler = logger.handlers[0]
logger.removeHandler(prev_handler)
# add tqdm handler
tqdm_handler = TqdmLoggingHandler(tqdm_obj)
if prev_handler.formatter is not None:
tqdm_handler.setFormatter(prev_handler.formatter)
logger.addHandler(tqdm_handler)
try:
yield
finally:
# restore handler
logger.removeHandler(tqdm_handler)
logger.addHandler(prev_handler) | [
"def",
"redirect_logging",
"(",
"tqdm_obj",
",",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
")",
":",
"# remove current handler",
"assert",
"(",
"len",
"(",
"logger",
".",
"handlers",
")",
"==",
"1",
")",
"prev_handler",
"=",
"logger",
".",
"handlers",
"[",
"0",
"]",
"logger",
".",
"removeHandler",
"(",
"prev_handler",
")",
"# add tqdm handler",
"tqdm_handler",
"=",
"TqdmLoggingHandler",
"(",
"tqdm_obj",
")",
"if",
"prev_handler",
".",
"formatter",
"is",
"not",
"None",
":",
"tqdm_handler",
".",
"setFormatter",
"(",
"prev_handler",
".",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"tqdm_handler",
")",
"try",
":",
"yield",
"finally",
":",
"# restore handler",
"logger",
".",
"removeHandler",
"(",
"tqdm_handler",
")",
"logger",
".",
"addHandler",
"(",
"prev_handler",
")"
] | Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original. | [
"Context",
"manager",
"to",
"redirect",
"logging",
"to",
"a",
"TqdmLoggingHandler",
"object",
"and",
"then",
"restore",
"the",
"original",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/tqdm_logging.py#L21-L39 |
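
Usage sketch, assuming the generator is wrapped with contextlib.contextmanager (as its docstring implies) and that TqdmLoggingHandler, defined alongside it in the module, forwards records through tqdm.write() so they do not garble the bar. logging.basicConfig installs exactly one root handler, which satisfies the assert.

import logging
import tqdm

logging.basicConfig(level=logging.INFO)
with tqdm.tqdm(total=3) as progress, redirect_logging(progress):
  for _ in range(3):
    logging.info("working...")  # rendered above the progress bar
    progress.update(1)
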
desbma/sacad | sacad/sources/base.py | CoverSource.search | async def search(self, album, artist):
""" Search for a given album/artist and return an iterable of CoverSourceResult. """
self.logger.debug("Searching with source '%s'..." % (self.__class__.__name__))
album = self.processAlbumString(album)
artist = self.processArtistString(artist)
url_data = self.getSearchUrl(album, artist)
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
try:
store_in_cache_callback, api_data = await self.fetchResults(url, post_data)
results = await self.parseResults(api_data)
except Exception as e:
# raise
self.logger.warning("Search with source '%s' failed: %s %s" % (self.__class__.__name__,
e.__class__.__qualname__,
e))
return ()
else:
if results:
# only store in cache if parsing succeeds and we have results
await store_in_cache_callback()
# get metadata
futures = []
for result in filter(operator.methodcaller("needMetadataUpdate"), results):
coroutine = result.updateImageMetadata()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if futures:
await asyncio.wait(futures)
for future in futures:
future.result() # raise pending exception if any
# filter
results_excluded_count = 0
reference_only_count = 0
results_kept = []
for result in results:
if ((result.size[0] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or # skip too small images
(result.size[1] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or
(result.format is None) or # unknown format
result.needMetadataUpdate()): # if still true, it means we failed to grab metadata, so exclude it
if result.source_quality is CoverSourceQuality.REFERENCE:
# we keep this result just for the reference, it will be excluded from the results
result.is_only_reference = True
results_kept.append(result)
reference_only_count += 1
else:
results_excluded_count += 1
else:
results_kept.append(result)
result_kept_count = len(results_kept) - reference_only_count
# log
self.logger.info("Got %u relevant (%u excluded) results from source '%s'" % (result_kept_count,
results_excluded_count + reference_only_count,
self.__class__.__name__))
for result in itertools.filterfalse(operator.attrgetter("is_only_reference"), results_kept):
self.logger.debug("%s %s%s %4dx%4d %s%s" % (result.__class__.__name__,
("(%02d) " % (result.rank)) if result.rank is not None else "",
result.format.name,
result.size[0],
result.size[1],
result.urls[0],
" [x%u]" % (len(result.urls)) if len(result.urls) > 1 else ""))
return results_kept | python | async def search(self, album, artist):
""" Search for a given album/artist and return an iterable of CoverSourceResult. """
self.logger.debug("Searching with source '%s'..." % (self.__class__.__name__))
album = self.processAlbumString(album)
artist = self.processArtistString(artist)
url_data = self.getSearchUrl(album, artist)
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
try:
store_in_cache_callback, api_data = await self.fetchResults(url, post_data)
results = await self.parseResults(api_data)
except Exception as e:
# raise
self.logger.warning("Search with source '%s' failed: %s %s" % (self.__class__.__name__,
e.__class__.__qualname__,
e))
return ()
else:
if results:
# only store in cache if parsing succeeds and we have results
await store_in_cache_callback()
# get metadata
futures = []
for result in filter(operator.methodcaller("needMetadataUpdate"), results):
coroutine = result.updateImageMetadata()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if futures:
await asyncio.wait(futures)
for future in futures:
future.result() # raise pending exception if any
# filter
results_excluded_count = 0
reference_only_count = 0
results_kept = []
for result in results:
if ((result.size[0] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or # skip too small images
(result.size[1] + (self.size_tolerance_prct * self.target_size / 100) < self.target_size) or
(result.format is None) or # unknown format
result.needMetadataUpdate()): # if still true, it means we failed to grab metadata, so exclude it
if result.source_quality is CoverSourceQuality.REFERENCE:
# we keep this result just for the reference, it will be excluded from the results
result.is_only_reference = True
results_kept.append(result)
reference_only_count += 1
else:
results_excluded_count += 1
else:
results_kept.append(result)
result_kept_count = len(results_kept) - reference_only_count
# log
self.logger.info("Got %u relevant (%u excluded) results from source '%s'" % (result_kept_count,
results_excluded_count + reference_only_count,
self.__class__.__name__))
for result in itertools.filterfalse(operator.attrgetter("is_only_reference"), results_kept):
self.logger.debug("%s %s%s %4dx%4d %s%s" % (result.__class__.__name__,
("(%02d) " % (result.rank)) if result.rank is not None else "",
result.format.name,
result.size[0],
result.size[1],
result.urls[0],
" [x%u]" % (len(result.urls)) if len(result.urls) > 1 else ""))
return results_kept | [
"async",
"def",
"search",
"(",
"self",
",",
"album",
",",
"artist",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Searching with source '%s'...\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"album",
"=",
"self",
".",
"processAlbumString",
"(",
"album",
")",
"artist",
"=",
"self",
".",
"processArtistString",
"(",
"artist",
")",
"url_data",
"=",
"self",
".",
"getSearchUrl",
"(",
"album",
",",
"artist",
")",
"if",
"isinstance",
"(",
"url_data",
",",
"tuple",
")",
":",
"url",
",",
"post_data",
"=",
"url_data",
"else",
":",
"url",
"=",
"url_data",
"post_data",
"=",
"None",
"try",
":",
"store_in_cache_callback",
",",
"api_data",
"=",
"await",
"self",
".",
"fetchResults",
"(",
"url",
",",
"post_data",
")",
"results",
"=",
"await",
"self",
".",
"parseResults",
"(",
"api_data",
")",
"except",
"Exception",
"as",
"e",
":",
"# raise",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Search with source '%s' failed: %s %s\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"e",
".",
"__class__",
".",
"__qualname__",
",",
"e",
")",
")",
"return",
"(",
")",
"else",
":",
"if",
"results",
":",
"# only store in cache if parsing succeeds and we have results",
"await",
"store_in_cache_callback",
"(",
")",
"# get metadata",
"futures",
"=",
"[",
"]",
"for",
"result",
"in",
"filter",
"(",
"operator",
".",
"methodcaller",
"(",
"\"needMetadataUpdate\"",
")",
",",
"results",
")",
":",
"coroutine",
"=",
"result",
".",
"updateImageMetadata",
"(",
")",
"future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"coroutine",
")",
"futures",
".",
"append",
"(",
"future",
")",
"if",
"futures",
":",
"await",
"asyncio",
".",
"wait",
"(",
"futures",
")",
"for",
"future",
"in",
"futures",
":",
"future",
".",
"result",
"(",
")",
"# raise pending exception if any",
"# filter",
"results_excluded_count",
"=",
"0",
"reference_only_count",
"=",
"0",
"results_kept",
"=",
"[",
"]",
"for",
"result",
"in",
"results",
":",
"if",
"(",
"(",
"result",
".",
"size",
"[",
"0",
"]",
"+",
"(",
"self",
".",
"size_tolerance_prct",
"*",
"self",
".",
"target_size",
"/",
"100",
")",
"<",
"self",
".",
"target_size",
")",
"or",
"# skip too small images",
"(",
"result",
".",
"size",
"[",
"1",
"]",
"+",
"(",
"self",
".",
"size_tolerance_prct",
"*",
"self",
".",
"target_size",
"/",
"100",
")",
"<",
"self",
".",
"target_size",
")",
"or",
"(",
"result",
".",
"format",
"is",
"None",
")",
"or",
"# unknown format",
"result",
".",
"needMetadataUpdate",
"(",
")",
")",
":",
"# if still true, it means we failed to grab metadata, so exclude it",
"if",
"result",
".",
"source_quality",
"is",
"CoverSourceQuality",
".",
"REFERENCE",
":",
"# we keep this result just for the reference, it will be excluded from the results",
"result",
".",
"is_only_reference",
"=",
"True",
"results_kept",
".",
"append",
"(",
"result",
")",
"reference_only_count",
"+=",
"1",
"else",
":",
"results_excluded_count",
"+=",
"1",
"else",
":",
"results_kept",
".",
"append",
"(",
"result",
")",
"result_kept_count",
"=",
"len",
"(",
"results_kept",
")",
"-",
"reference_only_count",
"# log",
"self",
".",
"logger",
".",
"info",
"(",
"\"Got %u relevant (%u excluded) results from source '%s'\"",
"%",
"(",
"result_kept_count",
",",
"results_excluded_count",
"+",
"reference_only_count",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"for",
"result",
"in",
"itertools",
".",
"filterfalse",
"(",
"operator",
".",
"attrgetter",
"(",
"\"is_only_reference\"",
")",
",",
"results_kept",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s %s%s %4dx%4d %s%s\"",
"%",
"(",
"result",
".",
"__class__",
".",
"__name__",
",",
"(",
"\"(%02d) \"",
"%",
"(",
"result",
".",
"rank",
")",
")",
"if",
"result",
".",
"rank",
"is",
"not",
"None",
"else",
"\"\"",
",",
"result",
".",
"format",
".",
"name",
",",
"result",
".",
"size",
"[",
"0",
"]",
",",
"result",
".",
"size",
"[",
"1",
"]",
",",
"result",
".",
"urls",
"[",
"0",
"]",
",",
"\" [x%u]\"",
"%",
"(",
"len",
"(",
"result",
".",
"urls",
")",
")",
"if",
"len",
"(",
"result",
".",
"urls",
")",
">",
"1",
"else",
"\"\"",
")",
")",
"return",
"results_kept"
] | Search for a given album/artist and return an iterable of CoverSourceResult. | [
"Search",
"for",
"a",
"given",
"album",
"/",
"artist",
"and",
"return",
"an",
"iterable",
"of",
"CoverSourceResult",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/base.py#L60-L128 |
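
A sketch of the subclass contract that search() relies on: getSearchUrl returns either a URL string or a (url, post_data) tuple, and parseResults is a coroutine producing result objects. The class name and endpoint below are illustrative assumptions; a real source would also override the string-processing hooks if it needs to.

class DummyCoverSource(CoverSource):

  BASE_URL = "https://covers.example.com/search"  # hypothetical endpoint

  def getSearchUrl(self, album, artist):
    # a real source would URL-encode these parameters
    return "%s?artist=%s&album=%s" % (__class__.BASE_URL, artist, album)

  async def parseResults(self, api_data):
    return []  # a real source builds CoverSourceResult objects here
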
desbma/sacad | sacad/sources/base.py | CoverSource.fetchResults | async def fetchResults(self, url, post_data=None):
""" Get a (store in cache callback, search results) tuple from an URL. """
if post_data is not None:
self.logger.debug("Querying URL '%s' %s..." % (url, dict(post_data)))
else:
self.logger.debug("Querying URL '%s'..." % (url))
headers = {}
self.updateHttpHeaders(headers)
return await self.http.query(url,
post_data=post_data,
headers=headers,
cache=__class__.api_cache) | python | async def fetchResults(self, url, post_data=None):
""" Get a (store in cache callback, search results) tuple from an URL. """
if post_data is not None:
self.logger.debug("Querying URL '%s' %s..." % (url, dict(post_data)))
else:
self.logger.debug("Querying URL '%s'..." % (url))
headers = {}
self.updateHttpHeaders(headers)
return await self.http.query(url,
post_data=post_data,
headers=headers,
cache=__class__.api_cache) | [
"async",
"def",
"fetchResults",
"(",
"self",
",",
"url",
",",
"post_data",
"=",
"None",
")",
":",
"if",
"post_data",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Querying URL '%s' %s...\"",
"%",
"(",
"url",
",",
"dict",
"(",
"post_data",
")",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Querying URL '%s'...\"",
"%",
"(",
"url",
")",
")",
"headers",
"=",
"{",
"}",
"self",
".",
"updateHttpHeaders",
"(",
"headers",
")",
"return",
"await",
"self",
".",
"http",
".",
"query",
"(",
"url",
",",
"post_data",
"=",
"post_data",
",",
"headers",
"=",
"headers",
",",
"cache",
"=",
"__class__",
".",
"api_cache",
")"
] | Get a (store in cache callback, search results) tuple from a URL. | [
"Get",
"a",
"(",
"store",
"in",
"cache",
"callback",
"search",
"results",
")",
"tuple",
"from",
"an",
"URL",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/base.py#L130-L141 |
desbma/sacad | sacad/sources/base.py | CoverSource.probeUrl | async def probeUrl(self, url, response_headers=None):
""" Probe URL reachability from cache or HEAD request. """
self.logger.debug("Probing URL '%s'..." % (url))
headers = {}
self.updateHttpHeaders(headers)
resp_headers = {}
resp_ok = await self.http.isReachable(url,
headers=headers,
response_headers=resp_headers,
cache=__class__.probe_cache)
if response_headers is not None:
response_headers.update(resp_headers)
return resp_ok | python | async def probeUrl(self, url, response_headers=None):
""" Probe URL reachability from cache or HEAD request. """
self.logger.debug("Probing URL '%s'..." % (url))
headers = {}
self.updateHttpHeaders(headers)
resp_headers = {}
resp_ok = await self.http.isReachable(url,
headers=headers,
response_headers=resp_headers,
cache=__class__.probe_cache)
if response_headers is not None:
response_headers.update(resp_headers)
return resp_ok | [
"async",
"def",
"probeUrl",
"(",
"self",
",",
"url",
",",
"response_headers",
"=",
"None",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Probing URL '%s'...\"",
"%",
"(",
"url",
")",
")",
"headers",
"=",
"{",
"}",
"self",
".",
"updateHttpHeaders",
"(",
"headers",
")",
"resp_headers",
"=",
"{",
"}",
"resp_ok",
"=",
"await",
"self",
".",
"http",
".",
"isReachable",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"response_headers",
"=",
"resp_headers",
",",
"cache",
"=",
"__class__",
".",
"probe_cache",
")",
"if",
"response_headers",
"is",
"not",
"None",
":",
"response_headers",
".",
"update",
"(",
"resp_headers",
")",
"return",
"resp_ok"
] | Probe URL reachability from cache or HEAD request. | [
"Probe",
"URL",
"reachability",
"from",
"cache",
"or",
"HEAD",
"request",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/base.py#L143-L157 |
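
Usage sketch inside another coroutine; source stands for any concrete CoverSource instance, and the wrapper name and URL are placeholders.

async def is_cover_online(source, url):
  response_headers = {}
  reachable = await source.probeUrl(url, response_headers)
  return reachable, response_headers.get("Content-Type")
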
desbma/sacad | sacad/sources/base.py | CoverSource.unaccentuate | def unaccentuate(s):
""" Replace accentuated chars in string by their non accentuated equivalent. """
return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c)) | python | def unaccentuate(s):
""" Replace accentuated chars in string by their non accentuated equivalent. """
return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c)) | [
"def",
"unaccentuate",
"(",
"s",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"unicodedata",
".",
"normalize",
"(",
"\"NFKD\"",
",",
"s",
")",
"if",
"not",
"unicodedata",
".",
"combining",
"(",
"c",
")",
")"
] | Replace accentuated chars in string by their non accentuated equivalent. | [
"Replace",
"accentuated",
"chars",
"in",
"string",
"by",
"their",
"non",
"accentuated",
"equivalent",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/base.py#L165-L167 |
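
A one-line illustration, assuming the function is exposed as a static method on CoverSource (its missing self parameter suggests so): NFKD decomposes each accented character into a base letter plus combining marks, which the filter then discards.

assert CoverSource.unaccentuate("Björk") == "Bjork"
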
desbma/sacad | sacad/sources/base.py | CoverSource.unpunctuate | def unpunctuate(s, *, char_blacklist=string.punctuation):
""" Remove punctuation from string s. """
# remove punctuation
s = "".join(c for c in s if c not in char_blacklist)
# remove consecutive spaces
return " ".join(filter(None, s.split(" "))) | python | def unpunctuate(s, *, char_blacklist=string.punctuation):
""" Remove punctuation from string s. """
# remove punctuation
s = "".join(c for c in s if c not in char_blacklist)
# remove consecutive spaces
return " ".join(filter(None, s.split(" "))) | [
"def",
"unpunctuate",
"(",
"s",
",",
"*",
",",
"char_blacklist",
"=",
"string",
".",
"punctuation",
")",
":",
"# remove punctuation",
"s",
"=",
"\"\"",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"s",
"if",
"c",
"not",
"in",
"char_blacklist",
")",
"# remove consecutive spaces",
"return",
"\" \"",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"s",
".",
"split",
"(",
"\" \"",
")",
")",
")"
] | Remove punctuation from string s. | [
"Remove",
"punctuation",
"from",
"string",
"s",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/base.py#L170-L175 |
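
Two quick illustrations, again assuming a static method on CoverSource; char_blacklist defaults to string.punctuation but can be narrowed.

assert CoverSource.unpunctuate("AC/DC: Back in Black!") == "ACDC Back in Black"
assert CoverSource.unpunctuate("l'album (deluxe)", char_blacklist="()") == "l'album deluxe"
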
desbma/sacad | sacad/sources/amazoncd.py | AmazonCdCoverSource.getSearchUrl | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
params = collections.OrderedDict()
params["search-alias"] = "popular"
params["field-artist"] = artist
params["field-title"] = album
params["sort"] = "relevancerank"
return __class__.assembleUrl(self.base_url, params) | python | def getSearchUrl(self, album, artist):
""" See CoverSource.getSearchUrl. """
params = collections.OrderedDict()
params["search-alias"] = "popular"
params["field-artist"] = artist
params["field-title"] = album
params["sort"] = "relevancerank"
return __class__.assembleUrl(self.base_url, params) | [
"def",
"getSearchUrl",
"(",
"self",
",",
"album",
",",
"artist",
")",
":",
"params",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"params",
"[",
"\"search-alias\"",
"]",
"=",
"\"popular\"",
"params",
"[",
"\"field-artist\"",
"]",
"=",
"artist",
"params",
"[",
"\"field-title\"",
"]",
"=",
"album",
"params",
"[",
"\"sort\"",
"]",
"=",
"\"relevancerank\"",
"return",
"__class__",
".",
"assembleUrl",
"(",
"self",
".",
"base_url",
",",
"params",
")"
] | See CoverSource.getSearchUrl. | [
"See",
"CoverSource",
".",
"getSearchUrl",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/amazoncd.py#L46-L53 |
desbma/sacad | sacad/sources/amazoncd.py | AmazonCdCoverSource.parseResults | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse page
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("utf-8", "ignore"), parser)
for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS):
result_nodes = result_selector(html)
if result_nodes:
break
for rank, result_node in enumerate(result_nodes, 1):
try:
img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0]
except IndexError:
# no image for that product
continue
# get thumbnail & full image url
thumbnail_url = img_node.get("src")
url_parts = thumbnail_url.rsplit(".", 2)
img_url = ".".join((url_parts[0], url_parts[2]))
# assume size is fixed
size = (500, 500)
check_metadata = CoverImageMetadata.SIZE
# try to get higher res image...
if ((self.target_size > size[0]) and # ...only if needed
(rank <= 3)): # and only for first 3 results because this is time
# consuming (1 more GET request per result)
product_url = __class__.PRODUCT_LINK_SELECTORS[page_struct_version](result_node)[0].get("href")
product_url_split = urllib.parse.urlsplit(product_url)
if not product_url_split.scheme:
# relative redirect url
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# needed if page_struct_version == 1
product_url = product_url_query["url"]
except KeyError:
# page_struct_version == 0, make url absolute
product_url = urllib.parse.urljoin(self.base_url, product_url)
product_url_split = urllib.parse.urlsplit(product_url)
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# remove timestamp from url to improve future cache hit rate
del product_url_query["qid"]
except KeyError:
pass
product_url_query = urllib.parse.urlencode(product_url_query)
product_url_no_ts = urllib.parse.urlunsplit(product_url_split[:3] + (product_url_query,) + product_url_split[4:])
store_in_cache_callback, product_page_data = await self.fetchResults(product_url_no_ts)
product_page_html = lxml.etree.XML(product_page_data.decode("latin-1"), parser)
try:
img_node = __class__.PRODUCT_PAGE_IMG_SELECTOR(product_page_html)[0]
except IndexError:
# unable to get better image
pass
else:
better_img_url = img_node.get("data-old-hires")
# img_node.get("data-a-dynamic-image") contains json with image urls too, but they are not larger than
# previous 500px image and are often covered by autorip badges (can be removed by cleaning url though)
if better_img_url:
img_url = better_img_url
size_url_hint = img_url.rsplit(".", 2)[1].strip("_")
assert(size_url_hint.startswith("SL"))
size_url_hint = int(size_url_hint[2:])
size = (size_url_hint, size_url_hint)
check_metadata = CoverImageMetadata.NONE
await store_in_cache_callback()
# assume format is always jpg
format = CoverImageFormat.JPEG
# add result
results.append(AmazonCdCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | python | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse page
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("utf-8", "ignore"), parser)
for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS):
result_nodes = result_selector(html)
if result_nodes:
break
for rank, result_node in enumerate(result_nodes, 1):
try:
img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0]
except IndexError:
# no image for that product
continue
# get thumbnail & full image url
thumbnail_url = img_node.get("src")
url_parts = thumbnail_url.rsplit(".", 2)
img_url = ".".join((url_parts[0], url_parts[2]))
# assume size is fixed
size = (500, 500)
check_metadata = CoverImageMetadata.SIZE
# try to get higher res image...
if ((self.target_size > size[0]) and # ...only if needed
(rank <= 3)): # and only for first 3 results because this is time
# consuming (1 more GET request per result)
product_url = __class__.PRODUCT_LINK_SELECTORS[page_struct_version](result_node)[0].get("href")
product_url_split = urllib.parse.urlsplit(product_url)
if not product_url_split.scheme:
# relative redirect url
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# needed if page_struct_version == 1
product_url = product_url_query["url"]
except KeyError:
# page_struct_version == 0, make url absolute
product_url = urllib.parse.urljoin(self.base_url, product_url)
product_url_split = urllib.parse.urlsplit(product_url)
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# remove timestamp from url to improve future cache hit rate
del product_url_query["qid"]
except KeyError:
pass
product_url_query = urllib.parse.urlencode(product_url_query)
product_url_no_ts = urllib.parse.urlunsplit(product_url_split[:3] + (product_url_query,) + product_url_split[4:])
store_in_cache_callback, product_page_data = await self.fetchResults(product_url_no_ts)
product_page_html = lxml.etree.XML(product_page_data.decode("latin-1"), parser)
try:
img_node = __class__.PRODUCT_PAGE_IMG_SELECTOR(product_page_html)[0]
except IndexError:
# unable to get better image
pass
else:
better_img_url = img_node.get("data-old-hires")
# img_node.get("data-a-dynamic-image") contains json with image urls too, but they are not larger than
# previous 500px image and are often covered by autorip badges (can be removed by cleaning url though)
if better_img_url:
img_url = better_img_url
size_url_hint = img_url.rsplit(".", 2)[1].strip("_")
assert(size_url_hint.startswith("SL"))
size_url_hint = int(size_url_hint[2:])
size = (size_url_hint, size_url_hint)
check_metadata = CoverImageMetadata.NONE
await store_in_cache_callback()
# assume format is always jpg
format = CoverImageFormat.JPEG
# add result
results.append(AmazonCdCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | [
"async",
"def",
"parseResults",
"(",
"self",
",",
"api_data",
")",
":",
"results",
"=",
"[",
"]",
"# parse page",
"parser",
"=",
"lxml",
".",
"etree",
".",
"HTMLParser",
"(",
")",
"html",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"api_data",
".",
"decode",
"(",
"\"utf-8\"",
",",
"\"ignore\"",
")",
",",
"parser",
")",
"for",
"page_struct_version",
",",
"result_selector",
"in",
"enumerate",
"(",
"__class__",
".",
"RESULTS_SELECTORS",
")",
":",
"result_nodes",
"=",
"result_selector",
"(",
"html",
")",
"if",
"result_nodes",
":",
"break",
"for",
"rank",
",",
"result_node",
"in",
"enumerate",
"(",
"result_nodes",
",",
"1",
")",
":",
"try",
":",
"img_node",
"=",
"__class__",
".",
"IMG_SELECTORS",
"[",
"page_struct_version",
"]",
"(",
"result_node",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# no image for that product",
"continue",
"# get thumbnail & full image url",
"thumbnail_url",
"=",
"img_node",
".",
"get",
"(",
"\"src\"",
")",
"url_parts",
"=",
"thumbnail_url",
".",
"rsplit",
"(",
"\".\"",
",",
"2",
")",
"img_url",
"=",
"\".\"",
".",
"join",
"(",
"(",
"url_parts",
"[",
"0",
"]",
",",
"url_parts",
"[",
"2",
"]",
")",
")",
"# assume size is fixed",
"size",
"=",
"(",
"500",
",",
"500",
")",
"check_metadata",
"=",
"CoverImageMetadata",
".",
"SIZE",
"# try to get higher res image...",
"if",
"(",
"(",
"self",
".",
"target_size",
">",
"size",
"[",
"0",
"]",
")",
"and",
"# ...only if needed",
"(",
"rank",
"<=",
"3",
")",
")",
":",
"# and only for first 3 results because this is time",
"# consuming (1 more GET request per result)",
"product_url",
"=",
"__class__",
".",
"PRODUCT_LINK_SELECTORS",
"[",
"page_struct_version",
"]",
"(",
"result_node",
")",
"[",
"0",
"]",
".",
"get",
"(",
"\"href\"",
")",
"product_url_split",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"product_url",
")",
"if",
"not",
"product_url_split",
".",
"scheme",
":",
"# relative redirect url",
"product_url_query",
"=",
"urllib",
".",
"parse",
".",
"parse_qsl",
"(",
"product_url_split",
".",
"query",
")",
"product_url_query",
"=",
"collections",
".",
"OrderedDict",
"(",
"product_url_query",
")",
"try",
":",
"# needed if page_struct_version == 1",
"product_url",
"=",
"product_url_query",
"[",
"\"url\"",
"]",
"except",
"KeyError",
":",
"# page_struct_version == 0, make url absolute",
"product_url",
"=",
"urllib",
".",
"parse",
".",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"product_url",
")",
"product_url_split",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"product_url",
")",
"product_url_query",
"=",
"urllib",
".",
"parse",
".",
"parse_qsl",
"(",
"product_url_split",
".",
"query",
")",
"product_url_query",
"=",
"collections",
".",
"OrderedDict",
"(",
"product_url_query",
")",
"try",
":",
"# remove timestamp from url to improve future cache hit rate",
"del",
"product_url_query",
"[",
"\"qid\"",
"]",
"except",
"KeyError",
":",
"pass",
"product_url_query",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"product_url_query",
")",
"product_url_no_ts",
"=",
"urllib",
".",
"parse",
".",
"urlunsplit",
"(",
"product_url_split",
"[",
":",
"3",
"]",
"+",
"(",
"product_url_query",
",",
")",
"+",
"product_url_split",
"[",
"4",
":",
"]",
")",
"store_in_cache_callback",
",",
"product_page_data",
"=",
"await",
"self",
".",
"fetchResults",
"(",
"product_url_no_ts",
")",
"product_page_html",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"product_page_data",
".",
"decode",
"(",
"\"latin-1\"",
")",
",",
"parser",
")",
"try",
":",
"img_node",
"=",
"__class__",
".",
"PRODUCT_PAGE_IMG_SELECTOR",
"(",
"product_page_html",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# unable to get better image",
"pass",
"else",
":",
"better_img_url",
"=",
"img_node",
".",
"get",
"(",
"\"data-old-hires\"",
")",
"# img_node.get(\"data-a-dynamic-image\") contains json with image urls too, but they are not larger than",
"# previous 500px image and are often covered by autorip badges (can be removed by cleaning url though)",
"if",
"better_img_url",
":",
"img_url",
"=",
"better_img_url",
"size_url_hint",
"=",
"img_url",
".",
"rsplit",
"(",
"\".\"",
",",
"2",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"\"_\"",
")",
"assert",
"(",
"size_url_hint",
".",
"startswith",
"(",
"\"SL\"",
")",
")",
"size_url_hint",
"=",
"int",
"(",
"size_url_hint",
"[",
"2",
":",
"]",
")",
"size",
"=",
"(",
"size_url_hint",
",",
"size_url_hint",
")",
"check_metadata",
"=",
"CoverImageMetadata",
".",
"NONE",
"await",
"store_in_cache_callback",
"(",
")",
"# assume format is always jpg",
"format",
"=",
"CoverImageFormat",
".",
"JPEG",
"# add result",
"results",
".",
"append",
"(",
"AmazonCdCoverSourceResult",
"(",
"img_url",
",",
"size",
",",
"format",
",",
"thumbnail_url",
"=",
"thumbnail_url",
",",
"source",
"=",
"self",
",",
"rank",
"=",
"rank",
",",
"check_metadata",
"=",
"check_metadata",
")",
")",
"return",
"results"
] | See CoverSource.parseResults. | [
"See",
"CoverSource",
".",
"parseResults",
"."
] | train | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/amazoncd.py#L59-L142 |
fogleman/pg | pg/core.py | delete_all | def delete_all(obj):
'''Calls `delete()` on all members of `obj` that are recognized as
instances of `pg` objects.'''
types = tuple([
Shader,
Mesh,
VertexBuffer,
IndexBuffer,
Texture,
Program,
Context,
])
for name in dir(obj):
child = getattr(obj, name)
if isinstance(child, types):
child.delete() | python | def delete_all(obj):
'''Calls `delete()` on all members of `obj` that are recognized as
instances of `pg` objects.'''
types = tuple([
Shader,
Mesh,
VertexBuffer,
IndexBuffer,
Texture,
Program,
Context,
])
for name in dir(obj):
child = getattr(obj, name)
if isinstance(child, types):
child.delete() | [
"def",
"delete_all",
"(",
"obj",
")",
":",
"types",
"=",
"tuple",
"(",
"[",
"Shader",
",",
"Mesh",
",",
"VertexBuffer",
",",
"IndexBuffer",
",",
"Texture",
",",
"Program",
",",
"Context",
",",
"]",
")",
"for",
"name",
"in",
"dir",
"(",
"obj",
")",
":",
"child",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"if",
"isinstance",
"(",
"child",
",",
"types",
")",
":",
"child",
".",
"delete",
"(",
")"
] | Calls `delete()` on all members of `obj` that are recognized as
instances of `pg` objects. | [
"Calls",
"delete",
"()",
"on",
"all",
"members",
"of",
"obj",
"that",
"are",
"recognized",
"as",
"instances",
"of",
"pg",
"objects",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/core.py#L14-L29 |
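A minimal usage sketch, with App and its attribute names as hypothetical stand-ins for any object that owns pg resources:

    class App(object):
        def __init__(self, program, texture):
            self.program = program  # assumed to be a pg.Program, created elsewhere
            self.texture = texture  # assumed to be a pg.Texture, created elsewhere

    app = App(program, texture)
    delete_all(app)  # dir() finds both attributes; .delete() is called on each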
fogleman/pg | pg/glfw.py | _glfw_get_version | def _glfw_get_version(filename):
'''
Queries and returns the library version tuple or None by using a
subprocess.
'''
version_checker_source = """
import sys
import ctypes
def get_version(library_handle):
'''
Queries and returns the library version tuple or None.
'''
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
if hasattr(library_handle, 'glfwGetVersion'):
library_handle.glfwGetVersion(major, minor, rev)
version = (major_value.value,
minor_value.value,
rev_value.value)
return version
else:
return None
try:
input_func = raw_input
except NameError:
input_func = input
filename = input_func().strip()
try:
library_handle = ctypes.CDLL(filename)
except OSError:
pass
else:
version = get_version(library_handle)
print(version)
"""
args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
process = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = process.communicate(_to_char_p(filename))[0]
out = out.strip()
if out:
return eval(out)
else:
return None | python | def _glfw_get_version(filename):
'''
Queries and returns the library version tuple or None by using a
subprocess.
'''
version_checker_source = """
import sys
import ctypes
def get_version(library_handle):
'''
Queries and returns the library version tuple or None.
'''
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
if hasattr(library_handle, 'glfwGetVersion'):
library_handle.glfwGetVersion(major, minor, rev)
version = (major_value.value,
minor_value.value,
rev_value.value)
return version
else:
return None
try:
input_func = raw_input
except NameError:
input_func = input
filename = input_func().strip()
try:
library_handle = ctypes.CDLL(filename)
except OSError:
pass
else:
version = get_version(library_handle)
print(version)
"""
args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
process = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = process.communicate(_to_char_p(filename))[0]
out = out.strip()
if out:
return eval(out)
else:
return None | [
"def",
"_glfw_get_version",
"(",
"filename",
")",
":",
"version_checker_source",
"=",
"\"\"\"\n import sys\n import ctypes\n\n def get_version(library_handle):\n '''\n Queries and returns the library version tuple or None.\n '''\n major_value = ctypes.c_int(0)\n major = ctypes.pointer(major_value)\n minor_value = ctypes.c_int(0)\n minor = ctypes.pointer(minor_value)\n rev_value = ctypes.c_int(0)\n rev = ctypes.pointer(rev_value)\n if hasattr(library_handle, 'glfwGetVersion'):\n library_handle.glfwGetVersion(major, minor, rev)\n version = (major_value.value,\n minor_value.value,\n rev_value.value)\n return version\n else:\n return None\n\n try:\n input_func = raw_input\n except NameError:\n input_func = input\n filename = input_func().strip()\n\n try:\n library_handle = ctypes.CDLL(filename)\n except OSError:\n pass\n else:\n version = get_version(library_handle)\n print(version)\n \"\"\"",
"args",
"=",
"[",
"sys",
".",
"executable",
",",
"'-c'",
",",
"textwrap",
".",
"dedent",
"(",
"version_checker_source",
")",
"]",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"universal_newlines",
"=",
"True",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
"=",
"process",
".",
"communicate",
"(",
"_to_char_p",
"(",
"filename",
")",
")",
"[",
"0",
"]",
"out",
"=",
"out",
".",
"strip",
"(",
")",
"if",
"out",
":",
"return",
"eval",
"(",
"out",
")",
"else",
":",
"return",
"None"
] | Queries and returns the library version tuple or None by using a
subprocess. | [
"Queries",
"and",
"returns",
"the",
"library",
"version",
"tuple",
"or",
"None",
"by",
"using",
"a",
"subprocess",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L84-L135 |
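A usage sketch; the library path is a hypothetical, platform-specific candidate:

    version = _glfw_get_version('/usr/local/lib/libglfw.so')  # hypothetical path
    if version is not None and version >= (3, 0, 0):
        print('found GLFW %d.%d.%d' % version)

Probing in a child interpreter keeps a bad candidate from crashing the parent process, since ctypes.CDLL executes the library's initialization code on load.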
fogleman/pg | pg/glfw.py | set_error_callback | def set_error_callback(cbfun):
'''
Sets the error callback.
Wrapper for:
GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
'''
global _error_callback
previous_callback = _error_callback
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWerrorfun(cbfun)
_error_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetErrorCallback(cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | python | def set_error_callback(cbfun):
'''
Sets the error callback.
Wrapper for:
GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
'''
global _error_callback
previous_callback = _error_callback
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWerrorfun(cbfun)
_error_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetErrorCallback(cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | [
"def",
"set_error_callback",
"(",
"cbfun",
")",
":",
"global",
"_error_callback",
"previous_callback",
"=",
"_error_callback",
"if",
"cbfun",
"is",
"None",
":",
"cbfun",
"=",
"0",
"c_cbfun",
"=",
"_GLFWerrorfun",
"(",
"cbfun",
")",
"_error_callback",
"=",
"(",
"cbfun",
",",
"c_cbfun",
")",
"cbfun",
"=",
"c_cbfun",
"_glfw",
".",
"glfwSetErrorCallback",
"(",
"cbfun",
")",
"if",
"previous_callback",
"is",
"not",
"None",
"and",
"previous_callback",
"[",
"0",
"]",
"!=",
"0",
":",
"return",
"previous_callback",
"[",
"0",
"]"
] | Sets the error callback.
Wrapper for:
GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun); | [
"Sets",
"the",
"error",
"callback",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L579-L595 |
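A registration sketch; per the GLFW C API, an error callback receives an integer error code and a description string:

    def on_error(code, description):
        print('GLFW error %d: %s' % (code, description))

    previous = set_error_callback(on_error)  # returns the previously set callback, if any

The module-level _error_callback tuple above exists to keep the ctypes-wrapped function object alive; if it were garbage collected, GLFW would be left holding a dangling function pointer.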
fogleman/pg | pg/glfw.py | destroy_window | def destroy_window(window):
'''
Destroys the specified window and its context.
Wrapper for:
void glfwDestroyWindow(GLFWwindow* window);
'''
_glfw.glfwDestroyWindow(window)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_ulong)).contents.value
for callback_repository in _callback_repositories:
if window_addr in callback_repository:
del callback_repository[window_addr] | python | def destroy_window(window):
'''
Destroys the specified window and its context.
Wrapper for:
void glfwDestroyWindow(GLFWwindow* window);
'''
_glfw.glfwDestroyWindow(window)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_ulong)).contents.value
for callback_repository in _callback_repositories:
if window_addr in callback_repository:
del callback_repository[window_addr] | [
"def",
"destroy_window",
"(",
"window",
")",
":",
"_glfw",
".",
"glfwDestroyWindow",
"(",
"window",
")",
"window_addr",
"=",
"ctypes",
".",
"cast",
"(",
"ctypes",
".",
"pointer",
"(",
"window",
")",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_ulong",
")",
")",
".",
"contents",
".",
"value",
"for",
"callback_repository",
"in",
"_callback_repositories",
":",
"if",
"window_addr",
"in",
"callback_repository",
":",
"del",
"callback_repository",
"[",
"window_addr",
"]"
] | Destroys the specified window and its context.
Wrapper for:
void glfwDestroyWindow(GLFWwindow* window); | [
"Destroys",
"the",
"specified",
"window",
"and",
"its",
"context",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L798-L810 |
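A paired create/destroy sketch, assuming the module also wraps glfwCreateWindow as create_window (that name is an assumption here, mirroring the C API):

    window = create_window(640, 480, 'demo', None, None)  # assumed wrapper
    # ... main loop ...
    destroy_window(window)

The manual sweep of _callback_repositories is needed because the wrapper stores Python callback references keyed by window address; without it, each destroyed window would leak its callbacks.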
fogleman/pg | pg/glfw.py | _GLFWvidmode.unwrap | def unwrap(self):
'''
Returns a nested python sequence.
'''
size = self.width, self.height
bits = self.red_bits, self.green_bits, self.blue_bits
return size, bits, self.refresh_rate | python | def unwrap(self):
'''
Returns a nested python sequence.
'''
size = self.width, self.height
bits = self.red_bits, self.green_bits, self.blue_bits
return size, bits, self.refresh_rate | [
"def",
"unwrap",
"(",
"self",
")",
":",
"size",
"=",
"self",
".",
"width",
",",
"self",
".",
"height",
"bits",
"=",
"self",
".",
"red_bits",
",",
"self",
".",
"green_bits",
",",
"self",
".",
"blue_bits",
"return",
"size",
",",
"bits",
",",
"self",
".",
"refresh_rate"
] | Returns a nested python sequence. | [
"Returns",
"a",
"nested",
"python",
"sequence",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L190-L196 |
fogleman/pg | pg/glfw.py | _GLFWgammaramp.wrap | def wrap(self, gammaramp):
'''
Wraps a nested python sequence.
'''
red, green, blue = gammaramp
size = min(len(red), len(green), len(blue))
array_type = ctypes.c_ushort*size
self.size = ctypes.c_uint(size)
self.red_array = array_type()
self.green_array = array_type()
self.blue_array = array_type()
for i in range(self.size):
self.red_array[i] = int(red[i]*65535)
self.green_array[i] = int(green[i]*65535)
self.blue_array[i] = int(blue[i]*65535)
pointer_type = ctypes.POINTER(ctypes.c_ushort)
self.red = ctypes.cast(self.red_array, pointer_type)
self.green = ctypes.cast(self.green_array, pointer_type)
self.blue = ctypes.cast(self.blue_array, pointer_type) | python | def wrap(self, gammaramp):
'''
Wraps a nested python sequence.
'''
red, green, blue = gammaramp
size = min(len(red), len(green), len(blue))
array_type = ctypes.c_ushort*size
self.size = ctypes.c_uint(size)
self.red_array = array_type()
self.green_array = array_type()
self.blue_array = array_type()
for i in range(self.size):
self.red_array[i] = int(red[i]*65535)
self.green_array[i] = int(green[i]*65535)
self.blue_array[i] = int(blue[i]*65535)
pointer_type = ctypes.POINTER(ctypes.c_ushort)
self.red = ctypes.cast(self.red_array, pointer_type)
self.green = ctypes.cast(self.green_array, pointer_type)
self.blue = ctypes.cast(self.blue_array, pointer_type) | [
"def",
"wrap",
"(",
"self",
",",
"gammaramp",
")",
":",
"red",
",",
"green",
",",
"blue",
"=",
"gammaramp",
"size",
"=",
"min",
"(",
"len",
"(",
"red",
")",
",",
"len",
"(",
"green",
")",
",",
"len",
"(",
"blue",
")",
")",
"array_type",
"=",
"ctypes",
".",
"c_ushort",
"*",
"size",
"self",
".",
"size",
"=",
"ctypes",
".",
"c_uint",
"(",
"size",
")",
"self",
".",
"red_array",
"=",
"array_type",
"(",
")",
"self",
".",
"green_array",
"=",
"array_type",
"(",
")",
"self",
".",
"blue_array",
"=",
"array_type",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
")",
":",
"self",
".",
"red_array",
"[",
"i",
"]",
"=",
"int",
"(",
"red",
"[",
"i",
"]",
"*",
"65535",
")",
"self",
".",
"green_array",
"[",
"i",
"]",
"=",
"int",
"(",
"green",
"[",
"i",
"]",
"*",
"65535",
")",
"self",
".",
"blue_array",
"[",
"i",
"]",
"=",
"int",
"(",
"blue",
"[",
"i",
"]",
"*",
"65535",
")",
"pointer_type",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_ushort",
")",
"self",
".",
"red",
"=",
"ctypes",
".",
"cast",
"(",
"self",
".",
"red_array",
",",
"pointer_type",
")",
"self",
".",
"green",
"=",
"ctypes",
".",
"cast",
"(",
"self",
".",
"green_array",
",",
"pointer_type",
")",
"self",
".",
"blue",
"=",
"ctypes",
".",
"cast",
"(",
"self",
".",
"blue_array",
",",
"pointer_type",
")"
] | Wraps a nested python sequence. | [
"Wraps",
"a",
"nested",
"python",
"sequence",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L219-L237 |
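For illustration, an identity ramp; wrap() takes three equal-length sequences of floats in [0, 1] and scales each entry to a 16-bit value:

    channel = [i / 255.0 for i in range(256)]
    ramp = _GLFWgammaramp()
    ramp.wrap((channel, channel, channel))  # identical channels: no color cast

Note that range(self.size) works because reading a c_uint field of a ctypes Structure yields a plain Python int.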
fogleman/pg | pg/glfw.py | _GLFWgammaramp.unwrap | def unwrap(self):
'''
Returns a nested python sequence.
'''
red = [self.red[i]/65535.0 for i in range(self.size)]
green = [self.green[i]/65535.0 for i in range(self.size)]
blue = [self.blue[i]/65535.0 for i in range(self.size)]
return red, green, blue | python | def unwrap(self):
'''
Returns a nested python sequence.
'''
red = [self.red[i]/65535.0 for i in range(self.size)]
green = [self.green[i]/65535.0 for i in range(self.size)]
blue = [self.blue[i]/65535.0 for i in range(self.size)]
return red, green, blue | [
"def",
"unwrap",
"(",
"self",
")",
":",
"red",
"=",
"[",
"self",
".",
"red",
"[",
"i",
"]",
"/",
"65535.0",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
")",
"]",
"green",
"=",
"[",
"self",
".",
"green",
"[",
"i",
"]",
"/",
"65535.0",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
")",
"]",
"blue",
"=",
"[",
"self",
".",
"blue",
"[",
"i",
"]",
"/",
"65535.0",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
")",
"]",
"return",
"red",
",",
"green",
",",
"blue"
] | Returns a nested python sequence. | [
"Returns",
"a",
"nested",
"python",
"sequence",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L239-L246 |
fogleman/pg | pg/util.py | hex_color | def hex_color(value):
'''Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0.
'''
r = ((value >> (8 * 2)) & 255) / 255.0
g = ((value >> (8 * 1)) & 255) / 255.0
b = ((value >> (8 * 0)) & 255) / 255.0
return (r, g, b) | python | def hex_color(value):
'''Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0.
'''
r = ((value >> (8 * 2)) & 255) / 255.0
g = ((value >> (8 * 1)) & 255) / 255.0
b = ((value >> (8 * 0)) & 255) / 255.0
return (r, g, b) | [
"def",
"hex_color",
"(",
"value",
")",
":",
"r",
"=",
"(",
"(",
"value",
">>",
"(",
"8",
"*",
"2",
")",
")",
"&",
"255",
")",
"/",
"255.0",
"g",
"=",
"(",
"(",
"value",
">>",
"(",
"8",
"*",
"1",
")",
")",
"&",
"255",
")",
"/",
"255.0",
"b",
"=",
"(",
"(",
"value",
">>",
"(",
"8",
"*",
"0",
")",
")",
"&",
"255",
")",
"/",
"255.0",
"return",
"(",
"r",
",",
"g",
",",
"b",
")"
] | Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0. | [
"Accepts",
"a",
"hexadecimal",
"color",
"value",
"in",
"the",
"format",
"0xrrggbb",
"and",
"returns",
"an",
"(",
"r",
"g",
"b",
")",
"tuple",
"where",
"0",
".",
"0",
"<",
"=",
"r",
"g",
"b",
"<",
"=",
"1",
".",
"0",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L5-L12 |
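For instance, 0xFF8000 (orange) splits into the bytes 255, 128 and 0:

    r, g, b = hex_color(0xFF8000)  # (1.0, ~0.502, 0.0); green is 128/255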
fogleman/pg | pg/util.py | normalize | def normalize(vector):
'''Normalizes the `vector` so that its length is 1. `vector` can have
any number of components.
'''
d = sum(x * x for x in vector) ** 0.5
return tuple(x / d for x in vector) | python | def normalize(vector):
'''Normalizes the `vector` so that its length is 1. `vector` can have
any number of components.
'''
d = sum(x * x for x in vector) ** 0.5
return tuple(x / d for x in vector) | [
"def",
"normalize",
"(",
"vector",
")",
":",
"d",
"=",
"sum",
"(",
"x",
"*",
"x",
"for",
"x",
"in",
"vector",
")",
"**",
"0.5",
"return",
"tuple",
"(",
"x",
"/",
"d",
"for",
"x",
"in",
"vector",
")"
] | Normalizes the `vector` so that its length is 1. `vector` can have
any number of components. | [
"Normalizes",
"the",
"vector",
"so",
"that",
"its",
"length",
"is",
"1",
".",
"vector",
"can",
"have",
"any",
"number",
"of",
"components",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L14-L19 |
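For instance, with the classic 3-4-5 right triangle:

    >>> normalize((3, 4))
    (0.6, 0.8)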
fogleman/pg | pg/util.py | distance | def distance(p1, p2):
'''Computes and returns the distance between two points, `p1` and `p2`.
The points can have any number of components.
'''
return sum((a - b) ** 2 for a, b in zip(p1, p2)) ** 0.5 | python | def distance(p1, p2):
'''Computes and returns the distance between two points, `p1` and `p2`.
The points can have any number of components.
'''
return sum((a - b) ** 2 for a, b in zip(p1, p2)) ** 0.5 | [
"def",
"distance",
"(",
"p1",
",",
"p2",
")",
":",
"return",
"sum",
"(",
"(",
"a",
"-",
"b",
")",
"**",
"2",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"p1",
",",
"p2",
")",
")",
"**",
"0.5"
] | Computes and returns the distance between two points, `p1` and `p2`.
The points can have any number of components. | [
"Computes",
"and",
"returns",
"the",
"distance",
"between",
"two",
"points",
"p1",
"and",
"p2",
".",
"The",
"points",
"can",
"have",
"any",
"number",
"of",
"components",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L21-L25 |
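For instance:

    >>> distance((0, 0, 0), (1, 2, 2))
    3.0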
fogleman/pg | pg/util.py | cross | def cross(v1, v2):
'''Computes the cross product of two vectors.
'''
return (
v1[1] * v2[2] - v1[2] * v2[1],
v1[2] * v2[0] - v1[0] * v2[2],
v1[0] * v2[1] - v1[1] * v2[0],
) | python | def cross(v1, v2):
'''Computes the cross product of two vectors.
'''
return (
v1[1] * v2[2] - v1[2] * v2[1],
v1[2] * v2[0] - v1[0] * v2[2],
v1[0] * v2[1] - v1[1] * v2[0],
) | [
"def",
"cross",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"(",
"v1",
"[",
"1",
"]",
"*",
"v2",
"[",
"2",
"]",
"-",
"v1",
"[",
"2",
"]",
"*",
"v2",
"[",
"1",
"]",
",",
"v1",
"[",
"2",
"]",
"*",
"v2",
"[",
"0",
"]",
"-",
"v1",
"[",
"0",
"]",
"*",
"v2",
"[",
"2",
"]",
",",
"v1",
"[",
"0",
"]",
"*",
"v2",
"[",
"1",
"]",
"-",
"v1",
"[",
"1",
"]",
"*",
"v2",
"[",
"0",
"]",
",",
")"
] | Computes the cross product of two vectors. | [
"Computes",
"the",
"cross",
"product",
"of",
"two",
"vectors",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L27-L34 |
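For instance, the x and y unit vectors yield the z unit vector, matching a right-handed coordinate system:

    >>> cross((1, 0, 0), (0, 1, 0))
    (0, 0, 1)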
fogleman/pg | pg/util.py | dot | def dot(v1, v2):
'''Computes the dot product of two vectors.
'''
x1, y1, z1 = v1
x2, y2, z2 = v2
return x1 * x2 + y1 * y2 + z1 * z2 | python | def dot(v1, v2):
'''Computes the dot product of two vectors.
'''
x1, y1, z1 = v1
x2, y2, z2 = v2
return x1 * x2 + y1 * y2 + z1 * z2 | [
"def",
"dot",
"(",
"v1",
",",
"v2",
")",
":",
"x1",
",",
"y1",
",",
"z1",
"=",
"v1",
"x2",
",",
"y2",
",",
"z2",
"=",
"v2",
"return",
"x1",
"*",
"x2",
"+",
"y1",
"*",
"y2",
"+",
"z1",
"*",
"z2"
] | Computes the dot product of two vectors. | [
"Computes",
"the",
"dot",
"product",
"of",
"two",
"vectors",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L36-L41 |
fogleman/pg | pg/util.py | add | def add(v1, v2):
'''Adds two vectors.
'''
return tuple(a + b for a, b in zip(v1, v2)) | python | def add(v1, v2):
'''Adds two vectors.
'''
return tuple(a + b for a, b in zip(v1, v2)) | [
"def",
"add",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"tuple",
"(",
"a",
"+",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"v1",
",",
"v2",
")",
")"
] | Adds two vectors. | [
"Adds",
"two",
"vectors",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L43-L46 |
fogleman/pg | pg/util.py | sub | def sub(v1, v2):
'''Subtracts two vectors.
'''
return tuple(a - b for a, b in zip(v1, v2)) | python | def sub(v1, v2):
'''Subtracts two vectors.
'''
return tuple(a - b for a, b in zip(v1, v2)) | [
"def",
"sub",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"tuple",
"(",
"a",
"-",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"v1",
",",
"v2",
")",
")"
] | Subtracts two vectors. | [
"Subtracts",
"two",
"vectors",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L48-L51 |
fogleman/pg | pg/util.py | interpolate | def interpolate(v1, v2, t):
'''Interpolate from one vector to another.
'''
return add(v1, mul(sub(v2, v1), t)) | python | def interpolate(v1, v2, t):
'''Interpolate from one vector to another.
'''
return add(v1, mul(sub(v2, v1), t)) | [
"def",
"interpolate",
"(",
"v1",
",",
"v2",
",",
"t",
")",
":",
"return",
"add",
"(",
"v1",
",",
"mul",
"(",
"sub",
"(",
"v2",
",",
"v1",
")",
",",
"t",
")",
")"
] | Interpolate from one vector to another. | [
"Interpolate",
"from",
"one",
"vector",
"to",
"another",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L63-L66 |
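For instance, a quarter of the way between two points; mul, which must be defined elsewhere in the same module for this to work, scales a vector by a scalar:

    >>> interpolate((0.0, 0.0, 0.0), (10.0, 0.0, 0.0), 0.25)
    (2.5, 0.0, 0.0)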
fogleman/pg | pg/util.py | normal_from_points | def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d) | python | def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d) | [
"def",
"normal_from_points",
"(",
"a",
",",
"b",
",",
"c",
")",
":",
"x1",
",",
"y1",
",",
"z1",
"=",
"a",
"x2",
",",
"y2",
",",
"z2",
"=",
"b",
"x3",
",",
"y3",
",",
"z3",
"=",
"c",
"ab",
"=",
"(",
"x2",
"-",
"x1",
",",
"y2",
"-",
"y1",
",",
"z2",
"-",
"z1",
")",
"ac",
"=",
"(",
"x3",
"-",
"x1",
",",
"y3",
"-",
"y1",
",",
"z3",
"-",
"z1",
")",
"x",
",",
"y",
",",
"z",
"=",
"cross",
"(",
"ab",
",",
"ac",
")",
"d",
"=",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
"+",
"z",
"*",
"z",
")",
"**",
"0.5",
"return",
"(",
"x",
"/",
"d",
",",
"y",
"/",
"d",
",",
"z",
"/",
"d",
")"
] | Computes a normal vector given three points. | [
"Computes",
"a",
"normal",
"vector",
"given",
"three",
"points",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L68-L78 |
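For instance, a triangle wound counter-clockwise in the xy-plane has the +z normal:

    >>> normal_from_points((0, 0, 0), (1, 0, 0), (0, 1, 0))
    (0.0, 0.0, 1.0)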
fogleman/pg | pg/util.py | smooth_normals | def smooth_normals(positions, normals):
'''Assigns an averaged normal to each position based on all of the normals
originally used for the position.
'''
lookup = defaultdict(list)
for position, normal in zip(positions, normals):
lookup[position].append(normal)
result = []
for position in positions:
tx = ty = tz = 0
for x, y, z in lookup[position]:
tx += x
ty += y
tz += z
d = (tx * tx + ty * ty + tz * tz) ** 0.5
result.append((tx / d, ty / d, tz / d))
return result | python | def smooth_normals(positions, normals):
'''Assigns an averaged normal to each position based on all of the normals
originally used for the position.
'''
lookup = defaultdict(list)
for position, normal in zip(positions, normals):
lookup[position].append(normal)
result = []
for position in positions:
tx = ty = tz = 0
for x, y, z in lookup[position]:
tx += x
ty += y
tz += z
d = (tx * tx + ty * ty + tz * tz) ** 0.5
result.append((tx / d, ty / d, tz / d))
return result | [
"def",
"smooth_normals",
"(",
"positions",
",",
"normals",
")",
":",
"lookup",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"position",
",",
"normal",
"in",
"zip",
"(",
"positions",
",",
"normals",
")",
":",
"lookup",
"[",
"position",
"]",
".",
"append",
"(",
"normal",
")",
"result",
"=",
"[",
"]",
"for",
"position",
"in",
"positions",
":",
"tx",
"=",
"ty",
"=",
"tz",
"=",
"0",
"for",
"x",
",",
"y",
",",
"z",
"in",
"lookup",
"[",
"position",
"]",
":",
"tx",
"+=",
"x",
"ty",
"+=",
"y",
"tz",
"+=",
"z",
"d",
"=",
"(",
"tx",
"*",
"tx",
"+",
"ty",
"*",
"ty",
"+",
"tz",
"*",
"tz",
")",
"**",
"0.5",
"result",
".",
"append",
"(",
"(",
"tx",
"/",
"d",
",",
"ty",
"/",
"d",
",",
"tz",
"/",
"d",
")",
")",
"return",
"result"
] | Assigns an averaged normal to each position based on all of the normals
originally used for the position. | [
"Assigns",
"an",
"averaged",
"normal",
"to",
"each",
"position",
"based",
"on",
"all",
"of",
"the",
"normals",
"originally",
"used",
"for",
"the",
"position",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L80-L96 |
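A small sketch: a position shared by two faces with perpendicular normals receives the averaged, re-normalized direction:

    positions = [(0, 0, 0), (0, 0, 0)]
    normals = [(1, 0, 0), (0, 1, 0)]
    smooth_normals(positions, normals)
    # -> both entries become (~0.7071, ~0.7071, 0.0)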
fogleman/pg | pg/util.py | bounding_box | def bounding_box(positions):
'''Computes the bounding box for a list of 3-dimensional points.
'''
(x0, y0, z0) = (x1, y1, z1) = positions[0]
for x, y, z in positions:
x0 = min(x0, x)
y0 = min(y0, y)
z0 = min(z0, z)
x1 = max(x1, x)
y1 = max(y1, y)
z1 = max(z1, z)
return (x0, y0, z0), (x1, y1, z1) | python | def bounding_box(positions):
'''Computes the bounding box for a list of 3-dimensional points.
'''
(x0, y0, z0) = (x1, y1, z1) = positions[0]
for x, y, z in positions:
x0 = min(x0, x)
y0 = min(y0, y)
z0 = min(z0, z)
x1 = max(x1, x)
y1 = max(y1, y)
z1 = max(z1, z)
return (x0, y0, z0), (x1, y1, z1) | [
"def",
"bounding_box",
"(",
"positions",
")",
":",
"(",
"x0",
",",
"y0",
",",
"z0",
")",
"=",
"(",
"x1",
",",
"y1",
",",
"z1",
")",
"=",
"positions",
"[",
"0",
"]",
"for",
"x",
",",
"y",
",",
"z",
"in",
"positions",
":",
"x0",
"=",
"min",
"(",
"x0",
",",
"x",
")",
"y0",
"=",
"min",
"(",
"y0",
",",
"y",
")",
"z0",
"=",
"min",
"(",
"z0",
",",
"z",
")",
"x1",
"=",
"max",
"(",
"x1",
",",
"x",
")",
"y1",
"=",
"max",
"(",
"y1",
",",
"y",
")",
"z1",
"=",
"max",
"(",
"z1",
",",
"z",
")",
"return",
"(",
"x0",
",",
"y0",
",",
"z0",
")",
",",
"(",
"x1",
",",
"y1",
",",
"z1",
")"
] | Computes the bounding box for a list of 3-dimensional points. | [
"Computes",
"the",
"bounding",
"box",
"for",
"a",
"list",
"of",
"3",
"-",
"dimensional",
"points",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L98-L109 |
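For instance:

    >>> bounding_box([(0, 1, 2), (-1, 5, 0), (3, 2, 2)])
    ((-1, 1, 0), (3, 5, 2))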
fogleman/pg | pg/util.py | recenter | def recenter(positions):
'''Returns a list of new positions centered around the origin.
'''
(x0, y0, z0), (x1, y1, z1) = bounding_box(positions)
dx = x1 - (x1 - x0) / 2.0
dy = y1 - (y1 - y0) / 2.0
dz = z1 - (z1 - z0) / 2.0
result = []
for x, y, z in positions:
result.append((x - dx, y - dy, z - dz))
return result | python | def recenter(positions):
'''Returns a list of new positions centered around the origin.
'''
(x0, y0, z0), (x1, y1, z1) = bounding_box(positions)
dx = x1 - (x1 - x0) / 2.0
dy = y1 - (y1 - y0) / 2.0
dz = z1 - (z1 - z0) / 2.0
result = []
for x, y, z in positions:
result.append((x - dx, y - dy, z - dz))
return result | [
"def",
"recenter",
"(",
"positions",
")",
":",
"(",
"x0",
",",
"y0",
",",
"z0",
")",
",",
"(",
"x1",
",",
"y1",
",",
"z1",
")",
"=",
"bounding_box",
"(",
"positions",
")",
"dx",
"=",
"x1",
"-",
"(",
"x1",
"-",
"x0",
")",
"/",
"2.0",
"dy",
"=",
"y1",
"-",
"(",
"y1",
"-",
"y0",
")",
"/",
"2.0",
"dz",
"=",
"z1",
"-",
"(",
"z1",
"-",
"z0",
")",
"/",
"2.0",
"result",
"=",
"[",
"]",
"for",
"x",
",",
"y",
",",
"z",
"in",
"positions",
":",
"result",
".",
"append",
"(",
"(",
"x",
"-",
"dx",
",",
"y",
"-",
"dy",
",",
"z",
"-",
"dz",
")",
")",
"return",
"result"
] | Returns a list of new positions centered around the origin. | [
"Returns",
"a",
"list",
"of",
"new",
"positions",
"centered",
"around",
"the",
"origin",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L111-L121 |
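For instance, a box spanning (0, 0, 0) to (2, 4, 6) is shifted so its center sits at the origin:

    >>> recenter([(0, 0, 0), (2, 4, 6)])
    [(-1.0, -2.0, -3.0), (1.0, 2.0, 3.0)]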
fogleman/pg | pg/util.py | interleave | def interleave(*args):
'''Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
'''
result = []
for array in zip(*args):
result.append(tuple(flatten(array)))
return result | python | def interleave(*args):
'''Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
'''
result = []
for array in zip(*args):
result.append(tuple(flatten(array)))
return result | [
"def",
"interleave",
"(",
"*",
"args",
")",
":",
"result",
"=",
"[",
"]",
"for",
"array",
"in",
"zip",
"(",
"*",
"args",
")",
":",
"result",
".",
"append",
"(",
"tuple",
"(",
"flatten",
"(",
"array",
")",
")",
")",
"return",
"result"
] | Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer. | [
"Interleaves",
"the",
"elements",
"of",
"the",
"provided",
"arrays",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L123-L138 |
fogleman/pg | pg/util.py | distinct | def distinct(iterable, keyfunc=None):
'''Yields distinct items from `iterable` in the order that they appear.
'''
seen = set()
for item in iterable:
key = item if keyfunc is None else keyfunc(item)
if key not in seen:
seen.add(key)
yield item | python | def distinct(iterable, keyfunc=None):
'''Yields distinct items from `iterable` in the order that they appear.
'''
seen = set()
for item in iterable:
key = item if keyfunc is None else keyfunc(item)
if key not in seen:
seen.add(key)
yield item | [
"def",
"distinct",
"(",
"iterable",
",",
"keyfunc",
"=",
"None",
")",
":",
"seen",
"=",
"set",
"(",
")",
"for",
"item",
"in",
"iterable",
":",
"key",
"=",
"item",
"if",
"keyfunc",
"is",
"None",
"else",
"keyfunc",
"(",
"item",
")",
"if",
"key",
"not",
"in",
"seen",
":",
"seen",
".",
"add",
"(",
"key",
")",
"yield",
"item"
] | Yields distinct items from `iterable` in the order that they appear. | [
"Yields",
"distinct",
"items",
"from",
"iterable",
"in",
"the",
"order",
"that",
"they",
"appear",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L154-L162 |
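For instance, first occurrences win and order is preserved:

    >>> list(distinct([3, 1, 4, 1, 5, 9, 2, 6, 5, 3]))
    [3, 1, 4, 5, 9, 2, 6]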
fogleman/pg | pg/util.py | ray_triangle_intersection | def ray_triangle_intersection(v1, v2, v3, o, d):
'''Computes the distance from a point to a triangle given a ray.
'''
eps = 1e-6
e1 = sub(v2, v1)
e2 = sub(v3, v1)
p = cross(d, e2)
det = dot(e1, p)
if abs(det) < eps:
return None
inv = 1.0 / det
t = sub(o, v1)
u = dot(t, p) * inv
if u < 0 or u > 1:
return None
q = cross(t, e1)
v = dot(d, q) * inv
if v < 0 or u + v > 1:
return None
t = dot(e2, q) * inv
if t > eps:
return t
return None | python | def ray_triangle_intersection(v1, v2, v3, o, d):
'''Computes the distance from a point to a triangle given a ray.
'''
eps = 1e-6
e1 = sub(v2, v1)
e2 = sub(v3, v1)
p = cross(d, e2)
det = dot(e1, p)
if abs(det) < eps:
return None
inv = 1.0 / det
t = sub(o, v1)
u = dot(t, p) * inv
if u < 0 or u > 1:
return None
q = cross(t, e1)
v = dot(d, q) * inv
if v < 0 or u + v > 1:
return None
t = dot(e2, q) * inv
if t > eps:
return t
return None | [
"def",
"ray_triangle_intersection",
"(",
"v1",
",",
"v2",
",",
"v3",
",",
"o",
",",
"d",
")",
":",
"eps",
"=",
"1e-6",
"e1",
"=",
"sub",
"(",
"v2",
",",
"v1",
")",
"e2",
"=",
"sub",
"(",
"v3",
",",
"v1",
")",
"p",
"=",
"cross",
"(",
"d",
",",
"e2",
")",
"det",
"=",
"dot",
"(",
"e1",
",",
"p",
")",
"if",
"abs",
"(",
"det",
")",
"<",
"eps",
":",
"return",
"None",
"inv",
"=",
"1.0",
"/",
"det",
"t",
"=",
"sub",
"(",
"o",
",",
"v1",
")",
"u",
"=",
"dot",
"(",
"t",
",",
"p",
")",
"*",
"inv",
"if",
"u",
"<",
"0",
"or",
"u",
">",
"1",
":",
"return",
"None",
"q",
"=",
"cross",
"(",
"t",
",",
"e1",
")",
"v",
"=",
"dot",
"(",
"d",
",",
"q",
")",
"*",
"inv",
"if",
"v",
"<",
"0",
"or",
"v",
">",
"1",
":",
"return",
"None",
"t",
"=",
"dot",
"(",
"e2",
",",
"q",
")",
"*",
"inv",
"if",
"t",
">",
"eps",
":",
"return",
"t",
"return",
"None"
] | Computes the distance from a point to a triangle given a ray. | [
"Computes",
"the",
"distance",
"from",
"a",
"point",
"to",
"a",
"triangle",
"given",
"a",
"ray",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L164-L186 |
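For instance, a ray fired straight down at the unit triangle in the z = 0 plane hits it 5 units away; here u = v = 0.25, so the barycentric checks pass:

    >>> v1, v2, v3 = (0, 0, 0), (1, 0, 0), (0, 1, 0)
    >>> ray_triangle_intersection(v1, v2, v3, (0.25, 0.25, 5.0), (0, 0, -1))
    5.0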
fogleman/pg | pg/util.py | pack_list | def pack_list(fmt, data):
'''Convert a Python list into a ctypes buffer.
This appears to be faster than the typical method of creating a ctypes
array, e.g. (c_float * len(data))(*data)
'''
func = struct.Struct(fmt).pack
return create_string_buffer(''.join([func(x) for x in data])) | python | def pack_list(fmt, data):
'''Convert a Python list into a ctypes buffer.
This appears to be faster than the typical method of creating a ctypes
array, e.g. (c_float * len(data))(*data)
'''
func = struct.Struct(fmt).pack
return create_string_buffer(''.join([func(x) for x in data])) | [
"def",
"pack_list",
"(",
"fmt",
",",
"data",
")",
":",
"func",
"=",
"struct",
".",
"Struct",
"(",
"fmt",
")",
".",
"pack",
"return",
"create_string_buffer",
"(",
"''",
".",
"join",
"(",
"[",
"func",
"(",
"x",
")",
"for",
"x",
"in",
"data",
"]",
")",
")"
] | Convert a Python list into a ctypes buffer.
This appears to be faster than the typical method of creating a ctypes
array, e.g. (c_float * len(data))(*data) | [
"Convert",
"a",
"Python",
"list",
"into",
"a",
"ctypes",
"buffer",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L188-L195 |
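A short sketch; note the idiom is Python 2 specific, since under Python 3 struct.pack returns bytes and the join would need b'' instead of '':

    buf = pack_list('f', [0.0, 0.5, 1.0])
    # 12 bytes of packed native floats, plus the trailing NUL byte that
    # create_string_buffer always appends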
wgnet/webium | webium/controls/click.py | Clickable.click | def click(self, jquery=False):
"""
Click via the WebElement, or via JQuery when jquery is True
"""
if jquery:
e = JQuery(self)
e.click()
else:
super(Clickable, self).click() | python | def click(self, jquery=False):
"""
Click via the WebElement, or via JQuery when jquery is True
"""
if jquery:
e = JQuery(self)
e.click()
else:
super(Clickable, self).click() | [
"def",
"click",
"(",
"self",
",",
"jquery",
"=",
"False",
")",
":",
"if",
"jquery",
":",
"e",
"=",
"JQuery",
"(",
"self",
")",
"e",
".",
"click",
"(",
")",
"else",
":",
"super",
"(",
"Clickable",
",",
"self",
")",
".",
"click",
"(",
")"
] | Click via the WebElement, or via JQuery when jquery is True | [
"Click",
"by",
"WebElement",
"if",
"not",
"JQuery",
"click"
] | train | https://github.com/wgnet/webium/blob/ccb09876a201e75f5c5810392d4db7a8708b90cb/webium/controls/click.py#L8-L16 |
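A usage sketch; in webium, custom element types like this are typically declared as page-object attributes via the Find helper, and the page class and locator below are hypothetical:

    from selenium.webdriver.common.by import By
    from webium import BasePage, Find

    class LoginPage(BasePage):
        submit = Find(Clickable, By.ID, 'submit')  # hypothetical locator

    page = LoginPage()
    page.submit.click()             # plain WebDriver click
    page.submit.click(jquery=True)  # JQuery-driven click, e.g. for stubborn overlays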
wgnet/webium | webium/cookie.py | convert_cookie_to_dict | def convert_cookie_to_dict(cookie, keys_map=WEB_DRIVER_COOKIE_KEYS_MAP):
"""
Converts an instance of Cookie class from cookielib to a dict.
The names of attributes can be changed according to keys_map.
For example, this method can be used to create a cookie which is compatible with WebDriver format.
:param cookie: Cookie instance received from requests/sessions using urllib2 or requests libraries.
:param keys_map: The dict to map cookie attributes for different schemas. By default WebDriver format is used.
:return:
"""
cookie_dict = dict()
for k in keys_map.keys():
key = _to_unicode_if_str(keys_map[k])
value = _to_unicode_if_str(getattr(cookie, k))
cookie_dict[key] = value
return cookie_dict | python | def convert_cookie_to_dict(cookie, keys_map=WEB_DRIVER_COOKIE_KEYS_MAP):
"""
Converts an instance of Cookie class from cookielib to a dict.
The names of attributes can be changed according to keys_map.
For example, this method can be used to create a cookie which is compatible with WebDriver format.
:param cookie: Cookie instance received from requests/sessions using urllib2 or requests libraries.
:param keys_map: The dict to map cookie attributes for different schemas. By default WebDriver format is used.
:return:
"""
cookie_dict = dict()
for k in keys_map.keys():
key = _to_unicode_if_str(keys_map[k])
value = _to_unicode_if_str(getattr(cookie, k))
cookie_dict[key] = value
return cookie_dict | [
"def",
"convert_cookie_to_dict",
"(",
"cookie",
",",
"keys_map",
"=",
"WEB_DRIVER_COOKIE_KEYS_MAP",
")",
":",
"cookie_dict",
"=",
"dict",
"(",
")",
"for",
"k",
"in",
"keys_map",
".",
"keys",
"(",
")",
":",
"key",
"=",
"_to_unicode_if_str",
"(",
"keys_map",
"[",
"k",
"]",
")",
"value",
"=",
"_to_unicode_if_str",
"(",
"getattr",
"(",
"cookie",
",",
"k",
")",
")",
"cookie_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"cookie_dict"
] | Converts an instance of Cookie class from cookielib to a dict.
The names of attributes can be changed according to keys_map.
For example, this method can be used to create a cookie which is compatible with WebDriver format.
:param cookie: Cookie instance received from requests/sessions using urllib2 or requests libraries.
:param keys_map: The dict to map cookie attributes for different schemas. By default WebDriver format is used.
:return: | [
"Converts",
"an",
"instance",
"of",
"Cookie",
"class",
"from",
"cookielib",
"to",
"a",
"dict",
".",
"The",
"names",
"of",
"attributes",
"can",
"be",
"changed",
"according",
"to",
"keys_map",
":",
".",
"For",
"example",
"this",
"method",
"can",
"be",
"used",
"to",
"create",
"a",
"cookie",
"which",
"compatible",
"with",
"WebDriver",
"format",
"."
] | train | https://github.com/wgnet/webium/blob/ccb09876a201e75f5c5810392d4db7a8708b90cb/webium/cookie.py#L20-L38 |
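A typical hand-off sketch: authenticate with requests, then replay the session cookies into a Selenium driver; the URL, form fields, and driver choice are hypothetical:

    import requests
    from selenium import webdriver

    session = requests.Session()
    session.post('https://example.com/login', data={'user': 'u', 'pass': 'p'})  # hypothetical endpoint

    driver = webdriver.Firefox()
    driver.get('https://example.com')  # must visit the domain before adding its cookies
    for cookie in session.cookies:     # each item is a cookielib.Cookie instance
        driver.add_cookie(convert_cookie_to_dict(cookie))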