body (stringlengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1 to 16.8k) | path (stringlengths 5 to 230) | name (stringlengths 1 to 96) | repository_name (stringlengths 7 to 89) | lang (stringclasses 1, value: python) | body_without_docstring (stringlengths 20 to 98.2k) |
---|---|---|---|---|---|---|---|
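Before the sample rows, a minimal loading sketch for a table with this schema; the dataset identifier `your-org/your-code-dataset` is a placeholder, since this preview does not give the real path:

```python
# Hypothetical sketch: "your-org/your-code-dataset" is a placeholder identifier,
# not the actual dataset path, which is not shown in this preview.
from datasets import load_dataset

ds = load_dataset("your-org/your-code-dataset", split="train")

# Column names follow the header above.
swift_rows = ds.filter(lambda row: row["repository_name"] == "AymericDu/swift")
for row in swift_rows:
    print(row["path"], row["name"], len(row["body_without_docstring"]))
```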
def GETorHEAD(self, req):
'Handler for HTTP GET/HEAD requests.'
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = (self.app.account_ring.replica_count if self.app.get_policy_options(None).concurrent_gets else 1)
node_iter = self.app.iter_nodes(self.app.account_ring, partition)
params = req.params
params['format'] = 'json'
req.params = params
resp = self.GETorHEAD_base(req, _('Account'), node_iter, partition, req.swift_entity_path.rstrip('/'), concurrency)
if (resp.status_int == HTTP_NOT_FOUND):
if (resp.headers.get('X-Account-Status', '').lower() == 'deleted'):
resp.status = HTTP_GONE
elif self.app.account_autocreate:
resp = account_listing_response(self.account_name, req, listing_formats.get_listing_content_type(req))
resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
resp.headers['X-Backend-Recheck-Account-Existence'] = str(self.app.recheck_account_existence)
set_info_cache(self.app, req.environ, self.account_name, None, resp)
if req.environ.get('swift_owner'):
self.add_acls_from_sys_metadata(resp)
else:
for header in self.app.swift_owner_headers:
resp.headers.pop(header, None)
return resp | -2,028,738,259,296,392,200 | Handler for HTTP GET/HEAD requests. | swift/proxy/controllers/account.py | GETorHEAD | AymericDu/swift | python | def GETorHEAD(self, req):
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = (self.app.account_ring.replica_count if self.app.get_policy_options(None).concurrent_gets else 1)
node_iter = self.app.iter_nodes(self.app.account_ring, partition)
params = req.params
params['format'] = 'json'
req.params = params
resp = self.GETorHEAD_base(req, _('Account'), node_iter, partition, req.swift_entity_path.rstrip('/'), concurrency)
if (resp.status_int == HTTP_NOT_FOUND):
if (resp.headers.get('X-Account-Status', '').lower() == 'deleted'):
resp.status = HTTP_GONE
elif self.app.account_autocreate:
resp = account_listing_response(self.account_name, req, listing_formats.get_listing_content_type(req))
resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
resp.headers['X-Backend-Recheck-Account-Existence'] = str(self.app.recheck_account_existence)
set_info_cache(self.app, req.environ, self.account_name, None, resp)
if req.environ.get('swift_owner'):
self.add_acls_from_sys_metadata(resp)
else:
for header in self.app.swift_owner_headers:
resp.headers.pop(header, None)
return resp |
@public
def PUT(self, req):
'HTTP PUT request handler.'
if (not self.app.allow_account_management):
return HTTPMethodNotAllowed(request=req, headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'PUT', req.swift_entity_path, ([headers] * len(accounts)))
self.add_acls_from_sys_metadata(resp)
return resp | -8,082,433,754,177,956,000 | HTTP PUT request handler. | swift/proxy/controllers/account.py | PUT | AymericDu/swift | python | @public
def PUT(self, req):
if (not self.app.allow_account_management):
return HTTPMethodNotAllowed(request=req, headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'PUT', req.swift_entity_path, ([headers] * len(accounts)))
self.add_acls_from_sys_metadata(resp)
return resp |
@public
def POST(self, req):
'HTTP POST request handler.'
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'POST', req.swift_entity_path, ([headers] * len(accounts)))
if ((resp.status_int == HTTP_NOT_FOUND) and self.app.account_autocreate):
self.autocreate_account(req, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'POST', req.swift_entity_path, ([headers] * len(accounts)))
self.add_acls_from_sys_metadata(resp)
return resp | -7,641,809,301,346,970,000 | HTTP POST request handler. | swift/proxy/controllers/account.py | POST | AymericDu/swift | python | @public
def POST(self, req):
length_limit = self.get_name_length_limit()
if (len(self.account_name) > length_limit):
resp = HTTPBadRequest(request=req)
resp.body = (b'Account name length of %d longer than %d' % (len(self.account_name), length_limit))
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'POST', req.swift_entity_path, ([headers] * len(accounts)))
if ((resp.status_int == HTTP_NOT_FOUND) and self.app.account_autocreate):
self.autocreate_account(req, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'POST', req.swift_entity_path, ([headers] * len(accounts)))
self.add_acls_from_sys_metadata(resp)
return resp |
@public
def DELETE(self, req):
'HTTP DELETE request handler.'
if req.query_string:
return HTTPBadRequest(request=req)
if (not self.app.allow_account_management):
return HTTPMethodNotAllowed(request=req, headers={'Allow': ', '.join(self.allowed_methods)})
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'DELETE', req.swift_entity_path, ([headers] * len(accounts)))
return resp | 8,040,606,574,105,074,000 | HTTP DELETE request handler. | swift/proxy/controllers/account.py | DELETE | AymericDu/swift | python | @public
def DELETE(self, req):
if req.query_string:
return HTTPBadRequest(request=req)
if (not self.app.allow_account_management):
return HTTPMethodNotAllowed(request=req, headers={'Allow': ', '.join(self.allowed_methods)})
(account_partition, accounts) = self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(req, self.app.account_ring, account_partition, 'DELETE', req.swift_entity_path, ([headers] * len(accounts)))
return resp |
def section_text(text):
'Splits text into sections.\n\n Assumes text is in a radiology report format, e.g.:\n\n COMPARISON: Chest radiograph dated XYZ.\n\n IMPRESSION: ABC...\n\n Given text like this, it will output text from each section, \n where the section type is determined by the all caps header.\n\n Returns a three element tuple:\n sections - list containing the text of each section\n section_names - a normalized version of the section name\n section_idx - list of start indices of the text in the section\n '
p_section = re.compile('\\n ([A-Z ()/,-]+):\\s', re.DOTALL)
sections = list()
section_names = list()
section_idx = list()
idx = 0
s = p_section.search(text, idx)
if s:
sections.append(text[0:s.start(1)])
section_names.append('preamble')
section_idx.append(0)
while s:
current_section = s.group(1).lower()
idx_start = s.end()
idx_skip = text[idx_start:].find('\n')
if (idx_skip == (- 1)):
idx_skip = 0
s = p_section.search(text, (idx_start + idx_skip))
if (s is None):
idx_end = len(text)
else:
idx_end = s.start()
sections.append(text[idx_start:idx_end])
section_names.append(current_section)
section_idx.append(idx_start)
else:
sections.append(text)
section_names.append('full report')
section_idx.append(0)
section_names = normalize_section_names(section_names)
for i in reversed(range(len(section_names))):
if (section_names[i] in ('impression', 'findings')):
if (sections[i].strip() == ''):
sections.pop(i)
section_names.pop(i)
section_idx.pop(i)
if (('impression' not in section_names) & ('findings' not in section_names)):
if ('\n \n' in sections[(- 1)]):
sections.append('\n \n'.join(sections[(- 1)].split('\n \n')[1:]))
sections[(- 2)] = sections[(- 2)].split('\n \n')[0]
section_names.append('last_paragraph')
section_idx.append((section_idx[(- 1)] + len(sections[(- 2)])))
return (sections, section_names, section_idx) | 7,319,260,994,792,756,000 | Splits text into sections.
Assumes text is in a radiology report format, e.g.:
COMPARISON: Chest radiograph dated XYZ.
IMPRESSION: ABC...
Given text like this, it will output text from each section,
where the section type is determined by the all caps header.
Returns a three element tuple:
sections - list containing the text of each section
section_names - a normalized version of the section name
section_idx - list of start indices of the text in the section | src/data/datasets/mimic_cxr/section_parser.py | section_text | philip-mueller/lovt | python | def section_text(text):
'Splits text into sections.\n\n Assumes text is in a radiology report format, e.g.:\n\n COMPARISON: Chest radiograph dated XYZ.\n\n IMPRESSION: ABC...\n\n Given text like this, it will output text from each section, \n where the section type is determined by the all caps header.\n\n Returns a three element tuple:\n sections - list containing the text of each section\n section_names - a normalized version of the section name\n section_idx - list of start indices of the text in the section\n '
p_section = re.compile('\\n ([A-Z ()/,-]+):\\s', re.DOTALL)
sections = list()
section_names = list()
section_idx = list()
idx = 0
s = p_section.search(text, idx)
if s:
sections.append(text[0:s.start(1)])
section_names.append('preamble')
section_idx.append(0)
while s:
current_section = s.group(1).lower()
idx_start = s.end()
idx_skip = text[idx_start:].find('\n')
if (idx_skip == (- 1)):
idx_skip = 0
s = p_section.search(text, (idx_start + idx_skip))
if (s is None):
idx_end = len(text)
else:
idx_end = s.start()
sections.append(text[idx_start:idx_end])
section_names.append(current_section)
section_idx.append(idx_start)
else:
sections.append(text)
section_names.append('full report')
section_idx.append(0)
section_names = normalize_section_names(section_names)
for i in reversed(range(len(section_names))):
if (section_names[i] in ('impression', 'findings')):
if (sections[i].strip() == ''):
sections.pop(i)
section_names.pop(i)
section_idx.pop(i)
if (('impression' not in section_names) & ('findings' not in section_names)):
if ('\n \n' in sections[(- 1)]):
sections.append('\n \n'.join(sections[(- 1)].split('\n \n')[1:]))
sections[(- 2)] = sections[(- 2)].split('\n \n')[0]
section_names.append('last_paragraph')
section_idx.append((section_idx[(- 1)] + len(sections[(- 2)])))
return (sections, section_names, section_idx) |
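A short usage sketch for the `section_text` row above; the report text is invented, and the import assumes the module from the `path` column (`src/data/datasets/mimic_cxr/section_parser.py`) is importable, which may not match a real installation:

```python
# Illustrative only: the report string is made up, and section_text is the
# function from the row above (it also relies on normalize_section_names
# defined in the same module).
from section_parser import section_text  # assumed import path

report = (
    "\n COMPARISON:  Chest radiograph dated 2180-01-01.\n"
    "\n FINDINGS:  Lungs are clear without focal consolidation.\n"
    "\n IMPRESSION:  No acute cardiopulmonary process.\n"
)

sections, section_names, section_idx = section_text(report)
for name, start, text in zip(section_names, section_idx, sections):
    print(f"{name!r} starts at {start}: {text.strip()[:40]!r}")
```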
@_rewrite_parameters(body_name='text_files')
def find_structure(self, *, text_files: t.Union[(t.List[t.Any], t.Tuple[(t.Any, ...)])], charset: t.Optional[str]=None, column_names: t.Optional[str]=None, delimiter: t.Optional[str]=None, explain: t.Optional[bool]=None, format: t.Optional[str]=None, grok_pattern: t.Optional[str]=None, has_header_row: t.Optional[bool]=None, line_merge_size_limit: t.Optional[int]=None, lines_to_sample: t.Optional[int]=None, quote: t.Optional[str]=None, should_trim_fields: t.Optional[bool]=None, timeout: t.Optional[t.Union[(int, str)]]=None, timestamp_field: t.Optional[str]=None, timestamp_format: t.Optional[str]=None) -> ObjectApiResponse[t.Any]:
'\n Finds the structure of a text file. The text file must contain data that is suitable\n to be ingested into Elasticsearch.\n\n `<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_\n\n :param text_files:\n :param charset: The text’s character set. It must be a character set that is\n supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE,\n windows-1252, or EUC-JP. If this parameter is not specified, the structure\n finder chooses an appropriate character set.\n :param column_names: If you have set format to delimited, you can specify the\n column names in a comma-separated list. If this parameter is not specified,\n the structure finder uses the column names from the header row of the text.\n If the text does not have a header role, columns are named "column1", "column2",\n "column3", etc.\n :param delimiter: If you have set format to delimited, you can specify the character\n used to delimit the values in each row. Only a single character is supported;\n the delimiter cannot have multiple characters. By default, the API considers\n the following possibilities: comma, tab, semi-colon, and pipe (|). In this\n default scenario, all rows must have the same number of fields for the delimited\n format to be detected. If you specify a delimiter, up to 10% of the rows\n can have a different number of columns than the first row.\n :param explain: If this parameter is set to true, the response includes a field\n named explanation, which is an array of strings that indicate how the structure\n finder produced its result.\n :param format: The high level structure of the text. Valid values are ndjson,\n xml, delimited, and semi_structured_text. By default, the API chooses the\n format. In this default scenario, all rows must have the same number of fields\n for a delimited format to be detected. If the format is set to delimited\n and the delimiter is not set, however, the API tolerates up to 5% of rows\n that have a different number of columns than the first row.\n :param grok_pattern: If you have set format to semi_structured_text, you can\n specify a Grok pattern that is used to extract fields from every message\n in the text. The name of the timestamp field in the Grok pattern must match\n what is specified in the timestamp_field parameter. If that parameter is\n not specified, the name of the timestamp field in the Grok pattern must match\n "timestamp". If grok_pattern is not specified, the structure finder creates\n a Grok pattern.\n :param has_header_row: If you have set format to delimited, you can use this\n parameter to indicate whether the column names are in the first row of the\n text. If this parameter is not specified, the structure finder guesses based\n on the similarity of the first row of the text to other rows.\n :param line_merge_size_limit: The maximum number of characters in a message when\n lines are merged to form messages while analyzing semi-structured text. If\n you have extremely long messages you may need to increase this, but be aware\n that this may lead to very long processing times if the way to group lines\n into messages is misdetected.\n :param lines_to_sample: The number of lines to include in the structural analysis,\n starting from the beginning of the text. 
The minimum is 2; If the value of\n this parameter is greater than the number of lines in the text, the analysis\n proceeds (as long as there are at least two lines in the text) for all of\n the lines.\n :param quote: If you have set format to delimited, you can specify the character\n used to quote the values in each row if they contain newlines or the delimiter\n character. Only a single character is supported. If this parameter is not\n specified, the default value is a double quote ("). If your delimited text\n format does not use quoting, a workaround is to set this argument to a character\n that does not appear anywhere in the sample.\n :param should_trim_fields: If you have set format to delimited, you can specify\n whether values between delimiters should have whitespace trimmed from them.\n If this parameter is not specified and the delimiter is pipe (|), the default\n value is true. Otherwise, the default value is false.\n :param timeout: Sets the maximum amount of time that the structure analysis make\n take. If the analysis is still running when the timeout expires then it will\n be aborted.\n :param timestamp_field: Optional parameter to specify the timestamp field in\n the file\n :param timestamp_format: The Java time format of the timestamp field in the text.\n '
if (text_files is None):
raise ValueError("Empty value passed for parameter 'text_files'")
__path = '/_text_structure/find_structure'
__query: t.Dict[(str, t.Any)] = {}
if (charset is not None):
__query['charset'] = charset
if (column_names is not None):
__query['column_names'] = column_names
if (delimiter is not None):
__query['delimiter'] = delimiter
if (explain is not None):
__query['explain'] = explain
if (format is not None):
__query['format'] = format
if (grok_pattern is not None):
__query['grok_pattern'] = grok_pattern
if (has_header_row is not None):
__query['has_header_row'] = has_header_row
if (line_merge_size_limit is not None):
__query['line_merge_size_limit'] = line_merge_size_limit
if (lines_to_sample is not None):
__query['lines_to_sample'] = lines_to_sample
if (quote is not None):
__query['quote'] = quote
if (should_trim_fields is not None):
__query['should_trim_fields'] = should_trim_fields
if (timeout is not None):
__query['timeout'] = timeout
if (timestamp_field is not None):
__query['timestamp_field'] = timestamp_field
if (timestamp_format is not None):
__query['timestamp_format'] = timestamp_format
__body = text_files
__headers = {'accept': 'application/json', 'content-type': 'application/x-ndjson'}
return self.perform_request('POST', __path, params=__query, headers=__headers, body=__body) | 6,677,154,515,690,402,000 | Finds the structure of a text file. The text file must contain data that is suitable
to be ingested into Elasticsearch.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_
:param text_files:
:param charset: The text’s character set. It must be a character set that is
supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE,
windows-1252, or EUC-JP. If this parameter is not specified, the structure
finder chooses an appropriate character set.
:param column_names: If you have set format to delimited, you can specify the
column names in a comma-separated list. If this parameter is not specified,
the structure finder uses the column names from the header row of the text.
If the text does not have a header role, columns are named "column1", "column2",
"column3", etc.
:param delimiter: If you have set format to delimited, you can specify the character
used to delimit the values in each row. Only a single character is supported;
the delimiter cannot have multiple characters. By default, the API considers
the following possibilities: comma, tab, semi-colon, and pipe (|). In this
default scenario, all rows must have the same number of fields for the delimited
format to be detected. If you specify a delimiter, up to 10% of the rows
can have a different number of columns than the first row.
:param explain: If this parameter is set to true, the response includes a field
named explanation, which is an array of strings that indicate how the structure
finder produced its result.
:param format: The high level structure of the text. Valid values are ndjson,
xml, delimited, and semi_structured_text. By default, the API chooses the
format. In this default scenario, all rows must have the same number of fields
for a delimited format to be detected. If the format is set to delimited
and the delimiter is not set, however, the API tolerates up to 5% of rows
that have a different number of columns than the first row.
:param grok_pattern: If you have set format to semi_structured_text, you can
specify a Grok pattern that is used to extract fields from every message
in the text. The name of the timestamp field in the Grok pattern must match
what is specified in the timestamp_field parameter. If that parameter is
not specified, the name of the timestamp field in the Grok pattern must match
"timestamp". If grok_pattern is not specified, the structure finder creates
a Grok pattern.
:param has_header_row: If you have set format to delimited, you can use this
parameter to indicate whether the column names are in the first row of the
text. If this parameter is not specified, the structure finder guesses based
on the similarity of the first row of the text to other rows.
:param line_merge_size_limit: The maximum number of characters in a message when
lines are merged to form messages while analyzing semi-structured text. If
you have extremely long messages you may need to increase this, but be aware
that this may lead to very long processing times if the way to group lines
into messages is misdetected.
:param lines_to_sample: The number of lines to include in the structural analysis,
starting from the beginning of the text. The minimum is 2; If the value of
this parameter is greater than the number of lines in the text, the analysis
proceeds (as long as there are at least two lines in the text) for all of
the lines.
:param quote: If you have set format to delimited, you can specify the character
used to quote the values in each row if they contain newlines or the delimiter
character. Only a single character is supported. If this parameter is not
specified, the default value is a double quote ("). If your delimited text
format does not use quoting, a workaround is to set this argument to a character
that does not appear anywhere in the sample.
:param should_trim_fields: If you have set format to delimited, you can specify
whether values between delimiters should have whitespace trimmed from them.
If this parameter is not specified and the delimiter is pipe (|), the default
value is true. Otherwise, the default value is false.
:param timeout: Sets the maximum amount of time that the structure analysis make
take. If the analysis is still running when the timeout expires then it will
be aborted.
:param timestamp_field: Optional parameter to specify the timestamp field in
the file
:param timestamp_format: The Java time format of the timestamp field in the text. | elasticsearch/_sync/client/text_structure.py | find_structure | neubloc/elasticsearch-py | python | @_rewrite_parameters(body_name='text_files')
def find_structure(self, *, text_files: t.Union[(t.List[t.Any], t.Tuple[(t.Any, ...)])], charset: t.Optional[str]=None, column_names: t.Optional[str]=None, delimiter: t.Optional[str]=None, explain: t.Optional[bool]=None, format: t.Optional[str]=None, grok_pattern: t.Optional[str]=None, has_header_row: t.Optional[bool]=None, line_merge_size_limit: t.Optional[int]=None, lines_to_sample: t.Optional[int]=None, quote: t.Optional[str]=None, should_trim_fields: t.Optional[bool]=None, timeout: t.Optional[t.Union[(int, str)]]=None, timestamp_field: t.Optional[str]=None, timestamp_format: t.Optional[str]=None) -> ObjectApiResponse[t.Any]:
'\n Finds the structure of a text file. The text file must contain data that is suitable\n to be ingested into Elasticsearch.\n\n `<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_\n\n :param text_files:\n :param charset: The text’s character set. It must be a character set that is\n supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE,\n windows-1252, or EUC-JP. If this parameter is not specified, the structure\n finder chooses an appropriate character set.\n :param column_names: If you have set format to delimited, you can specify the\n column names in a comma-separated list. If this parameter is not specified,\n the structure finder uses the column names from the header row of the text.\n If the text does not have a header role, columns are named "column1", "column2",\n "column3", etc.\n :param delimiter: If you have set format to delimited, you can specify the character\n used to delimit the values in each row. Only a single character is supported;\n the delimiter cannot have multiple characters. By default, the API considers\n the following possibilities: comma, tab, semi-colon, and pipe (|). In this\n default scenario, all rows must have the same number of fields for the delimited\n format to be detected. If you specify a delimiter, up to 10% of the rows\n can have a different number of columns than the first row.\n :param explain: If this parameter is set to true, the response includes a field\n named explanation, which is an array of strings that indicate how the structure\n finder produced its result.\n :param format: The high level structure of the text. Valid values are ndjson,\n xml, delimited, and semi_structured_text. By default, the API chooses the\n format. In this default scenario, all rows must have the same number of fields\n for a delimited format to be detected. If the format is set to delimited\n and the delimiter is not set, however, the API tolerates up to 5% of rows\n that have a different number of columns than the first row.\n :param grok_pattern: If you have set format to semi_structured_text, you can\n specify a Grok pattern that is used to extract fields from every message\n in the text. The name of the timestamp field in the Grok pattern must match\n what is specified in the timestamp_field parameter. If that parameter is\n not specified, the name of the timestamp field in the Grok pattern must match\n "timestamp". If grok_pattern is not specified, the structure finder creates\n a Grok pattern.\n :param has_header_row: If you have set format to delimited, you can use this\n parameter to indicate whether the column names are in the first row of the\n text. If this parameter is not specified, the structure finder guesses based\n on the similarity of the first row of the text to other rows.\n :param line_merge_size_limit: The maximum number of characters in a message when\n lines are merged to form messages while analyzing semi-structured text. If\n you have extremely long messages you may need to increase this, but be aware\n that this may lead to very long processing times if the way to group lines\n into messages is misdetected.\n :param lines_to_sample: The number of lines to include in the structural analysis,\n starting from the beginning of the text. 
The minimum is 2; If the value of\n this parameter is greater than the number of lines in the text, the analysis\n proceeds (as long as there are at least two lines in the text) for all of\n the lines.\n :param quote: If you have set format to delimited, you can specify the character\n used to quote the values in each row if they contain newlines or the delimiter\n character. Only a single character is supported. If this parameter is not\n specified, the default value is a double quote ("). If your delimited text\n format does not use quoting, a workaround is to set this argument to a character\n that does not appear anywhere in the sample.\n :param should_trim_fields: If you have set format to delimited, you can specify\n whether values between delimiters should have whitespace trimmed from them.\n If this parameter is not specified and the delimiter is pipe (|), the default\n value is true. Otherwise, the default value is false.\n :param timeout: Sets the maximum amount of time that the structure analysis make\n take. If the analysis is still running when the timeout expires then it will\n be aborted.\n :param timestamp_field: Optional parameter to specify the timestamp field in\n the file\n :param timestamp_format: The Java time format of the timestamp field in the text.\n '
if (text_files is None):
raise ValueError("Empty value passed for parameter 'text_files'")
__path = '/_text_structure/find_structure'
__query: t.Dict[(str, t.Any)] = {}
if (charset is not None):
__query['charset'] = charset
if (column_names is not None):
__query['column_names'] = column_names
if (delimiter is not None):
__query['delimiter'] = delimiter
if (explain is not None):
__query['explain'] = explain
if (format is not None):
__query['format'] = format
if (grok_pattern is not None):
__query['grok_pattern'] = grok_pattern
if (has_header_row is not None):
__query['has_header_row'] = has_header_row
if (line_merge_size_limit is not None):
__query['line_merge_size_limit'] = line_merge_size_limit
if (lines_to_sample is not None):
__query['lines_to_sample'] = lines_to_sample
if (quote is not None):
__query['quote'] = quote
if (should_trim_fields is not None):
__query['should_trim_fields'] = should_trim_fields
if (timeout is not None):
__query['timeout'] = timeout
if (timestamp_field is not None):
__query['timestamp_field'] = timestamp_field
if (timestamp_format is not None):
__query['timestamp_format'] = timestamp_format
__body = text_files
__headers = {'accept': 'application/json', 'content-type': 'application/x-ndjson'}
return self.perform_request('POST', __path, params=__query, headers=__headers, body=__body) |
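A hedged usage sketch for the `find_structure` method above; the connection settings and sample records are invented, and the response key used (`mappings`) follows the Elasticsearch find-structure API documentation rather than anything shown in this row:

```python
# Illustrative only: host, api_key, and the sample records are placeholders.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="PLACEHOLDER")

# Each element of text_files is serialized as one NDJSON line of the request body.
sample_lines = [
    {"timestamp": "2023-01-01T00:00:00Z", "level": "INFO", "message": "started"},
    {"timestamp": "2023-01-01T00:00:01Z", "level": "WARN", "message": "slow response"},
]

resp = client.text_structure.find_structure(
    text_files=sample_lines,
    format="ndjson",
    timestamp_field="timestamp",
)
print(resp["mappings"])  # field mappings inferred from the sampled text (per the ES docs)
```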
def test_now_in_utc():
'now_in_utc() should return the current time set to the UTC time zone'
now = now_in_utc()
assert is_near_now(now)
assert (now.tzinfo == pytz.UTC) | 1,980,469,855,028,686,800 | now_in_utc() should return the current time set to the UTC time zone | main/utils_test.py | test_now_in_utc | mitodl/bootcamp-ecommerce | python | def test_now_in_utc():
now = now_in_utc()
assert is_near_now(now)
assert (now.tzinfo == pytz.UTC) |
def test_is_near_now():
'\n Test is_near_now for now\n '
now = datetime.datetime.now(tz=pytz.UTC)
assert (is_near_now(now) is True)
later = (now + datetime.timedelta(0, 6))
assert (is_near_now(later) is False)
earlier = (now - datetime.timedelta(0, 6))
assert (is_near_now(earlier) is False) | 5,014,501,646,750,768,000 | Test is_near_now for now | main/utils_test.py | test_is_near_now | mitodl/bootcamp-ecommerce | python | def test_is_near_now():
'\n \n '
now = datetime.datetime.now(tz=pytz.UTC)
assert (is_near_now(now) is True)
later = (now + datetime.timedelta(0, 6))
assert (is_near_now(later) is False)
earlier = (now - datetime.timedelta(0, 6))
assert (is_near_now(earlier) is False) |
def test_first_or_none():
'\n Assert that first_or_none returns the first item in an iterable or None\n '
assert (first_or_none([]) is None)
assert (first_or_none(set()) is None)
assert (first_or_none([1, 2, 3]) == 1)
assert (first_or_none(range(1, 5)) == 1) | -2,245,960,498,571,180,800 | Assert that first_or_none returns the first item in an iterable or None | main/utils_test.py | test_first_or_none | mitodl/bootcamp-ecommerce | python | def test_first_or_none():
'\n \n '
assert (first_or_none([]) is None)
assert (first_or_none(set()) is None)
assert (first_or_none([1, 2, 3]) == 1)
assert (first_or_none(range(1, 5)) == 1) |
def test_first_matching_item():
'first_matching_item should return the first item where the predicate function returns true'
assert (first_matching_item([1, 2, 3, 4, 5], (lambda x: ((x % 2) == 0))) == 2)
assert (first_matching_item([], (lambda x: True)) is None)
assert (first_matching_item(['x', 'y', 'z'], (lambda x: False)) is None) | -6,679,562,110,484,070,000 | first_matching_item should return the first item where the predicate function returns true | main/utils_test.py | test_first_matching_item | mitodl/bootcamp-ecommerce | python | def test_first_matching_item():
assert (first_matching_item([1, 2, 3, 4, 5], (lambda x: ((x % 2) == 0))) == 2)
assert (first_matching_item([], (lambda x: True)) is None)
assert (first_matching_item(['x', 'y', 'z'], (lambda x: False)) is None) |
def test_max_or_none():
'\n Assert that max_or_none returns the max of some iterable, or None if the iterable has no items\n '
assert (max_or_none((i for i in [5, 4, 3, 2, 1])) == 5)
assert (max_or_none([1, 3, 5, 4, 2]) == 5)
assert (max_or_none([]) is None) | 5,047,917,916,077,396,000 | Assert that max_or_none returns the max of some iterable, or None if the iterable has no items | main/utils_test.py | test_max_or_none | mitodl/bootcamp-ecommerce | python | def test_max_or_none():
'\n \n '
assert (max_or_none((i for i in [5, 4, 3, 2, 1])) == 5)
assert (max_or_none([1, 3, 5, 4, 2]) == 5)
assert (max_or_none([]) is None) |
def test_unique():
'\n Assert that unique() returns a generator of unique elements from a provided iterable\n '
assert (list(unique([1, 2, 2, 3, 3, 0, 3])) == [1, 2, 3, 0])
assert (list(unique(('a', 'b', 'a', 'c', 'C', None))) == ['a', 'b', 'c', 'C', None]) | 986,328,228,094,812,800 | Assert that unique() returns a generator of unique elements from a provided iterable | main/utils_test.py | test_unique | mitodl/bootcamp-ecommerce | python | def test_unique():
'\n \n '
assert (list(unique([1, 2, 2, 3, 3, 0, 3])) == [1, 2, 3, 0])
assert (list(unique(('a', 'b', 'a', 'c', 'C', None))) == ['a', 'b', 'c', 'C', None]) |
def test_unique_ignore_case():
'\n Assert that unique_ignore_case() returns a generator of unique lowercase strings from a\n provided iterable\n '
assert (list(unique_ignore_case(['ABC', 'def', 'AbC', 'DEf'])) == ['abc', 'def']) | -9,212,515,244,537,056,000 | Assert that unique_ignore_case() returns a generator of unique lowercase strings from a
provided iterable | main/utils_test.py | test_unique_ignore_case | mitodl/bootcamp-ecommerce | python | def test_unique_ignore_case():
'\n Assert that unique_ignore_case() returns a generator of unique lowercase strings from a\n provided iterable\n '
assert (list(unique_ignore_case(['ABC', 'def', 'AbC', 'DEf'])) == ['abc', 'def']) |
def test_item_at_index_or_none():
"\n Assert that item_at_index_or_none returns an item at a given index, or None if that index\n doesn't exist\n "
arr = [1, 2, 3]
assert (item_at_index_or_none(arr, 1) == 2)
assert (item_at_index_or_none(arr, 10) is None) | 9,027,047,907,124,018,000 | Assert that item_at_index_or_none returns an item at a given index, or None if that index
doesn't exist | main/utils_test.py | test_item_at_index_or_none | mitodl/bootcamp-ecommerce | python | def test_item_at_index_or_none():
"\n Assert that item_at_index_or_none returns an item at a given index, or None if that index\n doesn't exist\n "
arr = [1, 2, 3]
assert (item_at_index_or_none(arr, 1) == 2)
assert (item_at_index_or_none(arr, 10) is None) |
def test_all_equal():
'\n Assert that all_equal returns True if all of the provided args are equal to each other\n '
assert (all_equal(1, 1, 1) is True)
assert (all_equal(1, 2, 1) is False)
assert (all_equal() is True) | 6,843,933,897,489,577,000 | Assert that all_equal returns True if all of the provided args are equal to each other | main/utils_test.py | test_all_equal | mitodl/bootcamp-ecommerce | python | def test_all_equal():
'\n \n '
assert (all_equal(1, 1, 1) is True)
assert (all_equal(1, 2, 1) is False)
assert (all_equal() is True) |
def test_all_unique():
'\n Assert that all_unique returns True if all of the items in the iterable argument are unique\n '
assert (all_unique([1, 2, 3, 4]) is True)
assert (all_unique((1, 2, 3, 4)) is True)
assert (all_unique([1, 2, 3, 1]) is False) | -4,191,406,065,421,053,000 | Assert that all_unique returns True if all of the items in the iterable argument are unique | main/utils_test.py | test_all_unique | mitodl/bootcamp-ecommerce | python | def test_all_unique():
'\n \n '
assert (all_unique([1, 2, 3, 4]) is True)
assert (all_unique((1, 2, 3, 4)) is True)
assert (all_unique([1, 2, 3, 1]) is False) |
def test_has_all_keys():
'\n Assert that has_all_keys returns True if the given dict has all of the specified keys\n '
d = {'a': 1, 'b': 2, 'c': 3}
assert (has_all_keys(d, ['a', 'c']) is True)
assert (has_all_keys(d, ['a', 'z']) is False) | -3,950,808,303,279,116,000 | Assert that has_all_keys returns True if the given dict has all of the specified keys | main/utils_test.py | test_has_all_keys | mitodl/bootcamp-ecommerce | python | def test_has_all_keys():
'\n \n '
d = {'a': 1, 'b': 2, 'c': 3}
assert (has_all_keys(d, ['a', 'c']) is True)
assert (has_all_keys(d, ['a', 'z']) is False) |
def test_is_blank():
'\n Assert that is_blank returns True if the given value is None or a blank string\n '
assert (is_blank('') is True)
assert (is_blank(None) is True)
assert (is_blank(0) is False)
assert (is_blank(' ') is False)
assert (is_blank(False) is False)
assert (is_blank('value') is False) | -20,777,822,704,435,870 | Assert that is_blank returns True if the given value is None or a blank string | main/utils_test.py | test_is_blank | mitodl/bootcamp-ecommerce | python | def test_is_blank():
'\n \n '
assert (is_blank('') is True)
assert (is_blank(None) is True)
assert (is_blank(0) is False)
assert (is_blank(' ') is False)
assert (is_blank(False) is False)
assert (is_blank('value') is False) |
def test_group_into_dict():
'\n Assert that group_into_dict takes an iterable of items and returns a dictionary of those items\n grouped by generated keys\n '
class Car():
def __init__(self, make, model):
self.make = make
self.model = model
cars = [Car(make='Honda', model='Civic'), Car(make='Honda', model='Accord'), Car(make='Ford', model='F150'), Car(make='Ford', model='Focus'), Car(make='Jeep', model='Wrangler')]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter('make'))
assert (set(grouped_cars.keys()) == {'Honda', 'Ford', 'Jeep'})
assert (set(grouped_cars['Honda']) == set(cars[0:2]))
assert (set(grouped_cars['Ford']) == set(cars[2:4]))
assert (grouped_cars['Jeep'] == [cars[4]])
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=(lambda num: ((num % 2) == 0)))
assert (grouped_nums.keys() == {True, False})
assert (set(grouped_nums[True]) == {2, 4, 6})
assert (set(grouped_nums[False]) == {1, 3, 5}) | -6,935,898,489,097,271,000 | Assert that group_into_dict takes an iterable of items and returns a dictionary of those items
grouped by generated keys | main/utils_test.py | test_group_into_dict | mitodl/bootcamp-ecommerce | python | def test_group_into_dict():
'\n Assert that group_into_dict takes an iterable of items and returns a dictionary of those items\n grouped by generated keys\n '
class Car():
def __init__(self, make, model):
self.make = make
self.model = model
cars = [Car(make='Honda', model='Civic'), Car(make='Honda', model='Accord'), Car(make='Ford', model='F150'), Car(make='Ford', model='Focus'), Car(make='Jeep', model='Wrangler')]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter('make'))
assert (set(grouped_cars.keys()) == {'Honda', 'Ford', 'Jeep'})
assert (set(grouped_cars['Honda']) == set(cars[0:2]))
assert (set(grouped_cars['Ford']) == set(cars[2:4]))
assert (grouped_cars['Jeep'] == [cars[4]])
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=(lambda num: ((num % 2) == 0)))
assert (grouped_nums.keys() == {True, False})
assert (set(grouped_nums[True]) == {2, 4, 6})
assert (set(grouped_nums[False]) == {1, 3, 5}) |
def test_filter_dict_by_key_set():
'\n Test that filter_dict_by_key_set returns a dict with only the given keys\n '
d = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
assert (filter_dict_by_key_set(d, {'a', 'c'}) == {'a': 1, 'c': 3})
assert (filter_dict_by_key_set(d, {'a', 'c', 'nonsense'}) == {'a': 1, 'c': 3})
assert (filter_dict_by_key_set(d, {'nonsense'}) == {}) | -1,523,572,021,128,611,600 | Test that filter_dict_by_key_set returns a dict with only the given keys | main/utils_test.py | test_filter_dict_by_key_set | mitodl/bootcamp-ecommerce | python | def test_filter_dict_by_key_set():
'\n \n '
d = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
assert (filter_dict_by_key_set(d, {'a', 'c'}) == {'a': 1, 'c': 3})
assert (filter_dict_by_key_set(d, {'a', 'c', 'nonsense'}) == {'a': 1, 'c': 3})
assert (filter_dict_by_key_set(d, {'nonsense'}) == {}) |
def test_partition_to_lists():
'\n Assert that partition_to_lists splits an iterable into two lists according to a condition\n '
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
(not_ones, ones) = partition_to_lists(nums, (lambda n: (n == 1)))
assert (not_ones == [2, 3, 4, 0, None, None])
assert (ones == [1, 1, 1])
(falsey, truthy) = partition_to_lists(nums)
assert (falsey == [0, None, None])
assert (truthy == [1, 2, 1, 3, 1, 4]) | 8,380,860,281,203,333,000 | Assert that partition_to_lists splits an iterable into two lists according to a condition | main/utils_test.py | test_partition_to_lists | mitodl/bootcamp-ecommerce | python | def test_partition_to_lists():
'\n \n '
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
(not_ones, ones) = partition_to_lists(nums, (lambda n: (n == 1)))
assert (not_ones == [2, 3, 4, 0, None, None])
assert (ones == [1, 1, 1])
(falsey, truthy) = partition_to_lists(nums)
assert (falsey == [0, None, None])
assert (truthy == [1, 2, 1, 3, 1, 4]) |
def test_partition_around_index():
'partition_around_index should split a list into two lists around an index'
assert (partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4]))
assert (partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4]))
assert (partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], []))
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4) | 8,945,775,255,869,581,000 | partition_around_index should split a list into two lists around an index | main/utils_test.py | test_partition_around_index | mitodl/bootcamp-ecommerce | python | def test_partition_around_index():
assert (partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4]))
assert (partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4]))
assert (partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], []))
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4) |
@pytest.mark.parametrize('content,content_type,exp_summary_content,exp_url_in_summary', [['{"bad": "response"}', 'application/json', '{"bad": "response"}', False], ['plain text', 'text/plain', 'plain text', False], ['<div>HTML content</div>', 'text/html; charset=utf-8', '(HTML body ignored)', True]])
def test_get_error_response_summary(content, content_type, exp_summary_content, exp_url_in_summary):
'\n get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of\n information depending on the type of content.\n '
status_code = 400
url = 'http://example.com'
mock_response = MockResponse(status_code=status_code, content=content, content_type=content_type, url=url)
summary = get_error_response_summary(mock_response)
assert (f'Response - code: {status_code}' in summary)
assert (f'content: {exp_summary_content}' in summary)
assert ((f'url: {url}' in summary) is exp_url_in_summary) | 3,631,322,079,011,886,600 | get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of
information depending on the type of content. | main/utils_test.py | test_get_error_response_summary | mitodl/bootcamp-ecommerce | python | @pytest.mark.parametrize('content,content_type,exp_summary_content,exp_url_in_summary', [['{"bad": "response"}', 'application/json', '{"bad": "response"}', False], ['plain text', 'text/plain', 'plain text', False], ['<div>HTML content</div>', 'text/html; charset=utf-8', '(HTML body ignored)', True]])
def test_get_error_response_summary(content, content_type, exp_summary_content, exp_url_in_summary):
'\n get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of\n information depending on the type of content.\n '
status_code = 400
url = 'http://example.com'
mock_response = MockResponse(status_code=status_code, content=content, content_type=content_type, url=url)
summary = get_error_response_summary(mock_response)
assert (f'Response - code: {status_code}' in summary)
assert (f'content: {exp_summary_content}' in summary)
assert ((f'url: {url}' in summary) is exp_url_in_summary) |
@pytest.mark.django_db
def test_jsonfield(settings):
'\n Test a model with a JSONField is handled correctly\n '
settings.CYBERSOURCE_SECURITY_KEY = 'asdf'
receipt = ReceiptFactory.create()
assert (serialize_model_object(receipt) == {'created_on': format_as_iso8601(receipt.created_on), 'data': receipt.data, 'id': receipt.id, 'updated_on': format_as_iso8601(receipt.updated_on), 'order': receipt.order.id}) | -4,728,769,654,445,678,000 | Test a model with a JSONField is handled correctly | main/utils_test.py | test_jsonfield | mitodl/bootcamp-ecommerce | python | @pytest.mark.django_db
def test_jsonfield(settings):
'\n \n '
settings.CYBERSOURCE_SECURITY_KEY = 'asdf'
receipt = ReceiptFactory.create()
assert (serialize_model_object(receipt) == {'created_on': format_as_iso8601(receipt.created_on), 'data': receipt.data, 'id': receipt.id, 'updated_on': format_as_iso8601(receipt.updated_on), 'order': receipt.order.id}) |
def test_get_field_names():
'\n Assert that get_field_names does not include related fields\n '
assert (set(get_field_names(Order)) == {'user', 'status', 'total_price_paid', 'application', 'created_on', 'updated_on', 'payment_type'}) | -7,139,863,925,248,950,000 | Assert that get_field_names does not include related fields | main/utils_test.py | test_get_field_names | mitodl/bootcamp-ecommerce | python | def test_get_field_names():
'\n \n '
assert (set(get_field_names(Order)) == {'user', 'status', 'total_price_paid', 'application', 'created_on', 'updated_on', 'payment_type'}) |
def test_is_empty_file():
'is_empty_file should return True if the given object is None or has a blank name property'
fake_file = None
assert (is_empty_file(fake_file) is True)
fake_file = SimpleNamespace(name='')
assert (is_empty_file(fake_file) is True)
fake_file = SimpleNamespace(name='path/to/file.txt')
assert (is_empty_file(fake_file) is False) | 8,666,167,418,031,092,000 | is_empty_file should return True if the given object is None or has a blank name property | main/utils_test.py | test_is_empty_file | mitodl/bootcamp-ecommerce | python | def test_is_empty_file():
fake_file = None
assert (is_empty_file(fake_file) is True)
fake_file = SimpleNamespace(name='')
assert (is_empty_file(fake_file) is True)
fake_file = SimpleNamespace(name='path/to/file.txt')
assert (is_empty_file(fake_file) is False) |
def test_chunks():
'\n test for chunks\n '
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert (output_list == input_list)
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert (output_list == input_list)
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert (output_list == input_list) | -6,628,668,773,888,255,000 | test for chunks | main/utils_test.py | test_chunks | mitodl/bootcamp-ecommerce | python | def test_chunks():
'\n \n '
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert (output_list == input_list)
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert (output_list == input_list)
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert (output_list == input_list) |
def test_chunks_iterable():
'\n test that chunks works on non-list iterables too\n '
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert (len(chunk_output) == ceil((113 / 10)))
range_list = []
for chunk in chunk_output:
range_list += chunk
assert (range_list == list(range(count))) | 5,628,105,431,535,348,000 | test that chunks works on non-list iterables too | main/utils_test.py | test_chunks_iterable | mitodl/bootcamp-ecommerce | python | def test_chunks_iterable():
'\n \n '
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert (len(chunk_output) == ceil((113 / 10)))
range_list = []
for chunk in chunk_output:
range_list += chunk
assert (range_list == list(range(count))) |
def test_format_month_day():
'\n format_month_day should format the month and day from a datetime\n '
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert (format_month_day(dt) == 'Jan 1')
assert (format_month_day(dt, month_fmt='%b') == 'Jan 1')
assert (format_month_day(dt, month_fmt='%B') == 'January 1') | -5,556,454,043,159,934,000 | format_month_day should format the month and day from a datetime | main/utils_test.py | test_format_month_day | mitodl/bootcamp-ecommerce | python | def test_format_month_day():
'\n \n '
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert (format_month_day(dt) == 'Jan 1')
assert (format_month_day(dt, month_fmt='%b') == 'Jan 1')
assert (format_month_day(dt, month_fmt='%B') == 'January 1') |
def test_has_equal_properties():
'\n Assert that has_equal_properties returns True if an object has equivalent properties to a given dict\n '
obj = SimpleNamespace(a=1, b=2, c=3)
assert (has_equal_properties(obj, {}) is True)
assert (has_equal_properties(obj, dict(a=1, b=2)) is True)
assert (has_equal_properties(obj, dict(a=1, b=2, c=3)) is True)
assert (has_equal_properties(obj, dict(a=2)) is False)
assert (has_equal_properties(obj, dict(d=4)) is False) | -3,059,819,990,250,566,000 | Assert that has_equal_properties returns True if an object has equivalent properties to a given dict | main/utils_test.py | test_has_equal_properties | mitodl/bootcamp-ecommerce | python | def test_has_equal_properties():
'\n \n '
obj = SimpleNamespace(a=1, b=2, c=3)
assert (has_equal_properties(obj, {}) is True)
assert (has_equal_properties(obj, dict(a=1, b=2)) is True)
assert (has_equal_properties(obj, dict(a=1, b=2, c=3)) is True)
assert (has_equal_properties(obj, dict(a=2)) is False)
assert (has_equal_properties(obj, dict(d=4)) is False) |
def __init__(self, file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, buffer_size=DEFAULT_READ_BUFFER_SIZE, validate=True, skip_header_lines=0, header_processor_fns=(None, None)):
'Initialize a _TextSource\n\n Args:\n header_processor_fns (tuple): a tuple of a `header_matcher` function\n and a `header_processor` function. The `header_matcher` should\n return `True` for all lines at the start of the file that are part\n of the file header and `False` otherwise. These header lines will\n not be yielded when reading records and instead passed into\n `header_processor` to be handled. If `skip_header_lines` and a\n `header_matcher` are both provided, the value of `skip_header_lines`\n lines will be skipped and the header will be processed from\n there.\n Raises:\n ValueError: if skip_lines is negative.\n\n Please refer to documentation in class `ReadFromText` for the rest\n of the arguments.\n '
super(_TextSource, self).__init__(file_pattern, min_bundle_size, compression_type=compression_type, validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if (skip_header_lines < 0):
raise ValueError(('Cannot skip negative number of header lines: %d' % skip_header_lines))
elif (skip_header_lines > 10):
_LOGGER.warning('Skipping %d header lines. Skipping large number of header lines might significantly slow down processing.')
self._skip_header_lines = skip_header_lines
(self._header_matcher, self._header_processor) = header_processor_fns | 7,102,111,700,206,611,000 | Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments. | sdks/python/apache_beam/io/textio.py | __init__ | AhnLab-OSS/beam | python | def __init__(self, file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, buffer_size=DEFAULT_READ_BUFFER_SIZE, validate=True, skip_header_lines=0, header_processor_fns=(None, None)):
'Initialize a _TextSource\n\n Args:\n header_processor_fns (tuple): a tuple of a `header_matcher` function\n and a `header_processor` function. The `header_matcher` should\n return `True` for all lines at the start of the file that are part\n of the file header and `False` otherwise. These header lines will\n not be yielded when reading records and instead passed into\n `header_processor` to be handled. If `skip_header_lines` and a\n `header_matcher` are both provided, the value of `skip_header_lines`\n lines will be skipped and the header will be processed from\n there.\n Raises:\n ValueError: if skip_lines is negative.\n\n Please refer to documentation in class `ReadFromText` for the rest\n of the arguments.\n '
super(_TextSource, self).__init__(file_pattern, min_bundle_size, compression_type=compression_type, validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if (skip_header_lines < 0):
raise ValueError(('Cannot skip negative number of header lines: %d' % skip_header_lines))
elif (skip_header_lines > 10):
_LOGGER.warning('Skipping %d header lines. Skipping large number of header lines might significantly slow down processing.')
self._skip_header_lines = skip_header_lines
(self._header_matcher, self._header_processor) = header_processor_fns |
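A small sketch of the `header_processor_fns` pair that the `_TextSource` docstring above describes; the '#'-prefixed header convention is an assumption for illustration, and whether the matcher receives `str` or `bytes` depends on the coder in use, so `str` lines are assumed here:

```python
# Illustrative sketch of a (header_matcher, header_processor) pair as described
# in the _TextSource docstring above. The '#' prefix is an assumed header
# convention, not something Beam requires, and str lines are assumed.
captured_header = []

def header_matcher(line):
    # True for each leading line that belongs to the header block.
    return line.startswith('#')

def header_processor(header_lines):
    # Receives all matched header lines instead of having them emitted as records.
    captured_header.extend(header_lines)

header_processor_fns = (header_matcher, header_processor)
```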
def _skip_lines(self, file_to_read, read_buffer, num_lines):
'Skip num_lines from file_to_read, return num_lines+1 start position.'
if (file_to_read.tell() > 0):
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
(_, num_bytes_to_next_record) = self._read_record(file_to_read, read_buffer)
if (num_bytes_to_next_record < 0):
break
position += num_bytes_to_next_record
return position | -6,993,979,125,286,766,000 | Skip num_lines from file_to_read, return num_lines+1 start position. | sdks/python/apache_beam/io/textio.py | _skip_lines | AhnLab-OSS/beam | python | def _skip_lines(self, file_to_read, read_buffer, num_lines):
if (file_to_read.tell() > 0):
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
(_, num_bytes_to_next_record) = self._read_record(file_to_read, read_buffer)
if (num_bytes_to_next_record < 0):
break
position += num_bytes_to_next_record
return position |
def __init__(self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None):
"Initialize a _TextSink.\n\n Args:\n file_path_prefix: The file path to write to. The files written will begin\n with this prefix, followed by a shard identifier (see num_shards), and\n end in a common extension, if given by file_name_suffix. In most cases,\n only this argument is specified and num_shards, shard_name_template, and\n file_name_suffix use default values.\n file_name_suffix: Suffix for the files written.\n append_trailing_newlines: indicate whether this sink should write an\n additional newline char after writing each element.\n num_shards: The number of files (shards) used for output. If not set, the\n service will decide on the optimal number of shards.\n Constraining the number of shards is likely to reduce\n the performance of a pipeline. Setting this value is not recommended\n unless you require a specific number of output files.\n shard_name_template: A template string containing placeholders for\n the shard number and shard count. When constructing a filename for a\n particular shard number, the upper-case letters 'S' and 'N' are\n replaced with the 0-padded shard number and shard count respectively.\n This argument can be '' in which case it behaves as if num_shards was\n set to 1 and only one file will be generated. The default pattern used\n is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.\n coder: Coder used to encode each line.\n compression_type: Used to handle compressed output files. Typical value\n is CompressionTypes.AUTO, in which case the final file path's\n extension (as determined by file_path_prefix, file_name_suffix,\n num_shards and shard_name_template) will be used to detect the\n compression.\n header: String to write at beginning of file as a header. If not None and\n append_trailing_newlines is set, '\n' will be added.\n\n Returns:\n A _TextSink object usable for writing.\n "
super(_TextSink, self).__init__(file_path_prefix, file_name_suffix=file_name_suffix, num_shards=num_shards, shard_name_template=shard_name_template, coder=coder, mime_type='text/plain', compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header | 2,864,030,333,706,770,000 | Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing. | sdks/python/apache_beam/io/textio.py | __init__ | AhnLab-OSS/beam | python | def __init__(self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None):
"Initialize a _TextSink.\n\n Args:\n file_path_prefix: The file path to write to. The files written will begin\n with this prefix, followed by a shard identifier (see num_shards), and\n end in a common extension, if given by file_name_suffix. In most cases,\n only this argument is specified and num_shards, shard_name_template, and\n file_name_suffix use default values.\n file_name_suffix: Suffix for the files written.\n append_trailing_newlines: indicate whether this sink should write an\n additional newline char after writing each element.\n num_shards: The number of files (shards) used for output. If not set, the\n service will decide on the optimal number of shards.\n Constraining the number of shards is likely to reduce\n the performance of a pipeline. Setting this value is not recommended\n unless you require a specific number of output files.\n shard_name_template: A template string containing placeholders for\n the shard number and shard count. When constructing a filename for a\n particular shard number, the upper-case letters 'S' and 'N' are\n replaced with the 0-padded shard number and shard count respectively.\n This argument can be in which case it behaves as if num_shards was\n set to 1 and only one file will be generated. The default pattern used\n is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.\n coder: Coder used to encode each line.\n compression_type: Used to handle compressed output files. Typical value\n is CompressionTypes.AUTO, in which case the final file path's\n extension (as determined by file_path_prefix, file_name_suffix,\n num_shards and shard_name_template) will be used to detect the\n compression.\n header: String to write at beginning of file as a header. If not None and\n append_trailing_newlines is set, '\n' will be added.\n\n Returns:\n A _TextSink object usable for writing.\n "
super(_TextSink, self).__init__(file_path_prefix, file_name_suffix=file_name_suffix, num_shards=num_shards, shard_name_template=shard_name_template, coder=coder, mime_type='text/plain', compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header |
def write_encoded_record(self, file_handle, encoded_value):
'Writes a single encoded record.'
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n') | -7,928,732,276,254,415,000 | Writes a single encoded record. | sdks/python/apache_beam/io/textio.py | write_encoded_record | AhnLab-OSS/beam | python | def write_encoded_record(self, file_handle, encoded_value):
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n') |
def __init__(self, min_bundle_size=0, desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), skip_header_lines=0, **kwargs):
"Initialize the ``ReadAllFromText`` transform.\n\n Args:\n min_bundle_size: Minimum size of bundles that should be generated when\n splitting this source into bundles. See ``FileBasedSource`` for more\n details.\n desired_bundle_size: Desired size of bundles that should be generated when\n splitting this source into bundles. See ``FileBasedSource`` for more\n details.\n compression_type: Used to handle compressed input files. Typical value\n is ``CompressionTypes.AUTO``, in which case the underlying file_path's\n extension will be used to detect the compression.\n strip_trailing_newlines: Indicates whether this source should remove\n the newline char in each line it reads before decoding that line.\n validate: flag to verify that the files exist during the pipeline\n creation time.\n skip_header_lines: Number of header lines to skip. Same number is skipped\n from each source file. Must be 0 or higher. Large number of skipped\n lines might impact performance.\n coder: Coder used to decode each line.\n "
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(_create_text_source, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coder, skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(True, compression_type, desired_bundle_size, min_bundle_size, source_from_file) | 6,986,434,036,015,012,000 | Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line. | sdks/python/apache_beam/io/textio.py | __init__ | AhnLab-OSS/beam | python | def __init__(self, min_bundle_size=0, desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), skip_header_lines=0, **kwargs):
"Initialize the ``ReadAllFromText`` transform.\n\n Args:\n min_bundle_size: Minimum size of bundles that should be generated when\n splitting this source into bundles. See ``FileBasedSource`` for more\n details.\n desired_bundle_size: Desired size of bundles that should be generated when\n splitting this source into bundles. See ``FileBasedSource`` for more\n details.\n compression_type: Used to handle compressed input files. Typical value\n is ``CompressionTypes.AUTO``, in which case the underlying file_path's\n extension will be used to detect the compression.\n strip_trailing_newlines: Indicates whether this source should remove\n the newline char in each line it reads before decoding that line.\n validate: flag to verify that the files exist during the pipeline\n creation time.\n skip_header_lines: Number of header lines to skip. Same number is skipped\n from each source file. Must be 0 or higher. Large number of skipped\n lines might impact performance.\n coder: Coder used to decode each line.\n "
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(_create_text_source, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coder, skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(True, compression_type, desired_bundle_size, min_bundle_size, source_from_file) |
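A minimal usage sketch for the ReadAllFromText transform defined above, assuming Apache Beam is installed and the pipeline runs on the local DirectRunner; the file patterns are placeholders, not real paths.

import apache_beam as beam
from apache_beam.io.textio import ReadAllFromText

with beam.Pipeline() as pipeline:
    lines = (
        pipeline
        | 'FilePatterns' >> beam.Create(['./input/*.txt', './more/part-*.gz'])
        | 'ReadEachMatch' >> ReadAllFromText(skip_header_lines=1)  # one header skipped per matched file
    )
    _ = lines | 'Print' >> beam.Map(print)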
def __init__(self, file_pattern=None, min_bundle_size=0, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), validate=True, skip_header_lines=0, **kwargs):
"Initialize the :class:`ReadFromText` transform.\n\n Args:\n file_pattern (str): The file path to read from as a local file path or a\n GCS ``gs://`` path. The path can contain glob characters\n (``*``, ``?``, and ``[...]`` sets).\n min_bundle_size (int): Minimum size of bundles that should be generated\n when splitting this source into bundles. See\n :class:`~apache_beam.io.filebasedsource.FileBasedSource` for more\n details.\n compression_type (str): Used to handle compressed input files.\n Typical value is :attr:`CompressionTypes.AUTO\n <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the\n underlying file_path's extension will be used to detect the compression.\n strip_trailing_newlines (bool): Indicates whether this source should\n remove the newline char in each line it reads before decoding that line.\n validate (bool): flag to verify that the files exist during the pipeline\n creation time.\n skip_header_lines (int): Number of header lines to skip. Same number is\n skipped from each source file. Must be 0 or higher. Large number of\n skipped lines might impact performance.\n coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.\n "
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, validate=validate, skip_header_lines=skip_header_lines) | 7,339,110,293,041,342,000 | Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line. | sdks/python/apache_beam/io/textio.py | __init__ | AhnLab-OSS/beam | python | def __init__(self, file_pattern=None, min_bundle_size=0, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), validate=True, skip_header_lines=0, **kwargs):
"Initialize the :class:`ReadFromText` transform.\n\n Args:\n file_pattern (str): The file path to read from as a local file path or a\n GCS ``gs://`` path. The path can contain glob characters\n (``*``, ``?``, and ``[...]`` sets).\n min_bundle_size (int): Minimum size of bundles that should be generated\n when splitting this source into bundles. See\n :class:`~apache_beam.io.filebasedsource.FileBasedSource` for more\n details.\n compression_type (str): Used to handle compressed input files.\n Typical value is :attr:`CompressionTypes.AUTO\n <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the\n underlying file_path's extension will be used to detect the compression.\n strip_trailing_newlines (bool): Indicates whether this source should\n remove the newline char in each line it reads before decoding that line.\n validate (bool): flag to verify that the files exist during the pipeline\n creation time.\n skip_header_lines (int): Number of header lines to skip. Same number is\n skipped from each source file. Must be 0 or higher. Large number of\n skipped lines might impact performance.\n coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.\n "
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, validate=validate, skip_header_lines=skip_header_lines) |
def __init__(self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None):
"Initialize a :class:`WriteToText` transform.\n\n Args:\n file_path_prefix (str): The file path to write to. The files written will\n begin with this prefix, followed by a shard identifier (see\n **num_shards**), and end in a common extension, if given by\n **file_name_suffix**. In most cases, only this argument is specified and\n **num_shards**, **shard_name_template**, and **file_name_suffix** use\n default values.\n file_name_suffix (str): Suffix for the files written.\n append_trailing_newlines (bool): indicate whether this sink should write\n an additional newline char after writing each element.\n num_shards (int): The number of files (shards) used for output.\n If not set, the service will decide on the optimal number of shards.\n Constraining the number of shards is likely to reduce\n the performance of a pipeline. Setting this value is not recommended\n unless you require a specific number of output files.\n shard_name_template (str): A template string containing placeholders for\n the shard number and shard count. Currently only ``''`` and\n ``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.\n When constructing a filename for a particular shard number, the\n upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded\n shard number and shard count respectively. This argument can be ``''``\n in which case it behaves as if num_shards was set to 1 and only one file\n will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.\n coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.\n compression_type (str): Used to handle compressed output files.\n Typical value is :class:`CompressionTypes.AUTO\n <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the\n final file path's extension (as determined by **file_path_prefix**,\n **file_name_suffix**, **num_shards** and **shard_name_template**) will\n be used to detect the compression.\n header (str): String to write at beginning of file as a header.\n If not :data:`None` and **append_trailing_newlines** is set, ``\\n`` will\n be added.\n "
self._sink = _TextSink(file_path_prefix, file_name_suffix, append_trailing_newlines, num_shards, shard_name_template, coder, compression_type, header) | 1,442,425,946,996,938,800 | Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added. | sdks/python/apache_beam/io/textio.py | __init__ | AhnLab-OSS/beam | python | def __init__(self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None):
"Initialize a :class:`WriteToText` transform.\n\n Args:\n file_path_prefix (str): The file path to write to. The files written will\n begin with this prefix, followed by a shard identifier (see\n **num_shards**), and end in a common extension, if given by\n **file_name_suffix**. In most cases, only this argument is specified and\n **num_shards**, **shard_name_template**, and **file_name_suffix** use\n default values.\n file_name_suffix (str): Suffix for the files written.\n append_trailing_newlines (bool): indicate whether this sink should write\n an additional newline char after writing each element.\n num_shards (int): The number of files (shards) used for output.\n If not set, the service will decide on the optimal number of shards.\n Constraining the number of shards is likely to reduce\n the performance of a pipeline. Setting this value is not recommended\n unless you require a specific number of output files.\n shard_name_template (str): A template string containing placeholders for\n the shard number and shard count. Currently only ```` and\n ``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.\n When constructing a filename for a particular shard number, the\n upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded\n shard number and shard count respectively. This argument can be ````\n in which case it behaves as if num_shards was set to 1 and only one file\n will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.\n coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.\n compression_type (str): Used to handle compressed output files.\n Typical value is :class:`CompressionTypes.AUTO\n <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the\n final file path's extension (as determined by **file_path_prefix**,\n **file_name_suffix**, **num_shards** and **shard_name_template**) will\n be used to detect the compression.\n header (str): String to write at beginning of file as a header.\n If not :data:`None` and **append_trailing_newlines** is set, ``\\n`` will\n be added.\n "
self._sink = _TextSink(file_path_prefix, file_name_suffix, append_trailing_newlines, num_shards, shard_name_template, coder, compression_type, header) |
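A short end-to-end sketch tying ReadFromText and WriteToText together; the file paths are illustrative, and only parameters documented above are used.

import apache_beam as beam
from apache_beam.io import ReadFromText, WriteToText

with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'Read' >> ReadFromText('input/*.txt', skip_header_lines=1)
        | 'Upper' >> beam.Map(str.upper)
        | 'Write' >> WriteToText('output/result', file_name_suffix='.txt',
                                 num_shards=1, header='LINE')
    )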
def register_extensions(app: Flask):
'Register the required extensions with the Flask app instance'
db.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
moment.init_app(app) | 8,959,475,275,388,297,000 | Register the required extensions with the Flask app instance | telechat/__init__.py | register_extensions | Sefank/telechat | python | def register_extensions(app: Flask):
db.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
moment.init_app(app) |
def register_blueprints(app: Flask):
'Register the required blueprints with the Flask app instance'
app.register_blueprint(auth_bp)
app.register_blueprint(oauth_bp)
app.register_blueprint(chat_bp)
app.register_blueprint(admin_bp) | 7,145,644,853,949,605,000 | Register the required blueprints with the Flask app instance | telechat/__init__.py | register_blueprints | Sefank/telechat | python | def register_blueprints(app: Flask):
app.register_blueprint(auth_bp)
app.register_blueprint(oauth_bp)
app.register_blueprint(chat_bp)
app.register_blueprint(admin_bp) |
def register_errors(app: Flask):
'Register the required error handlers with the Flask app instance'
@app.errorhandler(400)
def bad_request(e):
return (render_template('error.html', description=e.description, code=e.code), 400)
@app.errorhandler(404)
def page_not_found(e):
return (render_template('error.html', description=e.description, code=e.code), 404)
@app.errorhandler(500)
def internal_server_error(e):
return (render_template('error.html', description='Internal server error; the request could not be completed!', code='500'), 500)
@app.errorhandler(CSRFError)
def csrf_error_handle(e):
return (render_template('error.html', description=e.description, code=e.code), 400) | 1,824,958,579,672,288,500 | Register the required error handlers with the Flask app instance | telechat/__init__.py | register_errors | Sefank/telechat | python | def register_errors(app: Flask):
@app.errorhandler(400)
def bad_request(e):
return (render_template('error.html', description=e.description, code=e.code), 400)
@app.errorhandler(404)
def page_not_found(e):
return (render_template('error.html', description=e.description, code=e.code), 404)
@app.errorhandler(500)
def internal_server_error(e):
return (render_template('error.html', description='Internal server error; the request could not be completed!', code='500'), 500)
@app.errorhandler(CSRFError)
def csrf_error_handle(e):
return (render_template('error.html', description=e.description, code=e.code), 400) |
def register_commands(app: Flask):
'Register the required CLI commands with the Flask app instance'
@app.cli.command()
@click.option('--drop', is_flag=True, help='Drop the database before creating it')
def initdb(drop: bool):
'Initialize the database schema'
if drop:
pass
pass
@app.cli.command()
@click.option('--num', default=300, help='Number of messages, defaults to 300')
def forge(num: int):
'Generate fake data'
pass | 1,707,144,654,204,815,000 | Register the required CLI commands with the Flask app instance | telechat/__init__.py | register_commands | Sefank/telechat | python | def register_commands(app: Flask):
@app.cli.command()
@click.option('--drop', is_flag=True, help='Drop the database before creating it')
def initdb(drop: bool):
'Initialize the database schema'
if drop:
pass
pass
@app.cli.command()
@click.option('--num', default=300, help='Number of messages, defaults to 300')
def forge(num: int):
'Generate fake data'
pass |
def create_app(config_name=None):
'Application factory: create the Flask app, load configuration, and register extensions, blueprints and other components'
if (config_name is None):
config_name = os.getenv('FLASK_CONFIG', 'development')
app = Flask('telechat')
app.config.from_object(config[config_name])
register_extensions(app)
register_blueprints(app)
register_errors(app)
register_commands(app)
return app | -5,401,803,342,700,037,000 | Application factory: create the Flask app, load configuration, and register extensions, blueprints and other components | telechat/__init__.py | create_app | Sefank/telechat | python | def create_app(config_name=None):
if (config_name is None):
config_name = os.getenv('FLASK_CONFIG', 'development')
app = Flask('telechat')
app.config.from_object(config[config_name])
register_extensions(app)
register_blueprints(app)
register_errors(app)
register_commands(app)
return app |
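A sketch of how an application factory like create_app is typically consumed; it assumes the telechat package and its config module are importable, so treat the names as taken from the code above rather than guaranteed.

from telechat import create_app

app = create_app('development')  # falls back to the FLASK_CONFIG env var, then 'development'

if __name__ == '__main__':
    app.run(debug=True)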
@app.cli.command()
@click.option('--drop', is_flag=True, help='Drop the database before creating it')
def initdb(drop: bool):
'Initialize the database schema'
if drop:
pass
pass | -6,779,515,592,460,771,000 | Initialize the database schema | telechat/__init__.py | initdb | Sefank/telechat | python | @app.cli.command()
@click.option('--drop', is_flag=True, help='Drop the database before creating it')
def initdb(drop: bool):
if drop:
pass
pass |
@app.cli.command()
@click.option('--num', default=300, help='Number of messages, defaults to 300')
def forge(num: int):
'Generate fake data'
pass | -5,643,895,302,043,924,000 | Generate fake data | telechat/__init__.py | forge | Sefank/telechat | python | @app.cli.command()
@click.option('--num', default=300, help='Number of messages, defaults to 300')
def forge(num: int):
pass |
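The commands registered above would normally be invoked from the shell as flask initdb --drop or flask forge --num 50; the sketch below drives them programmatically through Flask's test CLI runner, again assuming the telechat package is installed.

from telechat import create_app

app = create_app('development')
runner = app.test_cli_runner()
print(runner.invoke(args=['initdb', '--drop']).output)
print(runner.invoke(args=['forge', '--num', '50']).output)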
def load_configuration(configuration_file_path, parameters_file_path, bundles):
'Combines the configuration and parameters and build the configuration object'
mappings = {}
for bundle in bundles:
if hasattr(bundle, 'config_mapping'):
mappings.update(bundle.config_mapping)
loader = YmlLoader()
return loader.build_config(mappings, config_source=configuration_file_path, parameters_source=parameters_file_path) | 259,998,891,701,529,540 | Combines the configuration and parameters and build the configuration object | applauncher/configuration.py | load_configuration | applauncher-team/applauncher | python | def load_configuration(configuration_file_path, parameters_file_path, bundles):
mappings = {}
for bundle in bundles:
if hasattr(bundle, 'config_mapping'):
mappings.update(bundle.config_mapping)
loader = YmlLoader()
return loader.build_config(mappings, config_source=configuration_file_path, parameters_source=parameters_file_path) |
def is_string(value):
'Check if the value is actually a string or not'
try:
float(value)
return False
except ValueError:
if (value.lower() in ['true', 'false']):
return False
return True | 3,238,916,346,945,022,500 | Check if the value is actually a string or not | applauncher/configuration.py | is_string | applauncher-team/applauncher | python | def is_string(value):
try:
float(value)
return False
except ValueError:
if (value.lower() in ['true', 'false']):
return False
return True |
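Expected behavior of is_string on a few representative inputs: anything parseable as a number or a boolean is reported as not-a-string, so it will not be re-quoted before substitution.

for value in ['hello', '3.14', 'true', 'False', '42']:
    print(value, is_string(value))
# hello True / 3.14 False / true False / False False / 42 False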
@abstractmethod
def load_parameters(self, source):
'Convert the source into a dictionary' | -4,875,817,250,234,026,000 | Convert the source into a dictionary | applauncher/configuration.py | load_parameters | applauncher-team/applauncher | python | @abstractmethod
def load_parameters(self, source):
|
@abstractmethod
def load_config(self, config_source, parameters_source):
'Prase the config file and build a dictionary' | -2,837,957,084,366,495,000 | Prase the config file and build a dictionary | applauncher/configuration.py | load_config | applauncher-team/applauncher | python | @abstractmethod
def load_config(self, config_source, parameters_source):
|
def build_config(self, config_mappings, config_source, parameters_source):
'By using the loaded parameters and loaded config, build the final configuration object'
configuration_class = create_model('Configuration', **{k: (v, ...) for (k, v) in config_mappings.items()})
return configuration_class(**self.load_config(config_source, parameters_source)) | 311,806,605,572,668,300 | By using the loaded parameters and loaded config, build the final configuration object | applauncher/configuration.py | build_config | applauncher-team/applauncher | python | def build_config(self, config_mappings, config_source, parameters_source):
configuration_class = create_model('Configuration', **{k: (v, ...) for (k, v) in config_mappings.items()})
return configuration_class(**self.load_config(config_source, parameters_source)) |
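A standalone sketch of the create_model pattern that build_config relies on; the RedisConfig mapping is invented purely for illustration.

from pydantic import BaseModel, create_model

class RedisConfig(BaseModel):
    host: str = 'localhost'
    port: int = 6379

mappings = {'redis': RedisConfig}
Configuration = create_model('Configuration', **{k: (v, ...) for k, v in mappings.items()})

config = Configuration(redis={'host': 'cache.internal', 'port': 6380})
print(config.redis.port)  # 6380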
def load_parameters(self, source):
'For YML, the source it the file path'
with open(source, encoding=locale.getpreferredencoding(False)) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
if loaded:
for (key, value) in loaded.items():
if isinstance(value, str):
loaded[key] = (("'" + value) + "'")
return loaded
return {} | 9,100,939,142,661,697,000 | For YML, the source it the file path | applauncher/configuration.py | load_parameters | applauncher-team/applauncher | python | def load_parameters(self, source):
with open(source, encoding=locale.getpreferredencoding(False)) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
if loaded:
for (key, value) in loaded.items():
if isinstance(value, str):
loaded[key] = (("'" + value) + "'")
return loaded
return {} |
def load_config(self, config_source, parameters_source):
'For YML, the source it the file path'
with open(config_source, encoding=locale.getpreferredencoding(False)) as config_source_file:
config_raw = config_source_file.read()
parameters = {}
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if (params is not None):
parameters.update(params)
env_params = {}
env_params.update(os.environ)
for (key, value) in env_params.items():
if is_string(value):
env_params[key] = (("'" + value) + "'")
parameters.update(env_params)
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
return (final_configuration if (final_configuration is not None) else {}) | -1,562,659,037,624,938,800 | For YML, the source it the file path | applauncher/configuration.py | load_config | applauncher-team/applauncher | python | def load_config(self, config_source, parameters_source):
with open(config_source, encoding=locale.getpreferredencoding(False)) as config_source_file:
config_raw = config_source_file.read()
parameters = {}
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if (params is not None):
parameters.update(params)
env_params = {}
env_params.update(os.environ)
for (key, value) in env_params.items():
if is_string(value):
env_params[key] = (("'" + value) + "'")
parameters.update(env_params)
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
return (final_configuration if (final_configuration is not None) else {}) |
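A minimal illustration, with made-up file contents, of the substitution step performed by load_config: parameter and environment values are injected into the raw config text via str.format before YAML parsing, which is why string parameters are pre-wrapped in quotes.

import yaml

config_raw = "database:\n  host: {DB_HOST}\n  port: {DB_PORT}\n"
parameters = {'DB_HOST': "'db.internal'", 'DB_PORT': 5432}
print(yaml.safe_load(config_raw.format(**parameters)))
# {'database': {'host': 'db.internal', 'port': 5432}}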
def test_insert_heterogeneous_params(self):
'test that executemany parameters are asserted to match the\n parameter set of the first.'
users = self.tables.users
assert_raises_message(exc.StatementError, "\\(sqlalchemy.exc.InvalidRequestError\\) A value is required for bind parameter 'user_name', in parameter group 2\n\\[SQL: u?INSERT INTO users", users.insert().execute, {'user_id': 7, 'user_name': 'jack'}, {'user_id': 8, 'user_name': 'ed'}, {'user_id': 9})
users.insert().execute({'user_id': 7}, {'user_id': 8, 'user_name': 'ed'}, {'user_id': 9}) | -7,232,828,510,795,666,000 | test that executemany parameters are asserted to match the
parameter set of the first. | test/sql/test_insert_exec.py | test_insert_heterogeneous_params | AngelLiang/hacking-sqlalchemy | python | def test_insert_heterogeneous_params(self):
'test that executemany parameters are asserted to match the\n parameter set of the first.'
users = self.tables.users
assert_raises_message(exc.StatementError, "\\(sqlalchemy.exc.InvalidRequestError\\) A value is required for bind parameter 'user_name', in parameter group 2\n\\[SQL: u?INSERT INTO users", users.insert().execute, {'user_id': 7, 'user_name': 'jack'}, {'user_id': 8, 'user_name': 'ed'}, {'user_id': 9})
users.insert().execute({'user_id': 7}, {'user_id': 8, 'user_name': 'ed'}, {'user_id': 9}) |
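A hedged sketch of the rule this test exercises, written against the current explicit-connection API rather than the legacy implicit execution used above: every parameter dictionary in an executemany must supply the keys established by the first one, otherwise SQLAlchemy raises the error asserted in the test.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert

metadata = MetaData()
users = Table('users', metadata,
              Column('user_id', Integer, primary_key=True),
              Column('user_name', String(30)))
engine = create_engine('sqlite://')
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(users), [
        {'user_id': 7, 'user_name': 'jack'},
        {'user_id': 8, 'user_name': 'ed'},  # same keys as the first dict: accepted
    ])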
def _test_lastrow_accessor(self, table_, values, assertvalues):
'Tests the inserted_primary_key and lastrow_has_id() functions.'
def insert_values(engine, table_, values):
'\n Inserts a row into a table, returns the full list of values\n INSERTed including defaults that fired off on the DB side and\n detects rows that had defaults and post-fetches.\n '
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if (not set(values).issuperset((c.key for c in table_.primary_key))):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for (col, id_) in zip(table_.primary_key, result.inserted_primary_key):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(*[(col == id_) for (col, id_) in zip(table_.primary_key, result.inserted_primary_key)])
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [engines.testing_engine(options={'implicit_returning': False}), engines.testing_engine(options={'implicit_returning': True})]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine) | -6,284,793,476,165,940,000 | Tests the inserted_primary_key and lastrow_has_id() functions. | test/sql/test_insert_exec.py | _test_lastrow_accessor | AngelLiang/hacking-sqlalchemy | python | def _test_lastrow_accessor(self, table_, values, assertvalues):
def insert_values(engine, table_, values):
'\n Inserts a row into a table, returns the full list of values\n INSERTed including defaults that fired off on the DB side and\n detects rows that had defaults and post-fetches.\n '
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if (not set(values).issuperset((c.key for c in table_.primary_key))):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for (col, id_) in zip(table_.primary_key, result.inserted_primary_key):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(*[(col == id_) for (col, id_) in zip(table_.primary_key, result.inserted_primary_key)])
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [engines.testing_engine(options={'implicit_returning': False}), engines.testing_engine(options={'implicit_returning': True})]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine) |
def insert_values(engine, table_, values):
'\n Inserts a row into a table, returns the full list of values\n INSERTed including defaults that fired off on the DB side and\n detects rows that had defaults and post-fetches.\n '
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if (not set(values).issuperset((c.key for c in table_.primary_key))):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for (col, id_) in zip(table_.primary_key, result.inserted_primary_key):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(*[(col == id_) for (col, id_) in zip(table_.primary_key, result.inserted_primary_key)])
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret | 1,840,504,308,974,259,000 | Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches. | test/sql/test_insert_exec.py | insert_values | AngelLiang/hacking-sqlalchemy | python | def insert_values(engine, table_, values):
'\n Inserts a row into a table, returns the full list of values\n INSERTed including defaults that fired off on the DB side and\n detects rows that had defaults and post-fetches.\n '
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if (not set(values).issuperset((c.key for c in table_.primary_key))):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for (col, id_) in zip(table_.primary_key, result.inserted_primary_key):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(*[(col == id_) for (col, id_) in zip(table_.primary_key, result.inserted_primary_key)])
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret |
@pytest.fixture
def start_south(self, add_south, remove_data_file, remove_directories, south_branch, fledge_url):
' This fixture clone a south repo and starts south instance\n add_south: Fixture that starts any south service with given configuration\n remove_data_file: Fixture that remove data file created during the tests\n remove_directories: Fixture that remove directories created during the tests '
fogbench_template_path = self.prepare_template_reading_from_fogbench()
add_south(self.SOUTH_PLUGIN_NAME, south_branch, fledge_url, service_name=self.SOUTH_PLUGIN_NAME)
(yield self.start_south)
remove_data_file(fogbench_template_path)
remove_directories('/tmp/fledge-south-{}'.format(self.SOUTH_PLUGIN_NAME)) | -5,151,229,683,836,889,000 | This fixture clone a south repo and starts south instance
add_south: Fixture that starts any south service with given configuration
remove_data_file: Fixture that remove data file created during the tests
remove_directories: Fixture that remove directories created during the tests | tests/system/python/e2e/test_e2e_notification_service_with_plugins.py | start_south | YashTatkondawar/fledge | python | @pytest.fixture
def start_south(self, add_south, remove_data_file, remove_directories, south_branch, fledge_url):
' This fixture clone a south repo and starts south instance\n add_south: Fixture that starts any south service with given configuration\n remove_data_file: Fixture that remove data file created during the tests\n remove_directories: Fixture that remove directories created during the tests '
fogbench_template_path = self.prepare_template_reading_from_fogbench()
add_south(self.SOUTH_PLUGIN_NAME, south_branch, fledge_url, service_name=self.SOUTH_PLUGIN_NAME)
(yield self.start_south)
remove_data_file(fogbench_template_path)
remove_directories('/tmp/fledge-south-{}'.format(self.SOUTH_PLUGIN_NAME)) |
def prepare_template_reading_from_fogbench(self):
' Define the template file for fogbench readings '
fogbench_template_path = os.path.join(os.path.expandvars('${FLEDGE_ROOT}'), 'data/{}'.format(self.FOGBENCH_TEMPLATE))
with open(fogbench_template_path, 'w') as f:
f.write(('[{"name": "%s", "sensor_values": [{"name": "sensor", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (self.ASSET_NAME, self.SENSOR_VALUE, self.SENSOR_VALUE)))
return fogbench_template_path | -4,634,762,238,000,365,000 | Define the template file for fogbench readings | tests/system/python/e2e/test_e2e_notification_service_with_plugins.py | prepare_template_reading_from_fogbench | YashTatkondawar/fledge | python | def prepare_template_reading_from_fogbench(self):
' '
fogbench_template_path = os.path.join(os.path.expandvars('${FLEDGE_ROOT}'), 'data/{}'.format(self.FOGBENCH_TEMPLATE))
with open(fogbench_template_path, 'w') as f:
f.write(('[{"name": "%s", "sensor_values": [{"name": "sensor", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (self.ASSET_NAME, self.SENSOR_VALUE, self.SENSOR_VALUE)))
return fogbench_template_path |
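For reference, the template written above is plain JSON; with hypothetical values ASSET_NAME='e2e_notification' and SENSOR_VALUE=123 (the real constants live on the test class and are not shown here) it would look like this:

import json

template = ('[{"name": "%s", "sensor_values": [{"name": "sensor", "type": "number", '
            '"min": %d, "max": %d, "precision": 0}]}]' % ('e2e_notification', 123, 123))
print(json.dumps(json.loads(template), indent=2))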
def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow=0, excludeabove=3000):
" Loads metadata, selects instances for the positive\n and negative classes (using a ratio to dilute the positive\n class with negative instances), creates a lexicon if one doesn't\n already exist, and creates a pandas dataframe storing\n texts as rows and words/features as columns. A refactored\n and simplified version of get_data_for_model().\n "
holdout_authors = True
freqs_already_normalized = True
verbose = False
datecols = ['firstpub']
indexcol = ['docid']
extension = '.tsv'
genrecol = 'tags'
numfeatures = 8000
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
allthefiles = os.listdir(sourcefolder)
volumeIDsinfolder = list()
volumepaths = list()
numchars2trim = len(extension)
for filename in allthefiles:
if filename.endswith(extension):
volID = filename[0:(- numchars2trim)]
volumeIDsinfolder.append(volID)
metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol=indexcol, datecols=datecols, genrecol=genrecol)
(orderedIDs, classdictionary) = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)
metadata = metadata.loc[orderedIDs]
volspresent = [(x, ((sourcefolder + x) + extension)) for x in orderedIDs]
print(len(volspresent))
print('Building vocabulary.')
vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n=numfeatures)
numfeatures = len(vocablist)
print()
print(('Number of features: ' + str(numfeatures)))
authormatches = [[] for x in orderedIDs]
if holdout_authors:
for (idx1, anid) in enumerate(orderedIDs):
thisauthor = metadata.loc[(anid, 'author')]
authormatches[idx1] = list(np.flatnonzero((metadata['author'] == thisauthor)))
for alist in authormatches:
alist.sort(reverse=True)
print()
print('Authors matched.')
print()
(masterdata, classvector) = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)
return (metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist) | 2,959,876,061,134,097,000 | Loads metadata, selects instances for the positive
and negative classes (using a ratio to dilute the positive
class with negative instances), creates a lexicon if one doesn't
already exist, and creates a pandas dataframe storing
texts as rows and words/features as columns. A refactored
and simplified version of get_data_for_model(). | variation/methodological_experiment.py | get_ratio_data | tedunderwood/fiction | python | def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow=0, excludeabove=3000):
" Loads metadata, selects instances for the positive\n and negative classes (using a ratio to dilute the positive\n class with negative instances), creates a lexicon if one doesn't\n already exist, and creates a pandas dataframe storing\n texts as rows and words/features as columns. A refactored\n and simplified version of get_data_for_model().\n "
holdout_authors = True
freqs_already_normalized = True
verbose = False
datecols = ['firstpub']
indexcol = ['docid']
extension = '.tsv'
genrecol = 'tags'
numfeatures = 8000
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
allthefiles = os.listdir(sourcefolder)
volumeIDsinfolder = list()
volumepaths = list()
numchars2trim = len(extension)
for filename in allthefiles:
if filename.endswith(extension):
volID = filename[0:(- numchars2trim)]
volumeIDsinfolder.append(volID)
metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol=indexcol, datecols=datecols, genrecol=genrecol)
(orderedIDs, classdictionary) = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)
metadata = metadata.loc[orderedIDs]
volspresent = [(x, ((sourcefolder + x) + extension)) for x in orderedIDs]
print(len(volspresent))
print('Building vocabulary.')
vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n=numfeatures)
numfeatures = len(vocablist)
print()
print(('Number of features: ' + str(numfeatures)))
authormatches = [[] for x in orderedIDs]
if holdout_authors:
for (idx1, anid) in enumerate(orderedIDs):
thisauthor = metadata.loc[(anid, 'author')]
authormatches[idx1] = list(np.flatnonzero((metadata['author'] == thisauthor)))
for alist in authormatches:
alist.sort(reverse=True)
print()
print('Authors matched.')
print()
(masterdata, classvector) = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)
return (metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist) |
def kldivergence(p, q):
'Kullback-Leibler divergence D(P || Q) for discrete distributions\n Parameters\n ----------\n p, q : array-like, dtype=float, shape=n\n Discrete probability distributions.\n '
p = np.asarray(p, dtype=float)
q = np.asarray(q, dtype=float)
return np.sum(np.where((p != 0), (p * np.log((p / q))), 0)) | -3,483,818,306,852,924,400 | Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions. | variation/methodological_experiment.py | kldivergence | tedunderwood/fiction | python | def kldivergence(p, q):
'Kullback-Leibler divergence D(P || Q) for discrete distributions\n Parameters\n ----------\n p, q : array-like, dtype=float, shape=n\n Discrete probability distributions.\n '
p = np.asarray(p, dtype=float)
q = np.asarray(q, dtype=float)
return np.sum(np.where((p != 0), (p * np.log((p / q))), 0)) |
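A worked example of the same computation on two small discrete distributions; the numbers are arbitrary.

import numpy as np

p = np.array([0.5, 0.3, 0.2])
q = np.array([0.4, 0.4, 0.2])
# D(P || Q) = 0.5*ln(1.25) + 0.3*ln(0.75) + 0.2*ln(1.0) ~= 0.0253
print(np.sum(np.where(p != 0, p * np.log(p / q), 0)))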
def get_divergences(gold, testname, itera, size, pct):
'\n This function gets several possible measures of divergence\n between two models.\n '
model1 = (('../measuredivergence/modeloutput/' + gold) + '.pkl')
meta1 = (('../measuredivergence/modeloutput/' + gold) + '.csv')
testpath = ('../measuredivergence/modeloutput/' + testname)
model2 = (testpath + '.pkl')
meta2 = (testpath + '.csv')
model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)
pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
pearson = averagecorr(pearson1on2, pearson2on1)
spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
spearman = averagecorr(spearman1on2, spearman2on1)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = ((loss1on2 + loss2on1) / 2)
kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
kl = ((kl1on2 + kl2on1) / 2)
return (pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1) | -5,795,583,140,593,156,000 | This function gets several possible measures of divergence
between two models. | variation/methodological_experiment.py | get_divergences | tedunderwood/fiction | python | def get_divergences(gold, testname, itera, size, pct):
'\n This function gets several possible measures of divergence\n between two models.\n '
model1 = (('../measuredivergence/modeloutput/' + gold) + '.pkl')
meta1 = (('../measuredivergence/modeloutput/' + gold) + '.csv')
testpath = ('../measuredivergence/modeloutput/' + testname)
model2 = (testpath + '.pkl')
meta2 = (testpath + '.csv')
model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)
pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
pearson = averagecorr(pearson1on2, pearson2on1)
spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
spearman = averagecorr(spearman1on2, spearman2on1)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = ((loss1on2 + loss2on1) / 2)
kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
kl = ((kl1on2 + kl2on1) / 2)
return (pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1) |
def get_divergence(sampleA, sampleB, twodatafolder='../data/', onedatafolder='../data/'):
'\n This function applies model a to b, and vice versa, and returns\n a couple of measures of divergence: notably lost accuracy and\n z-tranformed spearman correlation.\n '
model1 = (('../measuredivergence/newmodeloutput/' + sampleA) + '.pkl')
meta1 = (('../measuredivergence/newmodeloutput/' + sampleA) + '.csv')
model2 = (('../measuredivergence/newmodeloutput/' + sampleB) + '.pkl')
meta2 = (('../measuredivergence/newmodeloutput/' + sampleB) + '.csv')
model1on2 = versatiletrainer2.apply_pickled_model(model1, twodatafolder, '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)
spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
spearman = ((spearman1on2 + spearman2on1) / 2)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = ((loss1on2 + loss2on1) / 2)
alienacc2 = accuracy(model1on2, 'alien_model')
alienacc1 = accuracy(model2on1, 'alien_model')
acc2 = accuracy(model1on2, 'probability')
acc1 = accuracy(model2on1, 'probability')
meandate2 = np.mean(model1on2.std_date)
meandate1 = np.mean(model2on1.std_date)
return (spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2) | 2,197,540,136,824,818,000 | This function applies model a to b, and vice versa, and returns
a couple of measures of divergence: notably lost accuracy and
z-tranformed spearman correlation. | variation/methodological_experiment.py | get_divergence | tedunderwood/fiction | python | def get_divergence(sampleA, sampleB, twodatafolder='../data/', onedatafolder='../data/'):
'\n This function applies model a to b, and vice versa, and returns\n a couple of measures of divergence: notably lost accuracy and\n z-tranformed spearman correlation.\n '
model1 = (('../measuredivergence/newmodeloutput/' + sampleA) + '.pkl')
meta1 = (('../measuredivergence/newmodeloutput/' + sampleA) + '.csv')
model2 = (('../measuredivergence/newmodeloutput/' + sampleB) + '.pkl')
meta2 = (('../measuredivergence/newmodeloutput/' + sampleB) + '.csv')
model1on2 = versatiletrainer2.apply_pickled_model(model1, twodatafolder, '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)
spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
spearman = ((spearman1on2 + spearman2on1) / 2)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = ((loss1on2 + loss2on1) / 2)
alienacc2 = accuracy(model1on2, 'alien_model')
alienacc1 = accuracy(model2on1, 'alien_model')
acc2 = accuracy(model1on2, 'probability')
acc1 = accuracy(model2on1, 'probability')
meandate2 = np.mean(model1on2.std_date)
meandate1 = np.mean(model2on1.std_date)
return (spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2) |
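The correlation averaging in get_divergence happens on Fisher z-transformed values (np.arctanh); a tiny numeric illustration of that transform and its inverse:

import numpy as np

r1, r2 = 0.9, 0.6
mean_z = (np.arctanh(r1) + np.arctanh(r2)) / 2
print(round(float(mean_z), 3), round(float(np.tanh(mean_z)), 3))  # 1.083 0.794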
@pytest.fixture
def start_of_recurrence(future_date):
'\n Date object representing the first day of a record with recurrence\n '
return future_date | 5,637,887,532,328,062,000 | Date object representing the first day of a record with recurrence | moneyforecast/tests/records/fixtures.py | start_of_recurrence | curaloucura/money-forecast | python | @pytest.fixture
def start_of_recurrence(future_date):
'\n \n '
return future_date |
@pytest.fixture
def end_of_recurrence(future_date):
'\n Return a date which is used to determine the end month the recurrence\n should occur\n '
date = (future_date + relativedelta(months=6))
return date | -4,469,297,031,474,214,400 | Return a date which is used to determine the end month the recurrence
should occur | moneyforecast/tests/records/fixtures.py | end_of_recurrence | curaloucura/money-forecast | python | @pytest.fixture
def end_of_recurrence(future_date):
'\n Return a date which is used to determine the end month the recurrence\n should occur\n '
date = (future_date + relativedelta(months=6))
return date |
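A quick illustration of the relativedelta arithmetic used by end_of_recurrence; the date is arbitrary.

from datetime import date
from dateutil.relativedelta import relativedelta

print(date(2024, 1, 31) + relativedelta(months=6))  # 2024-07-31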
@pytest.fixture
def month_control(user, current_date):
'\n Return a MonthControl object for the current date.\n\n Important: currently any Record fixture should come before month_control\n '
month_control = MonthControl(user, current_date.month, current_date.year, cache={})
return month_control | 6,479,720,257,097,776,000 | Return a MonthControl object for the current date.
Important: currently any Record fixture should come before month_control | moneyforecast/tests/records/fixtures.py | month_control | curaloucura/money-forecast | python | @pytest.fixture
def month_control(user, current_date):
'\n Return a MonthControl object for the current date.\n\n Important: currently any Record fixture should come before month_control\n '
month_control = MonthControl(user, current_date.month, current_date.year, cache={})
return month_control |
@pytest.fixture
def month_control_with_budget(user, current_date):
'\n Return a MonthControlWithBudget object for the current date.\n\n Important: currently any Record fixture should come before month_control\n '
month_control = MonthControlWithBudget(user, current_date.month, current_date.year, cache={})
return month_control | 5,738,756,399,209,524,000 | Return a MonthControlWithBudget object for the current date.
Important: currently any Record fixture should come before month_control | moneyforecast/tests/records/fixtures.py | month_control_with_budget | curaloucura/money-forecast | python | @pytest.fixture
def month_control_with_budget(user, current_date):
'\n Return a MonthControlWithBudget object for the current date.\n\n Important: currently any Record fixture should come before month_control\n '
month_control = MonthControlWithBudget(user, current_date.month, current_date.year, cache={})
return month_control |
@pytest.fixture
def outcome(user):
'\n Main category of outcome type\n '
category = Category.objects.create(name='outcome', type_category=OUTCOME, user=user)
return category | 6,987,829,848,362,085,000 | Main category of outcome type | moneyforecast/tests/records/fixtures.py | outcome | curaloucura/money-forecast | python | @pytest.fixture
def outcome(user):
'\n \n '
category = Category.objects.create(name='outcome', type_category=OUTCOME, user=user)
return category |
@pytest.fixture
def income(user):
'\n Main category of income type\n '
category = Category.objects.create(name='income', type_category=INCOME, user=user)
return category | 4,078,536,843,082,901,500 | Main category of income type | moneyforecast/tests/records/fixtures.py | income | curaloucura/money-forecast | python | @pytest.fixture
def income(user):
'\n \n '
category = Category.objects.create(name='income', type_category=INCOME, user=user)
return category |
@pytest.fixture
def savings(user):
'\n Category of Savings\n '
category = Category.objects.create(name='savings', type_category=SAVINGS, user=user)
return category | -7,184,796,791,208,873,000 | Category of Savings | moneyforecast/tests/records/fixtures.py | savings | curaloucura/money-forecast | python | @pytest.fixture
def savings(user):
'\n \n '
category = Category.objects.create(name='savings', type_category=SAVINGS, user=user)
return category |
@pytest.fixture
def outcome_current(user, outcome, current_date):
'\n Record of type Outcome set to today (current date)\n '
record = Record.objects.create(category=outcome, amount=1, start_date=current_date, user=user)
return record | 4,778,692,088,416,692,000 | Record of type Outcome set to today (current date) | moneyforecast/tests/records/fixtures.py | outcome_current | curaloucura/money-forecast | python | @pytest.fixture
def outcome_current(user, outcome, current_date):
'\n \n '
record = Record.objects.create(category=outcome, amount=1, start_date=current_date, user=user)
return record |
@pytest.fixture
def outcome_future(user, outcome, future_date):
'\n Record of type Outcome set in the future\n '
record = Record.objects.create(category=outcome, amount=1, start_date=future_date, user=user)
return record | 2,979,084,224,250,077,700 | Record of type Outcome set in the future | moneyforecast/tests/records/fixtures.py | outcome_future | curaloucura/money-forecast | python | @pytest.fixture
def outcome_future(user, outcome, future_date):
'\n \n '
record = Record.objects.create(category=outcome, amount=1, start_date=future_date, user=user)
return record |
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
'\n Record of type Outcome set in the future with a day of the month set\n to create a recurring record\n\n This fixture should not be used with outcome_recurrent_limited and\n outcome_with_parent since they change the instance of this own record\n '
record = Record.objects.create(category=outcome, amount=1, start_date=start_of_recurrence, user=user, day_of_month=start_of_recurrence.day)
return record | -1,136,533,490,107,749,800 | Record of type Outcome set in the future with a day of the month set
to create a recurring record
This fixture should not be used with outcome_recurrent_limited and
outcome_with_parent since they change the instance of this own record | moneyforecast/tests/records/fixtures.py | outcome_recurrent | curaloucura/money-forecast | python | @pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
'\n Record of type Outcome set in the future with a day of the month set\n to create a recurring record\n\n This fixture should not be used with outcome_recurrent_limited and\n outcome_with_parent since they change the instance of this own record\n '
record = Record.objects.create(category=outcome, amount=1, start_date=start_of_recurrence, user=user, day_of_month=start_of_recurrence.day)
return record |
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
'\n Record of type Outcome set in the future with a recurring day of the month\n set and limited to a certain time\n '
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent | -463,034,850,628,568,500 | Record of type Outcome set in the future with a recurring day of the month
set and limited to a certain time | moneyforecast/tests/records/fixtures.py | outcome_recurrent_limited | curaloucura/money-forecast | python | @pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
'\n Record of type Outcome set in the future with a recurring day of the month\n set and limited to a certain time\n '
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent |
@pytest.fixture
def savings_current(request, user, savings, current_date):
'\n Record of type Outcome set in the future\n '
record = Record.objects.create(category=savings, amount=1, start_date=current_date, user=user)
return record | -8,849,889,830,254,215,000 | Record of type Outcome set in the future | moneyforecast/tests/records/fixtures.py | savings_current | curaloucura/money-forecast | python | @pytest.fixture
def savings_current(request, user, savings, current_date):
'\n \n '
record = Record.objects.create(category=savings, amount=1, start_date=current_date, user=user)
return record |
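The fixture rows above all note that a Record fixture must be requested before month_control. A minimal sketch of how that ordering plays out in a pytest test, assuming these fixtures are collected from the same fixtures/conftest module; the test name and assertion are illustrative only:

def test_month_control_sees_existing_record(outcome_current, month_control):
    # Listing outcome_current first makes pytest create the Record before
    # MonthControl is instantiated, as the fixture docstrings above require.
    assert month_control is not None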
def __init__(self, weight=1.0):
'Constructor for this class does following tasks, if not already downloaded , it first downloads text detector dnn weights file from public URL ands save it at USER_HOME/.katna directory, or /tmp/.katna directory. After this initializer code initializes internal parameter: min_confidence (for text detection)\n '
super().__init__(weight)
self.min_confidence = config.TextDetector.min_confidence
self.merge_threshold = config.TextDetector.merge_threshold
self.layerNames = config.TextDetector.layerNames
self.frozen_weights = config.TextDetector.frozen_weights
self.cache_subdir = config.TextDetector.cache_subdir
try:
self.network_folder_path = os.path.join(os.path.expanduser('~'), '.katna')
if (not os.access(self.network_folder_path, os.W_OK)):
self.network_folder_path = os.path.join('/tmp', '.katna')
self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
if (not os.path.exists(self.datadir)):
os.makedirs(self.datadir)
self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
if (not os.path.exists(self.network_file_path)):
self.download_data()
self.net = cv2.dnn.readNet(self.network_file_path)
except Exception:
raise FileNotFoundError((self.frozen_weights + ' seems to be missing. Download the file and specify the full path while initializing TextDetector class')) | 8,590,788,004,543,124,000 | Constructor for this class does following tasks, if not already downloaded , it first downloads text detector dnn weights file from public URL ands save it at USER_HOME/.katna directory, or /tmp/.katna directory. After this initializer code initializes internal parameter: min_confidence (for text detection) | Katna/image_filters/text_detector.py | __init__ | jibinmathew69/katna | python | def __init__(self, weight=1.0):
'\n '
super().__init__(weight)
self.min_confidence = config.TextDetector.min_confidence
self.merge_threshold = config.TextDetector.merge_threshold
self.layerNames = config.TextDetector.layerNames
self.frozen_weights = config.TextDetector.frozen_weights
self.cache_subdir = config.TextDetector.cache_subdir
try:
self.network_folder_path = os.path.join(os.path.expanduser('~'), '.katna')
if (not os.access(self.network_folder_path, os.W_OK)):
self.network_folder_path = os.path.join('/tmp', '.katna')
self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
if (not os.path.exists(self.datadir)):
os.makedirs(self.datadir)
self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
if (not os.path.exists(self.network_file_path)):
self.download_data()
self.net = cv2.dnn.readNet(self.network_file_path)
except Exception:
raise FileNotFoundError((self.frozen_weights + ' seems to be missing. Download the file and specify the full path while initializing TextDetector class')) |
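A hedged construction sketch for the initializer above: instantiating the filter triggers a one-time download of the frozen DNN weights into USER_HOME/.katna (or /tmp/.katna when the home directory is not writable). The import path follows this row's path column; network access is assumed.

from Katna.image_filters.text_detector import TextDetector

detector = TextDetector(weight=1.0)  # downloads the text-detection weights on first use
print(detector.min_confidence)       # detection threshold pulled from config.TextDetector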
def download_data(self):
'Public function for downloading the network weight from the URL link, to be used for\n text detection functionality. \n Troubleshooting tip: If you get FileNotFound error during text detector initialization,\n initialize the text detector and call this function directly to download the model file from public URL link.\n '
link = config.TextDetector.model_download_link
r = requests.get(link, stream=True)
print('Downloading model file...')
with open(os.path.join(self.datadir, self.frozen_weights), 'wb') as f:
for chunk in r.iter_content(chunk_size=(1024 * 1024)):
if chunk:
f.write(chunk)
print('Model file downloaded.') | 8,059,587,863,676,840,000 | Public function for downloading the network weight from the URL link, to be used for
text detection functionality.
Troubleshooting tip: If you get FileNotFound error during text detector initialization,
initialize the text detector and call this function directly to download the model file from public URL link. | Katna/image_filters/text_detector.py | download_data | jibinmathew69/katna | python | def download_data(self):
'Public function for downloading the network weight from the URL link, to be used for\n text detection functionality. \n Troubleshooting tip: If you get FileNotFound error during text detector initialization,\n initialize the text detector and call this function directly to download the model file from public URL link.\n '
link = config.TextDetector.model_download_link
r = requests.get(link, stream=True)
print('Downloading model file...')
with open(os.path.join(self.datadir, self.frozen_weights), 'wb') as f:
for chunk in r.iter_content(chunk_size=(1024 * 1024)):
if chunk:
f.write(chunk)
print('Model file downloaded.') |
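Following the troubleshooting tip in the docstring above, the model file can also be fetched explicitly after construction; a sketch under the assumption that the download URL in config.TextDetector is reachable:

detector = TextDetector()
detector.download_data()  # streams the model in 1 MB chunks into the .katna cache directory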
def __decode_predictions(self, scores, geometry):
'Internal Function for getting bounding box and confidence values \n from text detector dnn network output (scores, geometry)\n function takes the number of rows and columns from the scores volume, then\n initializes set of bounding box rectangles and corresponding confidence scores\n '
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
for y in range(0, numRows):
scoresData = scores[(0, 0, y)]
xData0 = geometry[(0, 0, y)]
xData1 = geometry[(0, 1, y)]
xData2 = geometry[(0, 2, y)]
xData3 = geometry[(0, 3, y)]
anglesData = geometry[(0, 4, y)]
for x in range(0, numCols):
if (scoresData[x] < self.min_confidence):
continue
(offsetX, offsetY) = ((x * 4.0), (y * 4.0))
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
h = (xData0[x] + xData2[x])
w = (xData1[x] + xData3[x])
endX = int(((offsetX + (cos * xData1[x])) + (sin * xData2[x])))
endY = int(((offsetY - (sin * xData1[x])) + (cos * xData2[x])))
startX = int((endX - w))
startY = int((endY - h))
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
return (rects, confidences) | 4,351,937,684,898,817,500 | Internal Function for getting bounding box and confidence values
from text detector dnn network output (scores, geometry)
function takes the number of rows and columns from the scores volume, then
initializes set of bounding box rectangles and corresponding confidence scores | Katna/image_filters/text_detector.py | __decode_predictions | jibinmathew69/katna | python | def __decode_predictions(self, scores, geometry):
'Internal Function for getting bounding box and confidence values \n from text detector dnn network output (scores, geometry)\n function takes the number of rows and columns from the scores volume, then\n initializes set of bounding box rectangles and corresponding confidence scores\n '
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
for y in range(0, numRows):
scoresData = scores[(0, 0, y)]
xData0 = geometry[(0, 0, y)]
xData1 = geometry[(0, 1, y)]
xData2 = geometry[(0, 2, y)]
xData3 = geometry[(0, 3, y)]
anglesData = geometry[(0, 4, y)]
for x in range(0, numCols):
if (scoresData[x] < self.min_confidence):
continue
(offsetX, offsetY) = ((x * 4.0), (y * 4.0))
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
h = (xData0[x] + xData2[x])
w = (xData1[x] + xData3[x])
endX = int(((offsetX + (cos * xData1[x])) + (sin * xData2[x])))
endY = int(((offsetY - (sin * xData1[x])) + (cos * xData2[x])))
startX = int((endX - w))
startY = int((endY - h))
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
return (rects, confidences) |
def __merge_boxes(self, rects):
'main function to detect text boxes from image\n\n :param rects: list of \n :type rects: numpy array\n :param rectsUsed: image file in numpy array/opencv format\n :type rectsUsed: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n '
def grouper(iterable, interval=2):
prev = None
group = []
for item in iterable:
if ((not prev) or (abs((item[1] - prev[1])) <= interval)):
group.append(item)
else:
(yield group)
group = [item]
prev = item
if group:
(yield group)
rects_used = []
heights = list()
for bbox in rects:
heights.append((bbox[3] - bbox[1]))
heights = sorted(heights)
median_height = (heights[(len(heights) // 2)] / 2)
bboxes_list = sorted(rects, key=(lambda k: k[1]))
combined_bboxes = grouper(bboxes_list, median_height)
for group in combined_bboxes:
x_min = min(group, key=(lambda k: k[0]))[0]
x_max = max(group, key=(lambda k: k[2]))[2]
y_min = min(group, key=(lambda k: k[1]))[1]
y_max = max(group, key=(lambda k: k[3]))[3]
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used | -9,106,895,150,870,533,000 | main function to detect text boxes from image
:param rects: list of
:type rects: numpy array
:param rectsUsed: image file in numpy array/opencv format
:type rectsUsed: numpy array
:return: output image with the list of text boxes
:rtype: file, list | Katna/image_filters/text_detector.py | __merge_boxes | jibinmathew69/katna | python | def __merge_boxes(self, rects):
'main function to detect text boxes from image\n\n :param rects: list of \n :type rects: numpy array\n :param rectsUsed: image file in numpy array/opencv format\n :type rectsUsed: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n '
def grouper(iterable, interval=2):
prev = None
group = []
for item in iterable:
if ((not prev) or (abs((item[1] - prev[1])) <= interval)):
group.append(item)
else:
(yield group)
group = [item]
prev = item
if group:
(yield group)
rects_used = []
heights = list()
for bbox in rects:
heights.append((bbox[3] - bbox[1]))
heights = sorted(heights)
median_height = (heights[(len(heights) // 2)] / 2)
bboxes_list = sorted(rects, key=(lambda k: k[1]))
combined_bboxes = grouper(bboxes_list, median_height)
for group in combined_bboxes:
x_min = min(group, key=(lambda k: k[0]))[0]
x_max = max(group, key=(lambda k: k[2]))[2]
y_min = min(group, key=(lambda k: k[1]))[1]
y_max = max(group, key=(lambda k: k[3]))[3]
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used |
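A self-contained toy run of the grouping idea used by __merge_boxes above (not the Katna API): boxes are sorted by their top edge, split whenever the vertical gap to the previous box exceeds half the median box height, and each group is collapsed to its envelope.

boxes = [[10, 10, 60, 30], [70, 12, 120, 32], [10, 80, 90, 100]]   # [x1, y1, x2, y2]
heights = sorted(b[3] - b[1] for b in boxes)
interval = heights[len(heights) // 2] / 2          # half the median box height
boxes = sorted(boxes, key=lambda b: b[1])
groups, current = [], [boxes[0]]
for box in boxes[1:]:
    if abs(box[1] - current[-1][1]) <= interval:   # close enough: same text line
        current.append(box)
    else:
        groups.append(current)
        current = [box]
groups.append(current)
merged = [[min(b[0] for b in g), min(b[1] for b in g),
           max(b[2] for b in g), max(b[3] for b in g)] for g in groups]
print(merged)  # [[10, 10, 120, 32], [10, 80, 90, 100]]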
def __detect_text(self):
'Internal function to detect text bounding boxes from input image.\n Returns list of bounding boxes of each detected text field in input image.\n\n :param image: image file in numpy array/opencv format\n :type image: numpy array\n :param output_image: image file in numpy array/opencv format\n :type output_image: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n '
(H, W) = self.image.shape[:2]
rW = (W / 320)
rH = (H / 320)
image = cv2.resize(self.image, (320, 320))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)
self.net.setInput(blob)
(scores, geometry) = self.net.forward(self.layerNames)
(rects, confidences) = self.__decode_predictions(scores, geometry)
boxes = non_max_suppression(np.array(rects), probs=confidences)
text_rects = []
for (startX, startY, endX, endY) in boxes:
startX = int((startX * rW))
startY = int((startY * rH))
endX = int((endX * rW))
endY = int((endY * rH))
cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
text_rects.append([startX, startY, endX, endY])
text_rects = sorted(text_rects, key=(lambda item: item[0]))
final_rects = text_rects
if (len(text_rects) > 0):
final_rects = self.__merge_boxes(text_rects)
return final_rects | -8,224,508,995,595,900,000 | Internal function to detect text bounding boxes from input image.
Returns list of bounding boxes of each detected text field in input image.
:param image: image file in numpy array/opencv format
:type image: numpy array
:param output_image: image file in numpy array/opencv format
:type output_image: numpy array
:return: output image with the list of text boxes
:rtype: file, list | Katna/image_filters/text_detector.py | __detect_text | jibinmathew69/katna | python | def __detect_text(self):
'Internal function to detect text bounding boxes from input image.\n Returns list of bounding boxes of each detected text field in input image.\n\n :param image: image file in numpy array/opencv format\n :type image: numpy array\n :param output_image: image file in numpy array/opencv format\n :type output_image: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n '
(H, W) = self.image.shape[:2]
rW = (W / 320)
rH = (H / 320)
image = cv2.resize(self.image, (320, 320))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)
self.net.setInput(blob)
(scores, geometry) = self.net.forward(self.layerNames)
(rects, confidences) = self.__decode_predictions(scores, geometry)
boxes = non_max_suppression(np.array(rects), probs=confidences)
text_rects = []
for (startX, startY, endX, endY) in boxes:
startX = int((startX * rW))
startY = int((startY * rH))
endX = int((endX * rW))
endY = int((endY * rH))
cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
text_rects.append([startX, startY, endX, endY])
text_rects = sorted(text_rects, key=(lambda item: item[0]))
final_rects = text_rects
if (len(text_rects) > 0):
final_rects = self.__merge_boxes(text_rects)
return final_rects |
def set_image(self, image):
'Public set_image function, This will detect all text boxes in input image and\n will saves them as internal list of text_rect to be used in get_filter_result\n\n :param image: input image from which needs to be cropped\n :type image: numpy array(opencv)\n '
if (image is None):
return None
self.image = image
self.text_rects = self.__detect_text() | -6,723,038,099,207,040,000 | Public set_image function, This will detect all text boxes in input image and
will saves them as internal list of text_rect to be used in get_filter_result
:param image: input image from which needs to be cropped
:type image: numpy array(opencv) | Katna/image_filters/text_detector.py | set_image | jibinmathew69/katna | python | def set_image(self, image):
'Public set_image function, This will detect all text boxes in input image and\n will saves them as internal list of text_rect to be used in get_filter_result\n\n :param image: input image from which needs to be cropped\n :type image: numpy array(opencv)\n '
if (image is None):
return None
self.image = image
self.text_rects = self.__detect_text() |
def get_filter_result(self, crop):
'Main public function of TextDetector filter class,\n this filter Returns false if crop contains no text, additionally\n checks for overlap between input crop rectangle and the detected\n text bounding box, returns True if No overlap (Filter will not discard input crop)\n otherwise returns False (signal for discarding input crop).\n \n :param crop: input crop rectangle to test\n :type crop: crop_rect\n :return: True if No overlap (Filter will not discard input crop) otherwise returns False \n :rtype: bool\n '
if ((self.text_rects is None) or (len(self.text_rects) == 0)):
return True
for rect in self.text_rects:
if (not ((rect[2] <= (crop.x + crop.w)) and (rect[0] >= crop.x) and (rect[1] >= crop.y) and (rect[3] <= (crop.y + crop.h)))):
return False
else:
return True
return True | 9,027,545,007,575,559,000 | Main public function of TextDetector filter class,
this filter Returns false if crop contains no text, additionally
checks for overlap between input crop rectangle and the detected
text bounding box, returns True if No overlap (Filter will not discard input crop)
otherwise returns False (signal for discarding input crop).
:param crop: input crop rectangle to test
:type crop: crop_rect
:return: True if No overlap (Filter will not discard input crop) otherwise returns False
:rtype: bool | Katna/image_filters/text_detector.py | get_filter_result | jibinmathew69/katna | python | def get_filter_result(self, crop):
'Main public function of TextDetector filter class,\n this filter Returns false if crop contains no text, additionally\n checks for overlap between input crop rectangle and the detected\n text bounding box, returns True if No overlap (Filter will not discard input crop)\n otherwise returns False (signal for discarding input crop).\n \n :param crop: input crop rectangle to test\n :type crop: crop_rect\n :return: True if No overlap (Filter will not discard input crop) otherwise returns False \n :rtype: bool\n '
if ((self.text_rects is None) or (len(self.text_rects) == 0)):
return True
for rect in self.text_rects:
if (not ((rect[2] <= (crop.x + crop.w)) and (rect[0] >= crop.x) and (rect[1] >= crop.y) and (rect[3] <= (crop.y + crop.h)))):
return False
else:
return True
return True |
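An end-to-end sketch of the filter under stated assumptions: cv2 can read a local image file, and the crop argument only needs x, y, w and h attributes (Katna's own crop_rect class is assumed to carry these; a namedtuple stands in for it here).

from collections import namedtuple
import cv2
from Katna.image_filters.text_detector import TextDetector

Crop = namedtuple("Crop", ["x", "y", "w", "h"])   # stand-in for Katna's crop_rect

detector = TextDetector()
image = cv2.imread("photo.jpg")                   # hypothetical input file
detector.set_image(image)                         # runs text detection once
full_frame = Crop(x=0, y=0, w=image.shape[1], h=image.shape[0])
print(detector.get_filter_result(full_frame))     # filter verdict for this crop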
def english_filter(tokens):
'\n Given a list of tokens, remove a small list of English stopwords.\n '
non_stopwords = [token for token in tokens if (token not in STOPWORDS)]
while (non_stopwords and (non_stopwords[0] in DROP_FIRST)):
non_stopwords = non_stopwords[1:]
if non_stopwords:
return non_stopwords
else:
return tokens | -5,594,310,101,230,834,000 | Given a list of tokens, remove a small list of English stopwords. | lightning_conceptnet/nodes.py | english_filter | ldtoolkit/lightning-conceptnet | python | def english_filter(tokens):
'\n \n '
non_stopwords = [token for token in tokens if (token not in STOPWORDS)]
while (non_stopwords and (non_stopwords[0] in DROP_FIRST)):
non_stopwords = non_stopwords[1:]
if non_stopwords:
return non_stopwords
else:
return tokens |
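A small check of english_filter (assuming it is imported from the module in this row's path column), inferred from the doctest in the standardized_concept_uri row just below: 'this is a test' normalizes to 'this_is_test', so the article is the only token dropped here.

print(english_filter(["this", "is", "a", "test"]))  # ['this', 'is', 'test']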
def standardized_concept_uri(lang, text, *more):
"\n Make the appropriate URI for a concept in a particular language, including\n removing English stopwords, normalizing the text in a way appropriate\n to that language (using the text normalization from wordfreq), and joining\n its tokens with underscores in a concept URI.\n\n This text normalization can smooth over some writing differences: for\n example, it removes vowel points from Arabic words, and it transliterates\n Serbian written in the Cyrillic alphabet to the Latin alphabet so that it\n can match other words written in Latin letters.\n\n 'more' contains information to distinguish word senses, such as a part\n of speech or a WordNet domain. The items in 'more' get lowercased and\n joined with underscores, but skip many of the other steps -- for example,\n they won't have stopwords removed.\n\n >>> standardized_concept_uri('en', 'this is a test')\n '/c/en/this_is_test'\n >>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')\n '/c/en/this_is_test/n/example_phrase'\n >>> standardized_concept_uri('sh', 'симетрија')\n '/c/sh/simetrija'\n "
lang = lang.lower()
if (lang == 'en'):
token_filter = english_filter
else:
token_filter = None
text = preprocess_text(text.replace('_', ' '), lang)
tokens = simple_tokenize(text)
if (token_filter is not None):
tokens = token_filter(tokens)
norm_text = '_'.join(tokens)
more_text = []
for item in more:
if (item is not None):
tokens = simple_tokenize(item.replace('_', ' '))
if (token_filter is not None):
tokens = token_filter(tokens)
more_text.append('_'.join(tokens))
return concept_uri(lang, norm_text, *more_text) | -705,133,688,007,534,700 | Make the appropriate URI for a concept in a particular language, including
removing English stopwords, normalizing the text in a way appropriate
to that language (using the text normalization from wordfreq), and joining
its tokens with underscores in a concept URI.
This text normalization can smooth over some writing differences: for
example, it removes vowel points from Arabic words, and it transliterates
Serbian written in the Cyrillic alphabet to the Latin alphabet so that it
can match other words written in Latin letters.
'more' contains information to distinguish word senses, such as a part
of speech or a WordNet domain. The items in 'more' get lowercased and
joined with underscores, but skip many of the other steps -- for example,
they won't have stopwords removed.
>>> standardized_concept_uri('en', 'this is a test')
'/c/en/this_is_test'
>>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')
'/c/en/this_is_test/n/example_phrase'
>>> standardized_concept_uri('sh', 'симетрија')
'/c/sh/simetrija' | lightning_conceptnet/nodes.py | standardized_concept_uri | ldtoolkit/lightning-conceptnet | python | def standardized_concept_uri(lang, text, *more):
"\n Make the appropriate URI for a concept in a particular language, including\n removing English stopwords, normalizing the text in a way appropriate\n to that language (using the text normalization from wordfreq), and joining\n its tokens with underscores in a concept URI.\n\n This text normalization can smooth over some writing differences: for\n example, it removes vowel points from Arabic words, and it transliterates\n Serbian written in the Cyrillic alphabet to the Latin alphabet so that it\n can match other words written in Latin letters.\n\n 'more' contains information to distinguish word senses, such as a part\n of speech or a WordNet domain. The items in 'more' get lowercased and\n joined with underscores, but skip many of the other steps -- for example,\n they won't have stopwords removed.\n\n >>> standardized_concept_uri('en', 'this is a test')\n '/c/en/this_is_test'\n >>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')\n '/c/en/this_is_test/n/example_phrase'\n >>> standardized_concept_uri('sh', 'симетрија')\n '/c/sh/simetrija'\n "
lang = lang.lower()
if (lang == 'en'):
token_filter = english_filter
else:
token_filter = None
text = preprocess_text(text.replace('_', ' '), lang)
tokens = simple_tokenize(text)
if (token_filter is not None):
tokens = token_filter(tokens)
norm_text = '_'.join(tokens)
more_text = []
for item in more:
if (item is not None):
tokens = simple_tokenize(item.replace('_', ' '))
if (token_filter is not None):
tokens = token_filter(tokens)
more_text.append('_'.join(tokens))
return concept_uri(lang, norm_text, *more_text) |
def get_sources_string_names(sources):
'\n For the specified list of @sources which can be strings, Files, or targets,\n get all the output basenames.\n '
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names | -1,150,719,354,015,871,500 | For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames. | mesonbuild/build.py | get_sources_string_names | jmesmon/meson | python | def get_sources_string_names(sources):
'\n For the specified list of @sources which can be strings, Files, or targets,\n get all the output basenames.\n '
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names |
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
'Construct target ID from subdir, name and type suffix.\n\n This helper function is made public mostly for tests.'
name_part = name.replace('/', '@').replace('\\', '@')
assert (not has_path_sep(type_suffix))
my_id = (name_part + type_suffix)
if subdir:
subdir_part = Target._get_id_hash(subdir)
return ((subdir_part + '@@') + my_id)
return my_id | 30,134,380,908,118,936 | Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests. | mesonbuild/build.py | construct_id_from_path | jmesmon/meson | python | @staticmethod
def construct_id_from_path(subdir, name, type_suffix):
'Construct target ID from subdir, name and type suffix.\n\n This helper function is made public mostly for tests.'
name_part = name.replace('/', '@').replace('\\', '@')
assert (not has_path_sep(type_suffix))
my_id = (name_part + type_suffix)
if subdir:
subdir_part = Target._get_id_hash(subdir)
return ((subdir_part + '@@') + my_id)
return my_id |
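A hedged illustration of the ID scheme above, assuming mesonbuild.build.Target is importable; '@exe' is used as a plausible type suffix, and the prefix for a non-empty subdir comes from Target._get_id_hash, which is not shown in this row.

print(Target.construct_id_from_path("", "foo", "@exe"))         # foo@exe
print(Target.construct_id_from_path("sub/dir", "foo", "@exe"))  # <hash-of-subdir>@@foo@exe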
def process_compilers_late(self):
"Processes additional compilers after kwargs have been evaluated.\n\n This can add extra compilers that might be required by keyword\n arguments, such as link_with or dependencies. It will also try to guess\n which compiler to use if one hasn't been selected already.\n "
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
if (self.link_targets or self.link_whole_targets):
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for (name, compiler) in t.compilers.items():
if (name in clink_langs):
extra.add((name, compiler))
for (name, compiler) in sorted(extra, key=(lambda p: sort_clink(p[0]))):
self.compilers[name] = compiler
if (not self.compilers):
for lang in clink_langs:
if (lang in compilers):
self.compilers[lang] = compilers[lang]
break | -5,047,288,703,176,062,000 | Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already. | mesonbuild/build.py | process_compilers_late | jmesmon/meson | python | def process_compilers_late(self):
"Processes additional compilers after kwargs have been evaluated.\n\n This can add extra compilers that might be required by keyword\n arguments, such as link_with or dependencies. It will also try to guess\n which compiler to use if one hasn't been selected already.\n "
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
if (self.link_targets or self.link_whole_targets):
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for (name, compiler) in t.compilers.items():
if (name in clink_langs):
extra.add((name, compiler))
for (name, compiler) in sorted(extra, key=(lambda p: sort_clink(p[0]))):
self.compilers[name] = compiler
if (not self.compilers):
for lang in clink_langs:
if (lang in compilers):
self.compilers[lang] = compilers[lang]
break |
def process_compilers(self):
'\n Populate self.compilers, which is the list of compilers that this\n target will use for compiling all its sources.\n We also add compilers that were used by extracted objects to simplify\n dynamic linker determination.\n '
if ((not self.sources) and (not self.generated) and (not self.objects)):
return
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
sources = list(self.sources)
for gensrc in self.generated:
for s in gensrc.get_outputs():
if (not is_object(s)):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
for o in self.objects:
if (not isinstance(o, ExtractedObjects)):
continue
for s in o.srclist:
if (not s.endswith(lang_suffixes['vala'])):
sources.append(s)
if sources:
for s in sources:
for (lang, compiler) in compilers.items():
if compiler.can_compile(s):
if (lang not in self.compilers):
self.compilers[lang] = compiler
break
self.compilers = OrderedDict(sorted(self.compilers.items(), key=(lambda t: sort_clink(t[0]))))
if (('vala' in self.compilers) and ('c' not in self.compilers)):
self.compilers['c'] = compilers['c'] | 5,489,087,880,487,695,000 | Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination. | mesonbuild/build.py | process_compilers | jmesmon/meson | python | def process_compilers(self):
'\n Populate self.compilers, which is the list of compilers that this\n target will use for compiling all its sources.\n We also add compilers that were used by extracted objects to simplify\n dynamic linker determination.\n '
if ((not self.sources) and (not self.generated) and (not self.objects)):
return
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
sources = list(self.sources)
for gensrc in self.generated:
for s in gensrc.get_outputs():
if (not is_object(s)):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
for o in self.objects:
if (not isinstance(o, ExtractedObjects)):
continue
for s in o.srclist:
if (not s.endswith(lang_suffixes['vala'])):
sources.append(s)
if sources:
for s in sources:
for (lang, compiler) in compilers.items():
if compiler.can_compile(s):
if (lang not in self.compilers):
self.compilers[lang] = compiler
break
self.compilers = OrderedDict(sorted(self.compilers.items(), key=(lambda t: sort_clink(t[0]))))
if (('vala' in self.compilers) and ('c' not in self.compilers)):
self.compilers['c'] = compilers['c'] |
def process_link_depends(self, sources, environment):
"Process the link_depends keyword argument.\n\n This is designed to handle strings, Files, and the output of Custom\n Targets. Notably it doesn't handle generator() returned objects, since\n adding them as a link depends would inherently cause them to be\n generated twice, since the output needs to be passed to the ld_args and\n link_depends.\n "
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend([File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments('Link_depends arguments must be strings, Files, or a Custom Target, or lists thereof.') | 6,275,296,070,609,094,000 | Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends. | mesonbuild/build.py | process_link_depends | jmesmon/meson | python | def process_link_depends(self, sources, environment):
"Process the link_depends keyword argument.\n\n This is designed to handle strings, Files, and the output of Custom\n Targets. Notably it doesn't handle generator() returned objects, since\n adding them as a link depends would inherently cause them to be\n generated twice, since the output needs to be passed to the ld_args and\n link_depends.\n "
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend([File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments('Link_depends arguments must be strings, Files, or a Custom Target, or lists thereof.') |
def get_langs_used_by_deps(self):
'\n Sometimes you want to link to a C++ library that exports C API, which\n means the linker must link in the C++ stdlib, and we must use a C++\n compiler for linking. The same is also applicable for objc/objc++, etc,\n so we can keep using clink_langs for the priority order.\n\n See: https://github.com/mesonbuild/meson/issues/1653\n '
langs = []
for dep in self.external_deps:
if (dep.language is None):
continue
if (dep.language not in langs):
langs.append(dep.language)
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if (language not in langs):
langs.append(language)
return langs | 5,184,799,372,204,265,000 | Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653 | mesonbuild/build.py | get_langs_used_by_deps | jmesmon/meson | python | def get_langs_used_by_deps(self):
'\n Sometimes you want to link to a C++ library that exports C API, which\n means the linker must link in the C++ stdlib, and we must use a C++\n compiler for linking. The same is also applicable for objc/objc++, etc,\n so we can keep using clink_langs for the priority order.\n\n See: https://github.com/mesonbuild/meson/issues/1653\n '
langs = []
for dep in self.external_deps:
if (dep.language is None):
continue
if (dep.language not in langs):
langs.append(dep.language)
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if (language not in langs):
langs.append(language)
return langs |
def get_clink_dynamic_linker_and_stdlibs(self):
"\n We use the order of languages in `clink_langs` to determine which\n linker to use in case the target has sources compiled with multiple\n compilers. All languages other than those in this list have their own\n linker.\n Note that Vala outputs C code, so Vala sources can use any linker\n that can link compiled C. We don't actually need to add an exception\n for Vala here because of that.\n "
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
dep_langs = self.get_langs_used_by_deps()
for l in clink_langs:
if ((l in self.compilers) or (l in dep_langs)):
try:
linker = all_compilers[l]
except KeyError:
raise MesonException('Could not get a dynamic linker for build target {!r}. Requires a linker for language "{}", but that is not a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if (dl != linker.language):
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return (linker, stdlib_args)
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name)) | 8,917,164,519,977,908,000 | We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that. | mesonbuild/build.py | get_clink_dynamic_linker_and_stdlibs | jmesmon/meson | python | def get_clink_dynamic_linker_and_stdlibs(self):
"\n We use the order of languages in `clink_langs` to determine which\n linker to use in case the target has sources compiled with multiple\n compilers. All languages other than those in this list have their own\n linker.\n Note that Vala outputs C code, so Vala sources can use any linker\n that can link compiled C. We don't actually need to add an exception\n for Vala here because of that.\n "
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
dep_langs = self.get_langs_used_by_deps()
for l in clink_langs:
if ((l in self.compilers) or (l in dep_langs)):
try:
linker = all_compilers[l]
except KeyError:
raise MesonException('Could not get a dynamic linker for build target {!r}. Requires a linker for language "{}", but that is not a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if (dl != linker.language):
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return (linker, stdlib_args)
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name)) |
def get_using_msvc(self):
"\n Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,\n and SharedLibrary for deciding when to use MSVC-specific file naming\n and debug filenames.\n\n If at least some code is built with MSVC and the final library is\n linked with MSVC, we can be sure that some debug info will be\n generated. We only check the dynamic linker here because the static\n linker is guaranteed to be of the same type.\n\n Interesting cases:\n 1. The Vala compiler outputs C code to be compiled by whatever\n C compiler we're using, so all objects will still be created by the\n MSVC compiler.\n 2. If the target contains only objects, process_compilers guesses and\n picks the first compiler that smells right.\n "
(linker, _) = self.get_clink_dynamic_linker_and_stdlibs()
if (linker and (linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd'])):
return True
return False | 5,777,500,192,533,684,000 | Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right. | mesonbuild/build.py | get_using_msvc | jmesmon/meson | python | def get_using_msvc(self):
"\n Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,\n and SharedLibrary for deciding when to use MSVC-specific file naming\n and debug filenames.\n\n If at least some code is built with MSVC and the final library is\n linked with MSVC, we can be sure that some debug info will be\n generated. We only check the dynamic linker here because the static\n linker is guaranteed to be of the same type.\n\n Interesting cases:\n 1. The Vala compiler outputs C code to be compiled by whatever\n C compiler we're using, so all objects will still be created by the\n MSVC compiler.\n 2. If the target contains only objects, process_compilers guesses and\n picks the first compiler that smells right.\n "
(linker, _) = self.get_clink_dynamic_linker_and_stdlibs()
if (linker and (linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd'])):
return True
return False |
def check_module_linking(self):
'\n Warn if shared modules are linked with target: (link_with) #2865\n '
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('target links against shared modules.\nThis is not permitted on OSX')
else:
mlog.warning('target links against shared modules. This is not\nrecommended as it is not supported on some platforms')
return | -970,886,122,344,748,200 | Warn if shared modules are linked with target: (link_with) #2865 | mesonbuild/build.py | check_module_linking | jmesmon/meson | python | def check_module_linking(self):
'\n \n '
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('target links against shared modules.\nThis is not permitted on OSX')
else:
mlog.warning('target links against shared modules. This is not\nrecommended as it is not supported on some platforms')
return |
def description(self):
'Human friendly description of the executable'
return self.name | 4,660,257,787,278,046,000 | Human friendly description of the executable | mesonbuild/build.py | description | jmesmon/meson | python | def description(self):
return self.name |
def get_import_filename(self):
'\n The name of the import library that will be outputted by the compiler\n\n Returns None if there is no import library required for this platform\n '
return self.import_filename | -6,530,916,317,056,780,000 | The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform | mesonbuild/build.py | get_import_filename | jmesmon/meson | python | def get_import_filename(self):
'\n The name of the import library that will be outputted by the compiler\n\n Returns None if there is no import library required for this platform\n '
return self.import_filename |
def determine_filenames(self, is_cross, env):
'\n See https://github.com/mesonbuild/meson/pull/417 for details.\n\n First we determine the filename template (self.filename_tpl), then we\n set the output filename (self.filename).\n\n The template is needed while creating aliases (self.get_aliases),\n which are needed while generating .so shared libraries for Linux.\n\n Besides this, there\'s also the import library name, which is only used\n on Windows since on that platform the linker uses a separate library\n called the "import library" during linking instead of the shared\n library (DLL). The toolchain will output an import library in one of\n two formats: GCC or Visual Studio.\n\n When we\'re building with Visual Studio, the import library that will be\n generated by the toolchain is self.vs_import_filename, and with\n MinGW/GCC, it\'s self.gcc_import_filename. self.import_filename will\n always contain the import library name this target will generate.\n '
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
if ('cs' in self.compilers):
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format((self.prefix if (self.prefix is not None) else ''), self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format((self.prefix if (self.prefix is not None) else 'lib'), self.name)
if self.get_using_msvc():
prefix = ''
self.import_filename = self.vs_import_filename
else:
prefix = 'lib'
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format((self.prefix if (self.prefix is not None) else 'lib'), self.name)
prefix = 'cyg'
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if (self.prefix is None):
self.prefix = prefix
if (self.suffix is None):
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename] | -7,801,708,533,714,602,000 | See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate. | mesonbuild/build.py | determine_filenames | jmesmon/meson | python | def determine_filenames(self, is_cross, env):
'\n See https://github.com/mesonbuild/meson/pull/417 for details.\n\n First we determine the filename template (self.filename_tpl), then we\n set the output filename (self.filename).\n\n The template is needed while creating aliases (self.get_aliases),\n which are needed while generating .so shared libraries for Linux.\n\n Besides this, there\'s also the import library name, which is only used\n on Windows since on that platform the linker uses a separate library\n called the "import library" during linking instead of the shared\n library (DLL). The toolchain will output an import library in one of\n two formats: GCC or Visual Studio.\n\n When we\'re building with Visual Studio, the import library that will be\n generated by the toolchain is self.vs_import_filename, and with\n MinGW/GCC, it\'s self.gcc_import_filename. self.import_filename will\n always contain the import library name this target will generate.\n '
prefix =
suffix =
self.filename_tpl = self.basic_filename_tpl
if ('cs' in self.compilers):
prefix =
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format((self.prefix if (self.prefix is not None) else ), self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format((self.prefix if (self.prefix is not None) else 'lib'), self.name)
if self.get_using_msvc():
prefix =
self.import_filename = self.vs_import_filename
else:
prefix = 'lib'
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format((self.prefix if (self.prefix is not None) else 'lib'), self.name)
prefix = 'cyg'
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if (self.prefix is None):
self.prefix = prefix
if (self.suffix is None):
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename] |
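Not the meson API, just a format() illustration of the Linux branch above: with the default 'lib' prefix, 'so' suffix and an ltversion set, the chosen template expands to the fully versioned library name.

from types import SimpleNamespace

lib = SimpleNamespace(prefix="lib", name="foo", suffix="so", ltversion="0.100.0")
print("{0.prefix}{0.name}.{0.suffix}.{0.ltversion}".format(lib))  # libfoo.so.0.100.0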
def get_import_filename(self):
'\n The name of the import library that will be outputted by the compiler\n\n Returns None if there is no import library required for this platform\n '
return self.import_filename | -6,530,916,317,056,780,000 | The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform | mesonbuild/build.py | get_import_filename | jmesmon/meson | python | def get_import_filename(self):
'\n The name of the import library that will be outputted by the compiler\n\n Returns None if there is no import library required for this platform\n '
return self.import_filename |
def get_aliases(self):
'\n If the versioned library name is libfoo.so.0.100.0, aliases are:\n * libfoo.so.0 (soversion) -> libfoo.so.0.100.0\n * libfoo.so (unversioned; for linking) -> libfoo.so.0\n Same for dylib:\n * libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib\n '
aliases = {}
if ((self.suffix not in ('so', 'dylib')) or (not self.soversion)):
return {}
if ((self.suffix == 'so') and self.ltversion and (self.ltversion != self.soversion)):
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
else:
ltversion_filename = self.filename
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases | -5,496,365,589,624,999,000 | If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib | mesonbuild/build.py | get_aliases | jmesmon/meson | python | def get_aliases(self):
'\n If the versioned library name is libfoo.so.0.100.0, aliases are:\n * libfoo.so.0 (soversion) -> libfoo.so.0.100.0\n * libfoo.so (unversioned; for linking) -> libfoo.so.0\n Same for dylib:\n * libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib\n '
aliases = {}
if ((self.suffix not in ('so', 'dylib')) or (not self.soversion)):
return {}
if ((self.suffix == 'so') and self.ltversion and (self.ltversion != self.soversion)):
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
else:
ltversion_filename = self.filename
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases |
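Spelled out concretely, the alias map described in the docstring above for a Linux library 'foo' with ltversion '0.100.0' and soversion '0' would look like this (illustration only):

expected_aliases = {
    "libfoo.so.0": "libfoo.so.0.100.0",  # soversion alias -> real versioned file
    "libfoo.so": "libfoo.so.0",          # unversioned link name -> soversion alias
}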
def get_transitive_build_target_deps(self):
'\n Recursively fetch the build targets that this custom target depends on,\n whether through `command:`, `depends:`, or `sources:` The recursion is\n only performed on custom targets.\n This is useful for setting PATH on Windows for finding required DLLs.\n F.ex, if you have a python script that loads a C module that links to\n other DLLs in your project.\n '
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps | 8,326,607,069,530,100,000 | Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project. | mesonbuild/build.py | get_transitive_build_target_deps | jmesmon/meson | python | def get_transitive_build_target_deps(self):
'\n Recursively fetch the build targets that this custom target depends on,\n whether through `command:`, `depends:`, or `sources:` The recursion is\n only performed on custom targets.\n This is useful for setting PATH on Windows for finding required DLLs.\n F.ex, if you have a python script that loads a C module that links to\n other DLLs in your project.\n '
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps |
def oauth_url(client_id: Union[(int, str)], *, permissions: Permissions=MISSING, guild: Snowflake=MISSING, redirect_uri: str=MISSING, scopes: Iterable[str]=MISSING, disable_guild_select: bool=False) -> str:
"A helper function that returns the OAuth2 URL for inviting the bot\n into guilds.\n\n Parameters\n -----------\n client_id: Union[:class:`int`, :class:`str`]\n The client ID for your bot.\n permissions: :class:`~discord.Permissions`\n The permissions you're requesting. If not given then you won't be requesting any\n permissions.\n guild: :class:`~discord.abc.Snowflake`\n The guild to pre-select in the authorization screen, if available.\n redirect_uri: :class:`str`\n An optional valid redirect URI.\n scopes: Iterable[:class:`str`]\n An optional valid list of scopes. Defaults to ``('bot',)``.\n\n .. versionadded:: 1.7\n disable_guild_select: :class:`bool`\n Whether to disallow the user from changing the guild dropdown.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n :class:`str`\n The OAuth2 URL for inviting the bot into guilds.\n "
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += ('&scope=' + '+'.join((scopes or ('bot',))))
if (permissions is not MISSING):
url += f'&permissions={permissions.value}'
if (guild is not MISSING):
url += f'&guild_id={guild.id}'
if (redirect_uri is not MISSING):
from urllib.parse import urlencode
url += ('&response_type=code&' + urlencode({'redirect_uri': redirect_uri}))
if disable_guild_select:
url += '&disable_guild_select=true'
return url | -5,366,905,059,735,044,000 | A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds. | discord/utils.py | oauth_url | Astrea49/enhanced-discord.py | python | def oauth_url(client_id: Union[(int, str)], *, permissions: Permissions=MISSING, guild: Snowflake=MISSING, redirect_uri: str=MISSING, scopes: Iterable[str]=MISSING, disable_guild_select: bool=False) -> str:
"A helper function that returns the OAuth2 URL for inviting the bot\n into guilds.\n\n Parameters\n -----------\n client_id: Union[:class:`int`, :class:`str`]\n The client ID for your bot.\n permissions: :class:`~discord.Permissions`\n The permissions you're requesting. If not given then you won't be requesting any\n permissions.\n guild: :class:`~discord.abc.Snowflake`\n The guild to pre-select in the authorization screen, if available.\n redirect_uri: :class:`str`\n An optional valid redirect URI.\n scopes: Iterable[:class:`str`]\n An optional valid list of scopes. Defaults to ``('bot',)``.\n\n .. versionadded:: 1.7\n disable_guild_select: :class:`bool`\n Whether to disallow the user from changing the guild dropdown.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n :class:`str`\n The OAuth2 URL for inviting the bot into guilds.\n "
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += ('&scope=' + '+'.join((scopes or ('bot',))))
if (permissions is not MISSING):
url += f'&permissions={permissions.value}'
if (guild is not MISSING):
url += f'&guild_id={guild.id}'
if (redirect_uri is not MISSING):
from urllib.parse import urlencode
url += ('&response_type=code&' + urlencode({'redirect_uri': redirect_uri}))
if disable_guild_select:
url += '&disable_guild_select=true'
return url |
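A hedged usage sketch for the oauth_url record above (not part of the dataset); the client ID is a made-up value and the exact query string depends on the permission flags chosen.

import discord

invite = discord.utils.oauth_url(
    123456789012345678,  # hypothetical application/client ID
    permissions=discord.Permissions(read_messages=True, send_messages=True),
    scopes=('bot', 'applications.commands'),
)
# invite should look roughly like:
# https://discord.com/oauth2/authorize?client_id=123456789012345678&scope=bot+applications.commands&permissions=3072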
def snowflake_time(id: int) -> datetime.datetime:
'\n Parameters\n -----------\n id: :class:`int`\n The snowflake ID.\n\n Returns\n --------\n :class:`datetime.datetime`\n An aware datetime in UTC representing the creation time of the snowflake.\n '
timestamp = (((id >> 22) + DISCORD_EPOCH) / 1000)
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc) | -8,368,037,869,531,534,000 | Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake. | discord/utils.py | snowflake_time | Astrea49/enhanced-discord.py | python | def snowflake_time(id: int) -> datetime.datetime:
'\n Parameters\n -----------\n id: :class:`int`\n The snowflake ID.\n\n Returns\n --------\n :class:`datetime.datetime`\n An aware datetime in UTC representing the creation time of the snowflake.\n '
timestamp = (((id >> 22) + DISCORD_EPOCH) / 1000)
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc) |
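A short usage sketch for the snowflake_time record above (not part of the dataset); the ID is the example snowflake from Discord's developer documentation and is only illustrative.

import discord

created = discord.utils.snowflake_time(175928847299117063)
print(created)         # aware UTC datetime; for this ID, around 2016-04-30 11:18:25 UTC
print(created.tzinfo)  # datetime.timezone.utc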
def time_snowflake(dt: datetime.datetime, high: bool=False) -> int:
'Returns a numeric snowflake pretending to be created at the given date.\n\n When using as the lower end of a range, use ``time_snowflake(high=False) - 1``\n to be inclusive, ``high=True`` to be exclusive.\n\n When using as the higher end of a range, use ``time_snowflake(high=True) + 1``\n to be inclusive, ``high=False`` to be exclusive\n\n Parameters\n -----------\n dt: :class:`datetime.datetime`\n A datetime object to convert to a snowflake.\n If naive, the timezone is assumed to be local time.\n high: :class:`bool`\n Whether or not to set the lower 22 bit to high or low.\n\n Returns\n --------\n :class:`int`\n The snowflake representing the time given.\n '
discord_millis = int(((dt.timestamp() * 1000) - DISCORD_EPOCH))
return ((discord_millis << 22) + (((2 ** 22) - 1) if high else 0)) | -2,900,649,015,111,074,300 | Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bit to high or low.
Returns
--------
:class:`int`
The snowflake representing the time given. | discord/utils.py | time_snowflake | Astrea49/enhanced-discord.py | python | def time_snowflake(dt: datetime.datetime, high: bool=False) -> int:
'Returns a numeric snowflake pretending to be created at the given date.\n\n When using as the lower end of a range, use ``time_snowflake(high=False) - 1``\n to be inclusive, ``high=True`` to be exclusive.\n\n When using as the higher end of a range, use ``time_snowflake(high=True) + 1``\n to be inclusive, ``high=False`` to be exclusive\n\n Parameters\n -----------\n dt: :class:`datetime.datetime`\n A datetime object to convert to a snowflake.\n If naive, the timezone is assumed to be local time.\n high: :class:`bool`\n Whether or not to set the lower 22 bit to high or low.\n\n Returns\n --------\n :class:`int`\n The snowflake representing the time given.\n '
discord_millis = int(((dt.timestamp() * 1000) - DISCORD_EPOCH))
return ((discord_millis << 22) + (((2 ** 22) - 1) if high else 0)) |
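A usage sketch for the time_snowflake record above (not part of the dataset), following the inclusive/exclusive convention the docstring describes for range endpoints.

import datetime
import discord

cutoff = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
after_id = discord.utils.time_snowflake(cutoff, high=False) - 1   # inclusive lower bound
before_id = discord.utils.time_snowflake(cutoff, high=True) + 1   # inclusive upper bound
# Snowflakes generated at exactly `cutoff` satisfy: after_id < id < before_id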
def find(predicate: Callable[([T], Any)], seq: Iterable[T]) -> Optional[T]:
"A helper to return the first element found in the sequence\n that meets the predicate. For example: ::\n\n member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)\n\n would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.\n If an entry is not found, then ``None`` is returned.\n\n This is different from :func:`py:filter` due to the fact it stops the moment it finds\n a valid entry.\n\n Parameters\n -----------\n predicate\n A function that returns a boolean-like result.\n seq: :class:`collections.abc.Iterable`\n The iterable to search through.\n "
for element in seq:
if predicate(element):
return element
return None | 8,747,360,362,780,973,000 | A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This is different from :func:`py:filter` due to the fact it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through. | discord/utils.py | find | Astrea49/enhanced-discord.py | python | def find(predicate: Callable[([T], Any)], seq: Iterable[T]) -> Optional[T]:
"A helper to return the first element found in the sequence\n that meets the predicate. For example: ::\n\n member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)\n\n would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.\n If an entry is not found, then ``None`` is returned.\n\n This is different from :func:`py:filter` due to the fact it stops the moment it finds\n a valid entry.\n\n Parameters\n -----------\n predicate\n A function that returns a boolean-like result.\n seq: :class:`collections.abc.Iterable`\n The iterable to search through.\n "
for element in seq:
if predicate(element):
return element
return None |
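A minimal usage sketch for the find record above (not part of the dataset); a plain list is used so the call runs without any Discord connection.

import discord

first_even = discord.utils.find(lambda n: n % 2 == 0, [1, 3, 4, 6])
print(first_even)  # 4 -- iteration stops at the first matching element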