body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
---|---|---|---|---|---|---|---|
def field_isomorphism(a, b, **args):
'Construct an isomorphism between two number fields.'
if (not all((isinstance(_, AlgebraicField) for _ in (a, b)))):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if (a == b):
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if (a.domain == b.domain):
if (m % n):
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = (m // n)
for (p, q) in factorint(da).items():
if ((q % 2) and (db % (p ** k))):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if (result is not None):
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b) | 4,375,923,129,808,113,000 | Construct an isomorphism between two number fields. | diofant/polys/numberfields.py | field_isomorphism | diofant/diofant | python | def field_isomorphism(a, b, **args):
if (not all((isinstance(_, AlgebraicField) for _ in (a, b)))):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if (a == b):
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if (a.domain == b.domain):
if (m % n):
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = (m // n)
for (p, q) in factorint(da).items():
if ((q % 2) and (db % (p ** k))):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if (result is not None):
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b) |
def test_login_required(self):
'Test that login required for retrieving tags'
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) | -5,907,001,046,836,814,000 | Test that login required for retrieving tags | app/recipe/tests/test_tags_api.py | test_login_required | deborahoni/recipe-app-api | python | def test_login_required(self):
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) |
def test_retrieve_tags(self):
'Test retrieving tags'
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data) | -3,301,511,521,326,451,700 | Test retrieving tags | app/recipe/tests/test_tags_api.py | test_retrieve_tags | deborahoni/recipe-app-api | python | def test_retrieve_tags(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data) |
def test_tags_limited_to_user(self):
'Test that tags returned are for authenticated user'
user2 = get_user_model().objects.create_user('[email protected]', 'testpass')
Tag.objects.create(user=user2, name='Tasty')
tag = Tag.objects.create(user=self.user, name='Just Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name) | -6,624,375,918,940,996,000 | Test that tags returned are for authenticated user | app/recipe/tests/test_tags_api.py | test_tags_limited_to_user | deborahoni/recipe-app-api | python | def test_tags_limited_to_user(self):
user2 = get_user_model().objects.create_user('[email protected]', 'testpass')
Tag.objects.create(user=user2, name='Tasty')
tag = Tag.objects.create(user=self.user, name='Just Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name) |
def test_create_tag_successful(self):
'Test creating a new tag'
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.assertTrue(exists) | 6,388,799,538,000,849,000 | Test creating a new tag | app/recipe/tests/test_tags_api.py | test_create_tag_successful | deborahoni/recipe-app-api | python | def test_create_tag_successful(self):
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.assertTrue(exists) |
def test_create_tag_invalid(self):
'Test creating a new tag with invalid payload'
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | -5,803,848,339,914,584,000 | Test creating a new tag with invalid payload | app/recipe/tests/test_tags_api.py | test_create_tag_invalid | deborahoni/recipe-app-api | python | def test_create_tag_invalid(self):
payload = {'name': }
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) |
def test_retrieve_tags_assigned_to_recipes(self):
'Test filtering tags by those assigned to recipes'
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(title='Coriander eggs on toast', time_minutes=10, price=5.0, user=self.user)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data) | -7,826,462,116,507,644,000 | Test filtering tags by those assigned to recipes | app/recipe/tests/test_tags_api.py | test_retrieve_tags_assigned_to_recipes | deborahoni/recipe-app-api | python | def test_retrieve_tags_assigned_to_recipes(self):
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(title='Coriander eggs on toast', time_minutes=10, price=5.0, user=self.user)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data) |
def test_retrieve_tags_assigned_unique(self):
'Test filtering tags by assigned returns unique items'
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(title='Pancakes', time_minutes=5, price=3.0, user=self.user)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(title='Porridge', time_minutes=3, price=2.0, user=self.user)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1) | 1,714,289,436,581,793,000 | Test filtering tags by assigned returns unique items | app/recipe/tests/test_tags_api.py | test_retrieve_tags_assigned_unique | deborahoni/recipe-app-api | python | def test_retrieve_tags_assigned_unique(self):
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(title='Pancakes', time_minutes=5, price=3.0, user=self.user)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(title='Porridge', time_minutes=3, price=2.0, user=self.user)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1) |
def test_missing_name(self):
'\n every question needs a name (or alias of name)\n '
self.assertPyxformXform(name='invalidcols', ss_structure={'survey': [{'type': 'text', 'label': 'label'}]}, errored=True, error__contains=['no name']) | 8,243,894,922,048,404,000 | every question needs a name (or alias of name) | pyxform/tests_v1/test_sheet_columns.py | test_missing_name | medic/pyxform | python | def test_missing_name(self):
'\n \n '
self.assertPyxformXform(name='invalidcols', ss_structure={'survey': [{'type': 'text', 'label': 'label'}]}, errored=True, error__contains=['no name']) |
def test_column_case(self):
'\n Ensure that column name is case insensitive\n '
self.assertPyxformXform(name='mixedcasecolumns', md='\n | Survey | | | |\n | | Type | name | Label |\n | | text | Name | the name |\n | | integer | age | the age |\n | | text | gender | the gender |\n ', errored=False, debug=True) | 1,118,352,065,668,415,700 | Ensure that column name is case insensitive | pyxform/tests_v1/test_sheet_columns.py | test_column_case | medic/pyxform | python | def test_column_case(self):
'\n \n '
self.assertPyxformXform(name='mixedcasecolumns', md='\n | Survey | | | |\n | | Type | name | Label |\n | | text | Name | the name |\n | | integer | age | the age |\n | | text | gender | the gender |\n ', errored=False, debug=True) |
def test_value_and_name(self):
"\n confirm that both 'name' and 'value' columns of choice list work\n "
for name_alias in ['name', 'value']:
self.assertPyxformXform(name='aliases', md=('\n | survey | | | |\n | | type | name | label |\n | | select_one yn | q1 | Question 1 |\n | choices | | | |\n | | list name | %(name_alias)s | label |\n | | yn | yes | Yes |\n | | yn | no | No |\n ' % {u'name_alias': name_alias}), instance__contains=['<q1/>'], model__contains=['<bind nodeset="/aliases/q1" type="select1"/>'], xml__contains=['<select1 ref="/aliases/q1">', '<value>yes</value>', '<value>no</value>', '</select1>']) | -3,907,622,770,719,091,000 | confirm that both 'name' and 'value' columns of choice list work | pyxform/tests_v1/test_sheet_columns.py | test_value_and_name | medic/pyxform | python | def test_value_and_name(self):
"\n \n "
for name_alias in ['name', 'value']:
self.assertPyxformXform(name='aliases', md=('\n | survey | | | |\n | | type | name | label |\n | | select_one yn | q1 | Question 1 |\n | choices | | | |\n | | list name | %(name_alias)s | label |\n | | yn | yes | Yes |\n | | yn | no | No |\n ' % {u'name_alias': name_alias}), instance__contains=['<q1/>'], model__contains=['<bind nodeset="/aliases/q1" type="select1"/>'], xml__contains=['<select1 ref="/aliases/q1">', '<value>yes</value>', '<value>no</value>', '</select1>']) |
def add(self, log_prob, next_word_prob):
' increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n also keeps track of how many of those we have seen '
if (next_word_prob is not None):
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1 | -5,975,998,106,554,211,000 | increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen | fairseq_cli/eval_lm.py | add | liangan1/fairseq | python | def add(self, log_prob, next_word_prob):
' increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n also keeps track of how many of those we have seen '
if (next_word_prob is not None):
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1 |
def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._s == ''):
return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
try:
eval(((self.quote + s) + self.quote))
except UnicodeDecodeError:
if self._safe_mode:
raise
self._safe_mode = True
assert (eval(((self.quote + s) + self.quote)) == self._s)
return s | 1,014,733,903,730,512,800 | The smallest python literal representation of a string
:rtype: str | src/python_minifier/ministring.py | __str__ | clbarnes/python-minifier | python | def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._s == ):
return
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
try:
eval(((self.quote + s) + self.quote))
except UnicodeDecodeError:
if self._safe_mode:
raise
self._safe_mode = True
assert (eval(((self.quote + s) + self.quote)) == self._s)
return s |
def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._b == b''):
return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
assert (eval(((('b' + self.quote) + s) + self.quote)) == self._b)
return s | -7,563,030,493,167,593,000 | The smallest python literal representation of a string
:rtype: str | src/python_minifier/ministring.py | __str__ | clbarnes/python-minifier | python | def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._b == b):
return
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
assert (eval(((('b' + self.quote) + s) + self.quote)) == self._b)
return s |
def test_upload_file(self):
'Integration test for the QuizSubmissionFilesAPI.upload_file method.'
pass | 3,796,981,798,016,427,000 | Integration test for the QuizSubmissionFilesAPI.upload_file method. | py3canvas/tests/quiz_submission_files.py | test_upload_file | tylerclair/py3canvas | python | def test_upload_file(self):
pass |
def dict_to_data(d: dict) -> dict:
'Recursively calls to_data on dict'
return {key: to_data(d[key]) for key in d} | -4,699,255,432,109,351,000 | Recursively calls to_data on dict | cyber_sdk/util/json.py | dict_to_data | SaveTheAles/cyber.py | python | def dict_to_data(d: dict) -> dict:
return {key: to_data(d[key]) for key in d} |
def to_data(self) -> Any:
'Converts the object to its JSON-serializable Python data representation.'
return dict_to_data(copy.deepcopy(self.__dict__)) | -8,924,841,611,247,736,000 | Converts the object to its JSON-serializable Python data representation. | cyber_sdk/util/json.py | to_data | SaveTheAles/cyber.py | python | def to_data(self) -> Any:
return dict_to_data(copy.deepcopy(self.__dict__)) |
def to_json(self) -> str:
'Marshals the object into a stringified JSON serialization. Keys are first sorted\n and the JSON rendered removes all unnecessary whitespace.\n\n Returns:\n str: JSON string representation\n '
return json.dumps(self.to_data(), sort_keys=True, separators=(',', ':')) | -9,101,289,065,471,352,000 | Marshals the object into a stringified JSON serialization. Keys are first sorted
and the JSON rendered removes all unnecessary whitespace.
Returns:
str: JSON string representation | cyber_sdk/util/json.py | to_json | SaveTheAles/cyber.py | python | def to_json(self) -> str:
'Marshals the object into a stringified JSON serialization. Keys are first sorted\n and the JSON rendered removes all unnecessary whitespace.\n\n Returns:\n str: JSON string representation\n '
return json.dumps(self.to_data(), sort_keys=True, separators=(',', ':')) |
def _set_artifact_properties(artifact: types.Artifact, properties: Optional[Dict[(str, Any)]], custom_properties: Optional[Dict[(str, Any)]]):
'Sets properties and custom_properties to the given artifact.'
if (properties is not None):
for (key, value) in properties.items():
setattr(artifact, key, value)
if (custom_properties is not None):
for (key, value) in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(f'Unexpected custom_property value type:{type(value)}') | 8,432,907,034,830,028,000 | Sets properties and custom_properties to the given artifact. | tfx/dsl/components/common/importer.py | _set_artifact_properties | SunitRoy2703/tfx | python | def _set_artifact_properties(artifact: types.Artifact, properties: Optional[Dict[(str, Any)]], custom_properties: Optional[Dict[(str, Any)]]):
if (properties is not None):
for (key, value) in properties.items():
setattr(artifact, key, value)
if (custom_properties is not None):
for (key, value) in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(f'Unexpected custom_property value type:{type(value)}') |
def _prepare_artifact(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]) -> types.Artifact:
"Prepares the Importer's output artifact.\n\n If there is already an artifact in MLMD with the same URI and properties /\n custom properties, that artifact will be reused unless the `reimport`\n argument is set to True.\n\n Args:\n metadata_handler: The handler of MLMD.\n uri: The uri of the artifact.\n properties: The properties of the artifact, given as a dictionary from\n string keys to integer / string values. Must conform to the declared\n properties of the destination channel's output type.\n custom_properties: The custom properties of the artifact, given as a\n dictionary from string keys to integer / string values.\n reimport: If set to True, will register a new artifact even if it already\n exists in the database.\n output_artifact_class: The class of the output artifact.\n mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.\n\n Returns:\n An Artifact object representing the imported artifact.\n "
absl.logging.info(('Processing source uri: %s, properties: %s, custom_properties: %s' % (uri, properties, custom_properties)))
for (key, value) in custom_properties.items():
if (not isinstance(value, (int, str, bytes))):
raise ValueError(('Custom property value for key %r must be a string or integer (got %r instead)' % (key, value)))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(uri)
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for (key, value) in properties.items():
if (getattr(candidate_artifact, key) != value):
is_candidate = False
break
for (key, value) in custom_properties.items():
if isinstance(value, int):
if (candidate_artifact.get_int_custom_property(key) != value):
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if (candidate_artifact.get_string_custom_property(key) != value):
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
if (bool(previous_artifacts) and (not reimport)):
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=(lambda m: m.id)))
return result | -617,003,016,772,959,400 | Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact. | tfx/dsl/components/common/importer.py | _prepare_artifact | SunitRoy2703/tfx | python | def _prepare_artifact(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]) -> types.Artifact:
"Prepares the Importer's output artifact.\n\n If there is already an artifact in MLMD with the same URI and properties /\n custom properties, that artifact will be reused unless the `reimport`\n argument is set to True.\n\n Args:\n metadata_handler: The handler of MLMD.\n uri: The uri of the artifact.\n properties: The properties of the artifact, given as a dictionary from\n string keys to integer / string values. Must conform to the declared\n properties of the destination channel's output type.\n custom_properties: The custom properties of the artifact, given as a\n dictionary from string keys to integer / string values.\n reimport: If set to True, will register a new artifact even if it already\n exists in the database.\n output_artifact_class: The class of the output artifact.\n mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.\n\n Returns:\n An Artifact object representing the imported artifact.\n "
absl.logging.info(('Processing source uri: %s, properties: %s, custom_properties: %s' % (uri, properties, custom_properties)))
for (key, value) in custom_properties.items():
if (not isinstance(value, (int, str, bytes))):
raise ValueError(('Custom property value for key %r must be a string or integer (got %r instead)' % (key, value)))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(uri)
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for (key, value) in properties.items():
if (getattr(candidate_artifact, key) != value):
is_candidate = False
break
for (key, value) in custom_properties.items():
if isinstance(value, int):
if (candidate_artifact.get_int_custom_property(key) != value):
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if (candidate_artifact.get_string_custom_property(key) != value):
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
if (bool(previous_artifacts) and (not reimport)):
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=(lambda m: m.id)))
return result |
def generate_output_dict(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]=None) -> Dict[(str, List[types.Artifact])]:
"Generates importer's output dict.\n\n If there is already an artifact in MLMD with the same URI and properties /\n custom properties, that artifact will be reused unless the `reimport`\n argument is set to True.\n\n Args:\n metadata_handler: The handler of MLMD.\n uri: The uri of the artifact.\n properties: The properties of the artifact, given as a dictionary from\n string keys to integer / string values. Must conform to the declared\n properties of the destination channel's output type.\n custom_properties: The custom properties of the artifact, given as a\n dictionary from string keys to integer / string values.\n reimport: If set to True, will register a new artifact even if it already\n exists in the database.\n output_artifact_class: The class of the output artifact.\n mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.\n\n Returns:\n a dictionary with the only key `result` whose value is the Artifact.\n "
return {IMPORT_RESULT_KEY: [_prepare_artifact(metadata_handler, uri=uri, properties=properties, custom_properties=custom_properties, output_artifact_class=output_artifact_class, mlmd_artifact_type=mlmd_artifact_type, reimport=reimport)]} | -1,837,062,771,133,348,600 | Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact. | tfx/dsl/components/common/importer.py | generate_output_dict | SunitRoy2703/tfx | python | def generate_output_dict(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]=None) -> Dict[(str, List[types.Artifact])]:
"Generates importer's output dict.\n\n If there is already an artifact in MLMD with the same URI and properties /\n custom properties, that artifact will be reused unless the `reimport`\n argument is set to True.\n\n Args:\n metadata_handler: The handler of MLMD.\n uri: The uri of the artifact.\n properties: The properties of the artifact, given as a dictionary from\n string keys to integer / string values. Must conform to the declared\n properties of the destination channel's output type.\n custom_properties: The custom properties of the artifact, given as a\n dictionary from string keys to integer / string values.\n reimport: If set to True, will register a new artifact even if it already\n exists in the database.\n output_artifact_class: The class of the output artifact.\n mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.\n\n Returns:\n a dictionary with the only key `result` whose value is the Artifact.\n "
return {IMPORT_RESULT_KEY: [_prepare_artifact(metadata_handler, uri=uri, properties=properties, custom_properties=custom_properties, output_artifact_class=output_artifact_class, mlmd_artifact_type=mlmd_artifact_type, reimport=reimport)]} |
def __init__(self, source_uri: str, artifact_type: Type[types.Artifact], reimport: Optional[bool]=False, properties: Optional[Dict[(str, Union[(str, int)])]]=None, custom_properties: Optional[Dict[(str, Union[(str, int)])]]=None):
'Init function for the Importer.\n\n Args:\n source_uri: the URI of the resource that needs to be registered.\n artifact_type: the type of the artifact to import.\n reimport: whether or not to re-import as a new artifact if the URI has\n been imported in before.\n properties: Dictionary of properties for the imported Artifact. These\n properties should be ones declared for the given artifact_type (see the\n PROPERTIES attribute of the definition of the type for details).\n custom_properties: Dictionary of custom properties for the imported\n Artifact. These properties should be of type Text or int.\n '
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
self._output_dict = {IMPORT_RESULT_KEY: types.Channel(type=artifact_type, additional_properties=properties, additional_custom_properties=custom_properties).set_artifacts([artifact])}
super().__init__(driver_class=ImporterDriver) | -3,162,124,771,600,060,400 | Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
been imported in before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int. | tfx/dsl/components/common/importer.py | __init__ | SunitRoy2703/tfx | python | def __init__(self, source_uri: str, artifact_type: Type[types.Artifact], reimport: Optional[bool]=False, properties: Optional[Dict[(str, Union[(str, int)])]]=None, custom_properties: Optional[Dict[(str, Union[(str, int)])]]=None):
'Init function for the Importer.\n\n Args:\n source_uri: the URI of the resource that needs to be registered.\n artifact_type: the type of the artifact to import.\n reimport: whether or not to re-import as a new artifact if the URI has\n been imported in before.\n properties: Dictionary of properties for the imported Artifact. These\n properties should be ones declared for the given artifact_type (see the\n PROPERTIES attribute of the definition of the type for details).\n custom_properties: Dictionary of custom properties for the imported\n Artifact. These properties should be of type Text or int.\n '
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
self._output_dict = {IMPORT_RESULT_KEY: types.Channel(type=artifact_type, additional_properties=properties, additional_custom_properties=custom_properties).set_artifacts([artifact])}
super().__init__(driver_class=ImporterDriver) |
@property
def outputs(self) -> Dict[(str, Any)]:
'Output Channel dict that contains imported artifacts.'
return self._output_dict | -4,582,220,960,787,554,000 | Output Channel dict that contains imported artifacts. | tfx/dsl/components/common/importer.py | outputs | SunitRoy2703/tfx | python | @property
def outputs(self) -> Dict[(str, Any)]:
return self._output_dict |
def on_transaction_end(session):
'\n Decorator for a function which should run after a top-level transaction ended.\n\n Transactions that are either implicitly or explicitly committed or rolled back will be\n closed at the end of a Pyramid view. This is here for cleaning up caches so that\n code after the view, exception views for example, will not be able to access\n detached instances.\n\n Example usage:\n\n .. code-block:: python\n\n @util.db.on_transaction_end(session)\n def flush_cache():\n self._cache = {}\n\n '
def decorate(func):
def _handler(_, transaction):
if (transaction.parent is None):
func()
sqlalchemy.event.listen(session, 'after_transaction_end', _handler)
return func
return decorate | -8,933,014,282,677,295,000 | Decorator for a function which should run after a top-level transaction ended.
Transactions that are either implicitly or explicitly committed or rolled back will be
closed at the end of a Pyramid view. This is here for cleaning up caches so that
code after the view, exception views for example, will not be able to access
detached instances.
Example usage:
.. code-block:: python
@util.db.on_transaction_end(session)
def flush_cache():
self._cache = {} | h/util/db.py | on_transaction_end | Brahim109/h | python | def on_transaction_end(session):
'\n Decorator for a function which should run after a top-level transaction ended.\n\n Transactions that are either implicitly or explicitly committed or rolled back will be\n closed at the end of a Pyramid view. This is here for cleaning up caches so that\n code after the view, exception views for example, will not be able to access\n detached instances.\n\n Example usage:\n\n .. code-block:: python\n\n @util.db.on_transaction_end(session)\n def flush_cache():\n self._cache = {}\n\n '
def decorate(func):
def _handler(_, transaction):
if (transaction.parent is None):
func()
sqlalchemy.event.listen(session, 'after_transaction_end', _handler)
return func
return decorate |
def load(path, num_cpu=16):
'Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n num_cpu: int\n number of cpus to use for executing the policy\n\n Returns\n -------\n act: ActWrapper\n function that takes a batch of observations\n and returns actions.\n '
return ActWrapper.load(path, num_cpu=num_cpu) | 4,148,967,975,659,665,000 | Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
num_cpu: int
number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions. | baselines/deepq/simple.py | load | hyperdo/python2-baselines | python | def load(path, num_cpu=16):
'Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n num_cpu: int\n number of cpus to use for executing the policy\n\n Returns\n -------\n act: ActWrapper\n function that takes a batch of observations\n and returns actions.\n '
return ActWrapper.load(path, num_cpu=num_cpu) |
def learn(env, q_func, lr=0.0005, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-06, num_cpu=16, callback=None):
'Train a deepq model.\n\n Parameters\n -------\n env : gym.Env\n environment to train on\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n lr: float\n learning rate for adam optimizer\n max_timesteps: int\n number of env steps to optimizer for\n buffer_size: int\n size of the replay buffer\n exploration_fraction: float\n fraction of entire training period over which the exploration rate is annealed\n exploration_final_eps: float\n final value of random action probability\n train_freq: int\n update the model every `train_freq` steps.\n batch_size: int\n size of a batched sampled from replay buffer for training\n print_freq: int\n how often to print out training progress\n set to None to disable printing\n checkpoint_freq: int\n how often to save the model. This is so that the best version is restored\n at the end of the training. If you do not wish to restore the best version at\n the end of the training set this variable to None.\n learning_starts: int\n how many steps of the model to collect transitions for before learning starts\n gamma: float\n discount factor\n target_network_update_freq: int\n update the target network every `target_network_update_freq` steps.\n prioritized_replay: True\n if True prioritized replay buffer will be used.\n prioritized_replay_alpha: float\n alpha parameter for prioritized replay buffer\n prioritized_replay_beta0: float\n initial value of beta for prioritized replay buffer\n prioritized_replay_beta_iters: int\n number of iterations over which beta will be annealed from initial value\n to 1.0. If set to None equals to max_timesteps.\n prioritized_replay_eps: float\n epsilon to add to the TD errors when updating priorities.\n num_cpu: int\n number of cpus to use for training\n callback: (locals, globals) -> None\n function called at every steps with state of the algorithm.\n If callback returns true training stops.\n\n Returns\n -------\n act: ActWrapper\n Wrapper over act function. Adds ability to save it and load it.\n See header of baselines/deepq/categorical.py for details on the act function.\n '
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
(act, train, update_target, debug) = build_train(make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10)
act_params = {'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n}
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if (prioritized_replay_beta_iters is None):
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
exploration = LinearSchedule(schedule_timesteps=int((exploration_fraction * max_timesteps)), initial_p=1.0, final_p=exploration_final_eps)
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, 'model')
for t in range(max_timesteps):
if (callback is not None):
if callback(locals(), globals()):
break
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
(new_obs, rew, done, _) = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[(- 1)] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if ((t > learning_starts) and ((t % train_freq) == 0)):
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
(obses_t, actions, rewards, obses_tp1, dones) = replay_buffer.sample(batch_size)
(weights, batch_idxes) = (np.ones_like(rewards), None)
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = (np.abs(td_errors) + prioritized_replay_eps)
replay_buffer.update_priorities(batch_idxes, new_priorities)
if ((t > learning_starts) and ((t % target_network_update_freq) == 0)):
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[(- 101):(- 1)]), 1)
num_episodes = len(episode_rewards)
if (done and (print_freq is not None) and ((len(episode_rewards) % print_freq) == 0)):
logger.record_tabular('steps', t)
logger.record_tabular('episodes', num_episodes)
logger.record_tabular('mean 100 episode reward', mean_100ep_reward)
logger.record_tabular('% time spent exploring', int((100 * exploration.value(t))))
logger.dump_tabular()
if ((checkpoint_freq is not None) and (t > learning_starts) and (num_episodes > 100) and ((t % checkpoint_freq) == 0)):
if ((saved_mean_reward is None) or (mean_100ep_reward > saved_mean_reward)):
if (print_freq is not None):
logger.log('Saving model due to mean reward increase: {} -> {}'.format(saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if (print_freq is not None):
logger.log('Restored model with mean reward: {}'.format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params) | -7,016,482,687,221,096,000 | Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function. | baselines/deepq/simple.py | learn | hyperdo/python2-baselines | python | def learn(env, q_func, lr=0.0005, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-06, num_cpu=16, callback=None):
'Train a deepq model.\n\n Parameters\n -------\n env : gym.Env\n environment to train on\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n lr: float\n learning rate for adam optimizer\n max_timesteps: int\n number of env steps to optimizer for\n buffer_size: int\n size of the replay buffer\n exploration_fraction: float\n fraction of entire training period over which the exploration rate is annealed\n exploration_final_eps: float\n final value of random action probability\n train_freq: int\n update the model every `train_freq` steps.\n batch_size: int\n size of a batched sampled from replay buffer for training\n print_freq: int\n how often to print out training progress\n set to None to disable printing\n checkpoint_freq: int\n how often to save the model. This is so that the best version is restored\n at the end of the training. If you do not wish to restore the best version at\n the end of the training set this variable to None.\n learning_starts: int\n how many steps of the model to collect transitions for before learning starts\n gamma: float\n discount factor\n target_network_update_freq: int\n update the target network every `target_network_update_freq` steps.\n prioritized_replay: True\n if True prioritized replay buffer will be used.\n prioritized_replay_alpha: float\n alpha parameter for prioritized replay buffer\n prioritized_replay_beta0: float\n initial value of beta for prioritized replay buffer\n prioritized_replay_beta_iters: int\n number of iterations over which beta will be annealed from initial value\n to 1.0. If set to None equals to max_timesteps.\n prioritized_replay_eps: float\n epsilon to add to the TD errors when updating priorities.\n num_cpu: int\n number of cpus to use for training\n callback: (locals, globals) -> None\n function called at every steps with state of the algorithm.\n If callback returns true training stops.\n\n Returns\n -------\n act: ActWrapper\n Wrapper over act function. Adds ability to save it and load it.\n See header of baselines/deepq/categorical.py for details on the act function.\n '
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
(act, train, update_target, debug) = build_train(make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10)
act_params = {'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n}
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if (prioritized_replay_beta_iters is None):
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
exploration = LinearSchedule(schedule_timesteps=int((exploration_fraction * max_timesteps)), initial_p=1.0, final_p=exploration_final_eps)
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, 'model')
for t in range(max_timesteps):
if (callback is not None):
if callback(locals(), globals()):
break
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
(new_obs, rew, done, _) = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[(- 1)] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if ((t > learning_starts) and ((t % train_freq) == 0)):
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
(obses_t, actions, rewards, obses_tp1, dones) = replay_buffer.sample(batch_size)
(weights, batch_idxes) = (np.ones_like(rewards), None)
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = (np.abs(td_errors) + prioritized_replay_eps)
replay_buffer.update_priorities(batch_idxes, new_priorities)
if ((t > learning_starts) and ((t % target_network_update_freq) == 0)):
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[(- 101):(- 1)]), 1)
num_episodes = len(episode_rewards)
if (done and (print_freq is not None) and ((len(episode_rewards) % print_freq) == 0)):
logger.record_tabular('steps', t)
logger.record_tabular('episodes', num_episodes)
logger.record_tabular('mean 100 episode reward', mean_100ep_reward)
logger.record_tabular('% time spent exploring', int((100 * exploration.value(t))))
logger.dump_tabular()
if ((checkpoint_freq is not None) and (t > learning_starts) and (num_episodes > 100) and ((t % checkpoint_freq) == 0)):
if ((saved_mean_reward is None) or (mean_100ep_reward > saved_mean_reward)):
if (print_freq is not None):
logger.log('Saving model due to mean reward increase: {} -> {}'.format(saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if (print_freq is not None):
logger.log('Restored model with mean reward: {}'.format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params) |
def save(self, path):
'Save model to a pickle located at `path`'
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, 'model'))
arc_name = os.path.join(td, 'packed.zip')
with zipfile.ZipFile(arc_name, 'w') as zipf:
for (root, dirs, files) in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if (file_path != arc_name):
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, 'rb') as f:
model_data = f.read()
with open(path, 'wb') as f:
dill.dump((model_data, self._act_params), f) | 1,592,568,679,855,833,900 | Save model to a pickle located at `path` | baselines/deepq/simple.py | save | hyperdo/python2-baselines | python | def save(self, path):
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, 'model'))
arc_name = os.path.join(td, 'packed.zip')
with zipfile.ZipFile(arc_name, 'w') as zipf:
for (root, dirs, files) in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if (file_path != arc_name):
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, 'rb') as f:
model_data = f.read()
with open(path, 'wb') as f:
dill.dump((model_data, self._act_params), f) |
def _generate_detections_v1(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections given the model outputs.\n\n The implementation unrolls the batch dimension and process images one by one.\n It required the batch dimension to be statically known and it is TPU\n compatible.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]` for box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n attributes: None or a dict of (attribute_name, attributes) pairs. Each\n attributes is a `tf.Tensor` with shape\n `[batch_size, N, num_classes, attribute_size]` or\n `[batch_size, N, 1, attribute_size]` for attribute predictions on all\n feature levels. The N is the number of total anchors on all levels. Can\n be None if no attribute learning is required.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A scalar representing maximum number of boxes retained\n over all classes.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.\n\n Returns:\n nms_boxes: A `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections, 4]` representing top detected boxes in\n `[y1, x1, y2, x2]`.\n nms_scores: A `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections]` representing sorted confidence scores\n for detected boxes. The values are between `[0, 1]`.\n nms_classes: An `int` type `tf.Tensor` of shape\n `[batch_size, max_num_detections]` representing classes for detected\n boxes.\n valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the\n top `valid_detections` boxes are valid detections.\n nms_attributes: None or a dict of (attribute_name, attributes). Each\n attribute is a `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections, attribute_size]` representing attribute\n predictions for detected boxes. Can be an empty dict if no attribute\n learning is required.\n '
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i, nmsed_att_i) = _generate_detections_per_image(boxes[i], scores[i], attributes=({att_name: att[i] for (att_name, att) in attributes.items()} if attributes else {}), pre_nms_top_k=pre_nms_top_k, pre_nms_score_threshold=pre_nms_score_threshold, nms_iou_threshold=nms_iou_threshold, max_num_detections=max_num_detections, soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) | -4,265,795,419,135,677,000 | Generates the final detections given the model outputs.
The implementation unrolls the batch dimension and process images one by one.
It required the batch dimension to be statically known and it is TPU
compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required. | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_v1 | 915067906/models | python | def _generate_detections_v1(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections given the model outputs.\n\n The implementation unrolls the batch dimension and process images one by one.\n It required the batch dimension to be statically known and it is TPU\n compatible.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]` for box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n attributes: None or a dict of (attribute_name, attributes) pairs. Each\n attributes is a `tf.Tensor` with shape\n `[batch_size, N, num_classes, attribute_size]` or\n `[batch_size, N, 1, attribute_size]` for attribute predictions on all\n feature levels. The N is the number of total anchors on all levels. Can\n be None if no attribute learning is required.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A scalar representing maximum number of boxes retained\n over all classes.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.\n\n Returns:\n nms_boxes: A `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections, 4]` representing top detected boxes in\n `[y1, x1, y2, x2]`.\n nms_scores: A `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections]` representing sorted confidence scores\n for detected boxes. The values are between `[0, 1]`.\n nms_classes: An `int` type `tf.Tensor` of shape\n `[batch_size, max_num_detections]` representing classes for detected\n boxes.\n valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the\n top `valid_detections` boxes are valid detections.\n nms_attributes: None or a dict of (attribute_name, attributes). Each\n attribute is a `float` type `tf.Tensor` of shape\n `[batch_size, max_num_detections, attribute_size]` representing attribute\n predictions for detected boxes. Can be an empty dict if no attribute\n learning is required.\n '
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i, nmsed_att_i) = _generate_detections_per_image(boxes[i], scores[i], attributes=({att_name: att[i] for (att_name, att) in attributes.items()} if attributes else {}), pre_nms_top_k=pre_nms_top_k, pre_nms_score_threshold=pre_nms_score_threshold, nms_iou_threshold=nms_iou_threshold, max_num_detections=max_num_detections, soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) |
def _generate_detections_per_image(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections per image given the model outputs.\n\n Args:\n boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which\n box predictions on all feature levels. The N is the number of total\n anchors on all levels.\n scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class\n probability on all feature levels. The N is the number of total anchors on\n all levels. The num_classes is the number of classes predicted by the\n model. Note that the class_outputs here is the raw score.\n attributes: If not None, a dict of `tf.Tensor`. Each value is in shape\n `[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of\n attribute predictions on all feature levels. The N is the number of total\n anchors on all levels.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n If set to None, `tf.image.non_max_suppression_padded` is called instead.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`\n representing top detected boxes in `[y1, x1, y2, x2]`.\n nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing\n sorted confidence scores for detected boxes. The values are between [0,\n 1].\n nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing\n classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [1] only the top\n `valid_detections` boxes are valid detections.\n nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape\n `[max_num_detections, attribute_size]` representing attribute predictions\n for detected boxes. Can be an empty dict if `attributes` is None.\n '
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min((num_classes_for_box - 1), i)]
scores_i = scores[:, i]
(scores_i, indices) = tf.nn.top_k(scores_i, k=tf.minimum(tf.shape(scores_i)[(- 1)], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if (soft_nms_sigma is not None):
(nmsed_indices_i, nmsed_scores_i) = tf.image.non_max_suppression_with_scores(tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, soft_nms_sigma=soft_nms_sigma, name=('nms_detections_' + str(i)))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_scores_i, max_num_detections, (- 1.0))
else:
(nmsed_indices_i, nmsed_num_valid_i) = tf.image.non_max_suppression_padded(tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_to_max_output_size=True, name=('nms_detections_' + str(i)))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
nmsed_scores_i = tf.where(tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]), nmsed_scores_i, (- tf.ones_like(nmsed_scores_i)))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for (att_name, att) in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min((num_classes_for_attr - 1), i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
(nmsed_scores, indices) = tf.nn.top_k(nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(tf.cast(tf.greater(nmsed_scores, (- 1)), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name], indices)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) | 8,376,549,356,609,634,000 | Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
box predictions on all feature levels. The N is the number of total
anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [1] only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None. | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_per_image | 915067906/models | python | def _generate_detections_per_image(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections per image given the model outputs.\n\n Args:\n boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which\n box predictions on all feature levels. The N is the number of total\n anchors on all levels.\n scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class\n probability on all feature levels. The N is the number of total anchors on\n all levels. The num_classes is the number of classes predicted by the\n model. Note that the class_outputs here is the raw score.\n attributes: If not None, a dict of `tf.Tensor`. Each value is in shape\n `[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of\n attribute predictions on all feature levels. The N is the number of total\n anchors on all levels.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n If set to None, `tf.image.non_max_suppression_padded` is called instead.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`\n representing top detected boxes in `[y1, x1, y2, x2]`.\n nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing\n sorted confidence scores for detected boxes. The values are between [0,\n 1].\n nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing\n classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [1] only the top\n `valid_detections` boxes are valid detections.\n nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape\n `[max_num_detections, attribute_size]` representing attribute predictions\n for detected boxes. Can be an empty dict if `attributes` is None.\n '
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min((num_classes_for_box - 1), i)]
scores_i = scores[:, i]
(scores_i, indices) = tf.nn.top_k(scores_i, k=tf.minimum(tf.shape(scores_i)[(- 1)], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if (soft_nms_sigma is not None):
(nmsed_indices_i, nmsed_scores_i) = tf.image.non_max_suppression_with_scores(tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, soft_nms_sigma=soft_nms_sigma, name=('nms_detections_' + str(i)))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_scores_i, max_num_detections, (- 1.0))
else:
(nmsed_indices_i, nmsed_num_valid_i) = tf.image.non_max_suppression_padded(tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_to_max_output_size=True, name=('nms_detections_' + str(i)))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
nmsed_scores_i = tf.where(tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]), nmsed_scores_i, (- tf.ones_like(nmsed_scores_i)))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for (att_name, att) in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min((num_classes_for_attr - 1), i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
(nmsed_scores, indices) = tf.nn.top_k(nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(tf.cast(tf.greater(nmsed_scores, (- 1)), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name], indices)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) |
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
'Selects top_k scores and indices for each class.\n\n Args:\n scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class logit outputs on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model.\n pre_nms_num_detections: Number of candidates before NMS.\n\n Returns:\n scores and indices: A `tf.Tensor` with shape\n `[batch_size, pre_nms_num_detections, num_classes]`.\n '
(batch_size, num_anchors, num_class) = scores_in.get_shape().as_list()
if (batch_size is None):
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [(- 1), num_anchors])
(top_k_scores, top_k_indices) = tf.nn.top_k(scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores, [batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices, [batch_size, num_class, pre_nms_num_detections])
return (tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])) | -961,165,520,036,506,900 | Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`. | official/vision/beta/modeling/layers/detection_generator.py | _select_top_k_scores | 915067906/models | python | def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
'Selects top_k scores and indices for each class.\n\n Args:\n scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class logit outputs on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model.\n pre_nms_num_detections: Number of candidates before NMS.\n\n Returns:\n scores and indices: A `tf.Tensor` with shape\n `[batch_size, pre_nms_num_detections, num_classes]`.\n '
(batch_size, num_anchors, num_class) = scores_in.get_shape().as_list()
if (batch_size is None):
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [(- 1), num_anchors])
(top_k_scores, top_k_indices) = tf.nn.top_k(scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores, [batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices, [batch_size, num_class, pre_nms_num_detections])
return (tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])) |
def _generate_detections_v2(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100):
'Generates the final detections given the model outputs.\n\n This implementation unrolls classes dimension while using the tf.while_loop\n to implement the batched NMS, so that it can be parallelized at the batch\n dimension. It should give better performance comparing to v1 implementation.\n It is TPU compatible.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]\n representing top detected boxes in [y1, x1, y2, x2].\n nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]\n representing sorted confidence scores for detected boxes. The values are\n between [0, 1].\n nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]\n representing classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [batch_size] only the top\n `valid_detections` boxes are valid detections.\n '
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
(batch_size, _, num_classes_for_box, _) = boxes.get_shape().as_list()
if (batch_size is None):
batch_size = tf.shape(boxes)[0]
(_, total_anchors, num_classes) = scores.get_shape().as_list()
(scores, indices) = _select_top_k_scores(scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min((num_classes_for_box - 1), i), :]
scores_i = scores[:, :, i]
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
(boxes_i, scores_i) = box_ops.filter_boxes_by_scores(boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(tf.cast(scores_i, tf.float32), tf.cast(boxes_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
(nmsed_scores, indices) = tf.nn.top_k(nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(input_tensor=tf.cast(tf.greater(nmsed_scores, (- 1)), tf.int32), axis=1)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) | -413,168,451,629,743,200 | Generates the final detections given the model outputs.
This implementation unrolls classes dimension while using the tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance comparing to v1 implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections. | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_v2 | 915067906/models | python | def _generate_detections_v2(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100):
'Generates the final detections given the model outputs.\n\n This implementation unrolls classes dimension while using the tf.while_loop\n to implement the batched NMS, so that it can be parallelized at the batch\n dimension. It should give better performance comparing to v1 implementation.\n It is TPU compatible.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n pre_nms_top_k: An `int` number of top candidate detections per class before\n NMS.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]\n representing top detected boxes in [y1, x1, y2, x2].\n nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]\n representing sorted confidence scores for detected boxes. The values are\n between [0, 1].\n nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]\n representing classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [batch_size] only the top\n `valid_detections` boxes are valid detections.\n '
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
(batch_size, _, num_classes_for_box, _) = boxes.get_shape().as_list()
if (batch_size is None):
batch_size = tf.shape(boxes)[0]
(_, total_anchors, num_classes) = scores.get_shape().as_list()
(scores, indices) = _select_top_k_scores(scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min((num_classes_for_box - 1), i), :]
scores_i = scores[:, :, i]
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
(boxes_i, scores_i) = box_ops.filter_boxes_by_scores(boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(tf.cast(scores_i, tf.float32), tf.cast(boxes_i, tf.float32), max_num_detections, iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
(nmsed_scores, indices) = tf.nn.top_k(nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(input_tensor=tf.cast(tf.greater(nmsed_scores, (- 1)), tf.int32), axis=1)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) |
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float, nms_iou_threshold: float, max_num_detections: int):
'Generates detected boxes with scores and classes for one-stage detector.\n\n The function takes output of multi-level ConvNets and anchor boxes and\n generates detected boxes. Note that this used batched nms, which is not\n supported on TPU currently.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]\n representing top detected boxes in [y1, x1, y2, x2].\n nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]\n representing sorted confidence scores for detected boxes. The values are\n between [0, 1].\n nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]\n representing classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [batch_size] only the top\n `valid_detections` boxes are valid detections.\n '
with tf.name_scope('generate_detections'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = tf.image.combined_non_max_suppression(boxes, scores, max_output_size_per_class=max_num_detections, max_total_size=max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_per_class=False, clip_boxes=False)
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) | 133,743,826,918,885,950 | Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
generates detected boxes. Note that this used batched nms, which is not
supported on TPU currently.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections. | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_batched | 915067906/models | python | def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float, nms_iou_threshold: float, max_num_detections: int):
'Generates detected boxes with scores and classes for one-stage detector.\n\n The function takes output of multi-level ConvNets and anchor boxes and\n generates detected boxes. Note that this used batched nms, which is not\n supported on TPU currently.\n\n Args:\n boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or\n `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The\n N is the number of total anchors on all levels.\n scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class probability on all feature levels. The N is the number of\n total anchors on all levels. The num_classes is the number of classes\n predicted by the model. Note that the class_outputs here is the raw score.\n pre_nms_score_threshold: A `float` representing the threshold for deciding\n when to remove boxes based on score.\n nms_iou_threshold: A `float` representing the threshold for deciding whether\n boxes overlap too much with respect to IOU.\n max_num_detections: A `scalar` representing maximum number of boxes retained\n over all classes.\n\n Returns:\n nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]\n representing top detected boxes in [y1, x1, y2, x2].\n nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]\n representing sorted confidence scores for detected boxes. The values are\n between [0, 1].\n nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]\n representing classes for detected boxes.\n valid_detections: An `int` tf.Tensor of shape [batch_size] only the top\n `valid_detections` boxes are valid detections.\n '
with tf.name_scope('generate_detections'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = tf.image.combined_non_max_suppression(boxes, scores, max_output_size_per_class=max_num_detections, max_total_size=max_num_detections, iou_threshold=nms_iou_threshold, score_threshold=pre_nms_score_threshold, pad_per_class=False, clip_boxes=False)
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) |
def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v2', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a detection generator.\n\n Args:\n apply_nms: A `bool` of whether or not apply non maximum suppression.\n If False, the decoded boxes and their scores are returned.\n pre_nms_top_k: An `int` of the number of top scores proposals to be kept\n before applying NMS.\n pre_nms_score_threshold: A `float` of the score threshold to apply before\n applying NMS. Proposals whose scores are below this threshold are\n thrown away.\n nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.\n max_num_detections: An `int` of the final number of total detections to\n generate.\n nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.\n use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n **kwargs: Additional keyword arguments passed to Layer.\n '
self._config_dict = {'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma}
super(DetectionGenerator, self).__init__(**kwargs) | -2,313,276,793,241,696,000 | Initializes a detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer. | official/vision/beta/modeling/layers/detection_generator.py | __init__ | 915067906/models | python | def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v2', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a detection generator.\n\n Args:\n apply_nms: A `bool` of whether or not apply non maximum suppression.\n If False, the decoded boxes and their scores are returned.\n pre_nms_top_k: An `int` of the number of top scores proposals to be kept\n before applying NMS.\n pre_nms_score_threshold: A `float` of the score threshold to apply before\n applying NMS. Proposals whose scores are below this threshold are\n thrown away.\n nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.\n max_num_detections: An `int` of the final number of total detections to\n generate.\n nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.\n use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n **kwargs: Additional keyword arguments passed to Layer.\n '
self._config_dict = {'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma}
super(DetectionGenerator, self).__init__(**kwargs) |
def __call__(self, raw_boxes: tf.Tensor, raw_scores: tf.Tensor, anchor_boxes: tf.Tensor, image_shape: tf.Tensor, regression_weights: Optional[List[float]]=None, bbox_per_class: bool=True):
'Generates final detections.\n\n Args:\n raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`\n representing the class-specific box coordinates relative to anchors.\n raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`\n representing the class logits before applying score activiation.\n anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing\n the corresponding anchor boxes w.r.t `box_outputs`.\n image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image\n height and width w.r.t. the scaled image, i.e. the same image space as\n `box_outputs` and `anchor_boxes`.\n regression_weights: A list of four float numbers to scale coordinates.\n bbox_per_class: A `bool`. If True, perform per-class box regression.\n\n Returns:\n If `apply_nms` = True, the return is a dictionary with keys:\n `detection_boxes`: A `float` tf.Tensor of shape\n [batch, max_num_detections, 4] representing top detected boxes in\n [y1, x1, y2, x2].\n `detection_scores`: A `float` `tf.Tensor` of shape\n [batch, max_num_detections] representing sorted confidence scores for\n detected boxes. The values are between [0, 1].\n `detection_classes`: An `int` tf.Tensor of shape\n [batch, max_num_detections] representing classes for detected boxes.\n `num_detections`: An `int` tf.Tensor of shape [batch] only the first\n `num_detections` boxes are valid detections\n If `apply_nms` = False, the return is a dictionary with keys:\n `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]\n representing all the decoded boxes.\n `decoded_box_scores`: A `float` tf.Tensor of shape\n [batch, num_raw_boxes] representing socres of all the decoded boxes.\n '
box_scores = tf.nn.softmax(raw_scores, axis=(- 1))
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[(- 1)]
box_scores = tf.slice(box_scores, [0, 0, 1], [(- 1), (- 1), (- 1)])
if bbox_per_class:
num_detections = (num_locations * (num_classes - 1))
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [(- 1), (- 1), (- 1), (- 1)])
anchor_boxes = tf.tile(tf.expand_dims(anchor_boxes, axis=2), [1, 1, (num_classes - 1), 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
decoded_boxes = box_ops.decode_boxes(raw_boxes, anchor_boxes, weights=regression_weights)
decoded_boxes = box_ops.clip_boxes(decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(decoded_boxes, [batch_size, num_locations, (num_classes - 1), 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if (not self._config_dict['apply_nms']):
return {'decoded_boxes': decoded_boxes, 'decoded_box_scores': box_scores}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if (self._config_dict['nms_version'] == 'batched'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_batched(decoded_boxes, box_scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'])
elif (self._config_dict['nms_version'] == 'v1'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = _generate_detections_v1(decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'])
elif (self._config_dict['nms_version'] == 'v2'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_v2(decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'])
else:
raise ValueError('NMS version {} not supported.'.format(self._config_dict['nms_version']))
nmsed_classes += 1
return {'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores} | -3,888,970,305,849,663,000 | Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
representing the class logits before applying score activiation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
`num_detections`: An `int` tf.Tensor of shape [batch] only the first
`num_detections` boxes are valid detections
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
[batch, num_raw_boxes] representing socres of all the decoded boxes. | official/vision/beta/modeling/layers/detection_generator.py | __call__ | 915067906/models | python | def __call__(self, raw_boxes: tf.Tensor, raw_scores: tf.Tensor, anchor_boxes: tf.Tensor, image_shape: tf.Tensor, regression_weights: Optional[List[float]]=None, bbox_per_class: bool=True):
'Generates final detections.\n\n Args:\n raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`\n representing the class-specific box coordinates relative to anchors.\n raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`\n representing the class logits before applying score activiation.\n anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing\n the corresponding anchor boxes w.r.t `box_outputs`.\n image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image\n height and width w.r.t. the scaled image, i.e. the same image space as\n `box_outputs` and `anchor_boxes`.\n regression_weights: A list of four float numbers to scale coordinates.\n bbox_per_class: A `bool`. If True, perform per-class box regression.\n\n Returns:\n If `apply_nms` = True, the return is a dictionary with keys:\n `detection_boxes`: A `float` tf.Tensor of shape\n [batch, max_num_detections, 4] representing top detected boxes in\n [y1, x1, y2, x2].\n `detection_scores`: A `float` `tf.Tensor` of shape\n [batch, max_num_detections] representing sorted confidence scores for\n detected boxes. The values are between [0, 1].\n `detection_classes`: An `int` tf.Tensor of shape\n [batch, max_num_detections] representing classes for detected boxes.\n `num_detections`: An `int` tf.Tensor of shape [batch] only the first\n `num_detections` boxes are valid detections\n If `apply_nms` = False, the return is a dictionary with keys:\n `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]\n representing all the decoded boxes.\n `decoded_box_scores`: A `float` tf.Tensor of shape\n [batch, num_raw_boxes] representing socres of all the decoded boxes.\n '
box_scores = tf.nn.softmax(raw_scores, axis=(- 1))
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[(- 1)]
box_scores = tf.slice(box_scores, [0, 0, 1], [(- 1), (- 1), (- 1)])
if bbox_per_class:
num_detections = (num_locations * (num_classes - 1))
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [(- 1), (- 1), (- 1), (- 1)])
anchor_boxes = tf.tile(tf.expand_dims(anchor_boxes, axis=2), [1, 1, (num_classes - 1), 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
decoded_boxes = box_ops.decode_boxes(raw_boxes, anchor_boxes, weights=regression_weights)
decoded_boxes = box_ops.clip_boxes(decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(decoded_boxes, [batch_size, num_locations, (num_classes - 1), 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if (not self._config_dict['apply_nms']):
return {'decoded_boxes': decoded_boxes, 'decoded_box_scores': box_scores}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if (self._config_dict['nms_version'] == 'batched'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_batched(decoded_boxes, box_scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'])
elif (self._config_dict['nms_version'] == 'v1'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = _generate_detections_v1(decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'])
elif (self._config_dict['nms_version'] == 'v2'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_v2(decoded_boxes, box_scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'])
else:
raise ValueError('NMS version {} not supported.'.format(self._config_dict['nms_version']))
nmsed_classes += 1
return {'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores} |
def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v1', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a multi-level detection generator.\n\n Args:\n apply_nms: A `bool` of whether or not apply non maximum suppression. If\n False, the decoded boxes and their scores are returned.\n pre_nms_top_k: An `int` of the number of top scores proposals to be kept\n before applying NMS.\n pre_nms_score_threshold: A `float` of the score threshold to apply before\n applying NMS. Proposals whose scores are below this threshold are thrown\n away.\n nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.\n max_num_detections: An `int` of the final number of total detections to\n generate.\n nms_version: A string of `batched`, `v1` or `v2` specifies NMS version\n use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n **kwargs: Additional keyword arguments passed to Layer.\n '
self._config_dict = {'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma}
super(MultilevelDetectionGenerator, self).__init__(**kwargs) | 3,301,950,544,126,950,400 | Initializes a multi-level detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
nms_version: A string of `batched`, `v1` or `v2` specifies NMS version
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer. | official/vision/beta/modeling/layers/detection_generator.py | __init__ | 915067906/models | python | def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v1', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a multi-level detection generator.\n\n Args:\n apply_nms: A `bool` of whether or not apply non maximum suppression. If\n False, the decoded boxes and their scores are returned.\n pre_nms_top_k: An `int` of the number of top scores proposals to be kept\n before applying NMS.\n pre_nms_score_threshold: A `float` of the score threshold to apply before\n applying NMS. Proposals whose scores are below this threshold are thrown\n away.\n nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.\n max_num_detections: An `int` of the final number of total detections to\n generate.\n nms_version: A string of `batched`, `v1` or `v2` specifies NMS version\n use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.\n soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.\n When soft_nms_sigma=0.0, we fall back to standard NMS.\n **kwargs: Additional keyword arguments passed to Layer.\n '
self._config_dict = {'apply_nms': apply_nms, 'pre_nms_top_k': pre_nms_top_k, 'pre_nms_score_threshold': pre_nms_score_threshold, 'nms_iou_threshold': nms_iou_threshold, 'max_num_detections': max_num_detections, 'nms_version': nms_version, 'use_cpu_nms': use_cpu_nms, 'soft_nms_sigma': soft_nms_sigma}
super(MultilevelDetectionGenerator, self).__init__(**kwargs) |
def _decode_multilevel_outputs(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Collects dict of multilevel boxes, scores, attributes into lists.'
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, (max_level + 1)):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i, num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = (feature_h_i * feature_w_i)
num_anchors_per_locations = (num_anchors_per_locations_times_4 // 4)
num_classes = (raw_scores_i.get_shape().as_list()[(- 1)] // num_anchors_per_locations)
scores_i = tf.sigmoid(tf.reshape(raw_scores_i, [batch_size, (num_locations * num_anchors_per_locations), num_classes]))
scores_i = tf.slice(scores_i, [0, 0, 1], [(- 1), (- 1), (- 1)])
anchor_boxes_i = tf.reshape(anchor_boxes[str(i)], [batch_size, (num_locations * num_anchors_per_locations), 4])
raw_boxes_i = tf.reshape(raw_boxes_i, [batch_size, (num_locations * num_anchors_per_locations), 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
boxes_i = box_ops.clip_boxes(boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for (att_name, raw_att) in raw_attributes.items():
attribute_size = (raw_att[str(i)].get_shape().as_list()[(- 1)] // num_anchors_per_locations)
att_i = tf.reshape(raw_att[str(i)], [batch_size, (num_locations * num_anchors_per_locations), attribute_size])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return (boxes, scores, attributes) | 6,158,989,498,364,521,000 | Collects dict of multilevel boxes, scores, attributes into lists. | official/vision/beta/modeling/layers/detection_generator.py | _decode_multilevel_outputs | 915067906/models | python | def _decode_multilevel_outputs(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, (max_level + 1)):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i, num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = (feature_h_i * feature_w_i)
num_anchors_per_locations = (num_anchors_per_locations_times_4 // 4)
num_classes = (raw_scores_i.get_shape().as_list()[(- 1)] // num_anchors_per_locations)
scores_i = tf.sigmoid(tf.reshape(raw_scores_i, [batch_size, (num_locations * num_anchors_per_locations), num_classes]))
scores_i = tf.slice(scores_i, [0, 0, 1], [(- 1), (- 1), (- 1)])
anchor_boxes_i = tf.reshape(anchor_boxes[str(i)], [batch_size, (num_locations * num_anchors_per_locations), 4])
raw_boxes_i = tf.reshape(raw_boxes_i, [batch_size, (num_locations * num_anchors_per_locations), 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
boxes_i = box_ops.clip_boxes(boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for (att_name, raw_att) in raw_attributes.items():
attribute_size = (raw_att[str(i)].get_shape().as_list()[(- 1)] // num_anchors_per_locations)
att_i = tf.reshape(raw_att[str(i)], [batch_size, (num_locations * num_anchors_per_locations), attribute_size])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return (boxes, scores, attributes) |
def __call__(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Generates final detections.\n\n Args:\n raw_boxes: A `dict` with keys representing FPN levels and values\n representing box tenors of shape `[batch, feature_h, feature_w,\n num_anchors * 4]`.\n raw_scores: A `dict` with keys representing FPN levels and values\n representing logit tensors of shape `[batch, feature_h, feature_w,\n num_anchors]`.\n anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing\n the corresponding anchor boxes w.r.t `box_outputs`.\n image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image\n height and width w.r.t. the scaled image, i.e. the same image space as\n `box_outputs` and `anchor_boxes`.\n raw_attributes: If not None, a `dict` of (attribute_name,\n attribute_prediction) pairs. `attribute_prediction` is a dict that\n contains keys representing FPN levels and values representing tenors of\n shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.\n\n Returns:\n If `apply_nms` = True, the return is a dictionary with keys:\n `detection_boxes`: A `float` tf.Tensor of shape\n [batch, max_num_detections, 4] representing top detected boxes in\n [y1, x1, y2, x2].\n `detection_scores`: A `float` tf.Tensor of shape\n [batch, max_num_detections] representing sorted confidence scores for\n detected boxes. The values are between [0, 1].\n `detection_classes`: An `int` tf.Tensor of shape\n [batch, max_num_detections] representing classes for detected boxes.\n `num_detections`: An `int` tf.Tensor of shape [batch] only the first\n `num_detections` boxes are valid detections\n `detection_attributes`: A dict. Values of the dict is a `float`\n tf.Tensor of shape [batch, max_num_detections, attribute_size]\n representing attribute predictions for detected boxes.\n If `apply_nms` = False, the return is a dictionary with keys:\n `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]\n representing all the decoded boxes.\n `decoded_box_scores`: A `float` tf.Tensor of shape\n [batch, num_raw_boxes] representing socres of all the decoded boxes.\n `decoded_box_attributes`: A dict. Values in the dict is a\n `float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]\n representing attribute predictions of all the decoded boxes.\n '
(boxes, scores, attributes) = self._decode_multilevel_outputs(raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if (not self._config_dict['apply_nms']):
return {'decoded_boxes': boxes, 'decoded_box_scores': scores, 'decoded_box_attributes': attributes}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if (raw_attributes and (self._config_dict['nms_version'] != 'v1')):
raise ValueError('Attribute learning is only supported for NMSv1 but NMS {} is used.'.format(self._config_dict['nms_version']))
if (self._config_dict['nms_version'] == 'batched'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_batched(boxes, scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'])
nmsed_attributes = {}
elif (self._config_dict['nms_version'] == 'v1'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) = _generate_detections_v1(boxes, scores, attributes=(attributes if raw_attributes else None), pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'])
elif (self._config_dict['nms_version'] == 'v2'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_v2(boxes, scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'])
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(self._config_dict['nms_version']))
nmsed_classes += 1
return {'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores, 'detection_attributes': nmsed_attributes} | 2,912,919,324,711,744,500 | Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
representing box tenors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
num_anchors]`.
anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
contains keys representing FPN levels and values representing tenors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
`num_detections`: An `int` tf.Tensor of shape [batch] only the first
`num_detections` boxes are valid detections
`detection_attributes`: A dict. Values of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
[batch, num_raw_boxes] representing socres of all the decoded boxes.
`decoded_box_attributes`: A dict. Values in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes. | official/vision/beta/modeling/layers/detection_generator.py | __call__ | 915067906/models | python | def __call__(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Generates final detections.\n\n Args:\n raw_boxes: A `dict` with keys representing FPN levels and values\n representing box tenors of shape `[batch, feature_h, feature_w,\n num_anchors * 4]`.\n raw_scores: A `dict` with keys representing FPN levels and values\n representing logit tensors of shape `[batch, feature_h, feature_w,\n num_anchors]`.\n anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing\n the corresponding anchor boxes w.r.t `box_outputs`.\n image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image\n height and width w.r.t. the scaled image, i.e. the same image space as\n `box_outputs` and `anchor_boxes`.\n raw_attributes: If not None, a `dict` of (attribute_name,\n attribute_prediction) pairs. `attribute_prediction` is a dict that\n contains keys representing FPN levels and values representing tenors of\n shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.\n\n Returns:\n If `apply_nms` = True, the return is a dictionary with keys:\n `detection_boxes`: A `float` tf.Tensor of shape\n [batch, max_num_detections, 4] representing top detected boxes in\n [y1, x1, y2, x2].\n `detection_scores`: A `float` tf.Tensor of shape\n [batch, max_num_detections] representing sorted confidence scores for\n detected boxes. The values are between [0, 1].\n `detection_classes`: An `int` tf.Tensor of shape\n [batch, max_num_detections] representing classes for detected boxes.\n `num_detections`: An `int` tf.Tensor of shape [batch] only the first\n `num_detections` boxes are valid detections\n `detection_attributes`: A dict. Values of the dict is a `float`\n tf.Tensor of shape [batch, max_num_detections, attribute_size]\n representing attribute predictions for detected boxes.\n If `apply_nms` = False, the return is a dictionary with keys:\n `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]\n representing all the decoded boxes.\n `decoded_box_scores`: A `float` tf.Tensor of shape\n [batch, num_raw_boxes] representing socres of all the decoded boxes.\n `decoded_box_attributes`: A dict. Values in the dict is a\n `float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]\n representing attribute predictions of all the decoded boxes.\n '
(boxes, scores, attributes) = self._decode_multilevel_outputs(raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if (not self._config_dict['apply_nms']):
return {'decoded_boxes': boxes, 'decoded_box_scores': scores, 'decoded_box_attributes': attributes}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if (raw_attributes and (self._config_dict['nms_version'] != 'v1')):
raise ValueError('Attribute learning is only supported for NMSv1 but NMS {} is used.'.format(self._config_dict['nms_version']))
if (self._config_dict['nms_version'] == 'batched'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_batched(boxes, scores, self._config_dict['pre_nms_score_threshold'], self._config_dict['nms_iou_threshold'], self._config_dict['max_num_detections'])
nmsed_attributes = {}
elif (self._config_dict['nms_version'] == 'v1'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes) = _generate_detections_v1(boxes, scores, attributes=(attributes if raw_attributes else None), pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'], soft_nms_sigma=self._config_dict['soft_nms_sigma'])
elif (self._config_dict['nms_version'] == 'v2'):
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = _generate_detections_v2(boxes, scores, pre_nms_top_k=self._config_dict['pre_nms_top_k'], pre_nms_score_threshold=self._config_dict['pre_nms_score_threshold'], nms_iou_threshold=self._config_dict['nms_iou_threshold'], max_num_detections=self._config_dict['max_num_detections'])
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(self._config_dict['nms_version']))
nmsed_classes += 1
return {'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_scores': nmsed_scores, 'detection_attributes': nmsed_attributes} |
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
'Check if it is a valid MacOS plist file name.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n plist_name (str): name of the plist.\n top_level (dict[str, object]): plist top-level key.\n '
super(LaunchdPlugin, self).Process(parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level) | -4,250,799,335,899,952,600 | Check if it is a valid MacOS plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key. | plaso/parsers/plist_plugins/launchd.py | Process | ddm1004/plaso | python | def Process(self, parser_mediator, plist_name, top_level, **kwargs):
'Check if it is a valid MacOS plist file name.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n plist_name (str): name of the plist.\n top_level (dict[str, object]): plist top-level key.\n '
super(LaunchdPlugin, self).Process(parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level) |
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
'Extracts launchd information from the plist.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.\n '
label = top_level.get('Label')
command = top_level.get('Program', '')
program_arguments = top_level.get('ProgramArguments')
for argument in program_arguments:
command += (' %s' % argument)
user_name = top_level.get('UserName')
group_name = top_level.get('GroupName')
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Launchd service config {0:s} points to {1:s} with user:{2:s} group:{3:s}'.format(label, command, user_name, group_name)
event_data.key = 'launchdServiceConfig'
event_data.root = '/'
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data) | -2,750,604,028,025,562,000 | Extracts launchd information from the plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | plaso/parsers/plist_plugins/launchd.py | GetEntries | ddm1004/plaso | python | def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
'Extracts launchd information from the plist.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.\n '
label = top_level.get('Label')
command = top_level.get('Program', )
program_arguments = top_level.get('ProgramArguments')
for argument in program_arguments:
command += (' %s' % argument)
user_name = top_level.get('UserName')
group_name = top_level.get('GroupName')
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Launchd service config {0:s} points to {1:s} with user:{2:s} group:{3:s}'.format(label, command, user_name, group_name)
event_data.key = 'launchdServiceConfig'
event_data.root = '/'
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def _write_value(self, value):
'Sets motor value between [-1, 1]'
if (abs(value) <= 0.05):
value = 0.0
mapped_value = int((1300.0 * ((self.alpha * value) + self.beta)))
speed = min(max(mapped_value, (- 1300)), 1300)
self._motor.setSpeed(speed) | -5,645,086,418,558,018,000 | Sets motor value between [-1, 1] | jetbot/motor.py | _write_value | vstoneofficial/jetbot-mecanum | python | def _write_value(self, value):
if (abs(value) <= 0.05):
value = 0.0
mapped_value = int((1300.0 * ((self.alpha * value) + self.beta)))
speed = min(max(mapped_value, (- 1300)), 1300)
self._motor.setSpeed(speed) |
def _release(self):
'Stops motor by releasing control'
self._motor.setSpeed(0) | -3,456,137,181,658,730,500 | Stops motor by releasing control | jetbot/motor.py | _release | vstoneofficial/jetbot-mecanum | python | def _release(self):
self._motor.setSpeed(0) |
def index():
" Module's Home Page "
try:
module_name = settings.modules[module].name_nice
except:
module_name = T('Disaster Victim Identification')
table = s3db.dvi_body
total = db((table.deleted == False)).count()
itable = s3db.dvi_identification
query = ((((table.deleted == False) & (itable.pe_id == table.pe_id)) & (itable.deleted == False)) & (itable.status == 3))
identified = db(query).count()
status = [[str(T('identified')), int(identified)], [str(T('unidentified')), int((total - identified))]]
response.title = module_name
return dict(module_name=module_name, total=total, status=json.dumps(status)) | -8,932,594,271,398,314,000 | Module's Home Page | controllers/dvi.py | index | andygimma/eden | python | def index():
" "
try:
module_name = settings.modules[module].name_nice
except:
module_name = T('Disaster Victim Identification')
table = s3db.dvi_body
total = db((table.deleted == False)).count()
itable = s3db.dvi_identification
query = ((((table.deleted == False) & (itable.pe_id == table.pe_id)) & (itable.deleted == False)) & (itable.status == 3))
identified = db(query).count()
status = [[str(T('identified')), int(identified)], [str(T('unidentified')), int((total - identified))]]
response.title = module_name
return dict(module_name=module_name, total=total, status=json.dumps(status)) |
def recreq():
' Recovery Requests List '
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if (r.interactive and (not r.record)):
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
table.bodies_recovered.writable = False
return True
s3.prep = prep
output = s3_rest_controller()
return output | 3,506,701,886,621,570,000 | Recovery Requests List | controllers/dvi.py | recreq | andygimma/eden | python | def recreq():
' '
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if (r.interactive and (not r.record)):
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
table.bodies_recovered.writable = False
return True
s3.prep = prep
output = s3_rest_controller()
return output |
def morgue():
' Morgue Registry '
morgue_tabs = [(T('Morgue Details'), ''), (T('Bodies'), 'body')]
rheader = S3ResourceHeader([[(T('Morgue'), 'name')]], tabs=morgue_tabs)
def prep(r):
s3db.gis_location_filter(r)
if (r.interactive and r.id and (not r.component)):
field = r.table.obsolete
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader=rheader)
return output | -1,926,710,972,500,599,600 | Morgue Registry | controllers/dvi.py | morgue | andygimma/eden | python | def morgue():
' '
morgue_tabs = [(T('Morgue Details'), ), (T('Bodies'), 'body')]
rheader = S3ResourceHeader([[(T('Morgue'), 'name')]], tabs=morgue_tabs)
def prep(r):
s3db.gis_location_filter(r)
if (r.interactive and r.id and (not r.component)):
field = r.table.obsolete
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader=rheader)
return output |
def body():
' Dead Bodies Registry '
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T('unknown')
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get('status', None)
if (status == 'unidentified'):
query = ((itable.deleted == False) & (itable.status == 3))
ids = db(query).select(itable.pe_id)
ids = [i.pe_id for i in ids]
if ids:
s3.filter = (~ btable.pe_id.belongs(ids))
s3db.configure('dvi_body', main='pe_label', extra='gender')
ntable = s3db.pr_note
ntable.status.readable = False
ntable.status.writable = False
dvi_tabs = [(T('Recovery'), ''), (T('Checklist'), 'checklist'), (T('Images'), 'image'), (T('Physical Description'), 'physical_description'), (T('Effects Inventory'), 'effects'), (T('Journal'), 'note'), (T('Identification'), 'identification')]
rheader = S3ResourceHeader([[(T('ID Tag Number'), 'pe_label')], ['gender'], ['age_group']], tabs=dvi_tabs)
output = s3_rest_controller(rheader=rheader)
return output | 4,108,203,930,432,968,000 | Dead Bodies Registry | controllers/dvi.py | body | andygimma/eden | python | def body():
' '
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T('unknown')
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get('status', None)
if (status == 'unidentified'):
query = ((itable.deleted == False) & (itable.status == 3))
ids = db(query).select(itable.pe_id)
ids = [i.pe_id for i in ids]
if ids:
s3.filter = (~ btable.pe_id.belongs(ids))
s3db.configure('dvi_body', main='pe_label', extra='gender')
ntable = s3db.pr_note
ntable.status.readable = False
ntable.status.writable = False
dvi_tabs = [(T('Recovery'), ), (T('Checklist'), 'checklist'), (T('Images'), 'image'), (T('Physical Description'), 'physical_description'), (T('Effects Inventory'), 'effects'), (T('Journal'), 'note'), (T('Identification'), 'identification')]
rheader = S3ResourceHeader([[(T('ID Tag Number'), 'pe_label')], ['gender'], ['age_group']], tabs=dvi_tabs)
output = s3_rest_controller(rheader=rheader)
return output |
def person():
' Missing Persons Registry (Match Finder) '
table = s3db.pr_person
s3.crud_strings['pr_person'].update(title_display=T('Missing Person Details'), title_list=T('Missing Persons'), label_list_button=T('List Missing Persons'), msg_list_empty=T('No Persons found'), msg_no_match=T('No Persons currently reported missing'))
s3db.configure('pr_group_membership', list_fields=['id', 'group_id', 'group_head', 'description'])
s3db.configure('pr_person', listadd=False, editable=False, deletable=False, list_fields=['id', 'first_name', 'middle_name', 'last_name', 'picture', 'gender', 'age_group'])
def prep(r):
if ((not r.id) and (not r.method) and (not r.component)):
body_id = r.get_vars.get('match', None)
body = db((db.dvi_body.id == body_id)).select(db.dvi_body.pe_label, limitby=(0, 1)).first()
label = ((body and body.pe_label) or ('#%s' % body_id))
if body_id:
query = dvi_match_query(body_id)
r.resource.add_filter(query)
s3.crud_strings['pr_person'].update(msg_no_match=T('No matching records found'))
return True
s3.prep = prep
field = table.missing
field.readable = False
field.writable = False
field.default = True
table.age_group.readable = True
table.age_group.writable = True
if (len(request.args) == 0):
s3.filter = (db.pr_person.missing == True)
mpr_tabs = [(T('Missing Report'), 'missing_report'), (T('Person Details'), None), (T('Physical Description'), 'physical_description'), (T('Images'), 'image'), (T('Identity'), 'identity'), (T('Address'), 'address'), (T('Contact Data'), 'contact'), (T('Journal'), 'note')]
rheader = (lambda r: s3db.pr_rheader(r, tabs=mpr_tabs))
output = s3_rest_controller('pr', 'person', main='first_name', extra='last_name', rheader=rheader)
return output | 7,416,614,646,194,750,000 | Missing Persons Registry (Match Finder) | controllers/dvi.py | person | andygimma/eden | python | def person():
' '
table = s3db.pr_person
s3.crud_strings['pr_person'].update(title_display=T('Missing Person Details'), title_list=T('Missing Persons'), label_list_button=T('List Missing Persons'), msg_list_empty=T('No Persons found'), msg_no_match=T('No Persons currently reported missing'))
s3db.configure('pr_group_membership', list_fields=['id', 'group_id', 'group_head', 'description'])
s3db.configure('pr_person', listadd=False, editable=False, deletable=False, list_fields=['id', 'first_name', 'middle_name', 'last_name', 'picture', 'gender', 'age_group'])
def prep(r):
if ((not r.id) and (not r.method) and (not r.component)):
body_id = r.get_vars.get('match', None)
body = db((db.dvi_body.id == body_id)).select(db.dvi_body.pe_label, limitby=(0, 1)).first()
label = ((body and body.pe_label) or ('#%s' % body_id))
if body_id:
query = dvi_match_query(body_id)
r.resource.add_filter(query)
s3.crud_strings['pr_person'].update(msg_no_match=T('No matching records found'))
return True
s3.prep = prep
field = table.missing
field.readable = False
field.writable = False
field.default = True
table.age_group.readable = True
table.age_group.writable = True
if (len(request.args) == 0):
s3.filter = (db.pr_person.missing == True)
mpr_tabs = [(T('Missing Report'), 'missing_report'), (T('Person Details'), None), (T('Physical Description'), 'physical_description'), (T('Images'), 'image'), (T('Identity'), 'identity'), (T('Address'), 'address'), (T('Contact Data'), 'contact'), (T('Journal'), 'note')]
rheader = (lambda r: s3db.pr_rheader(r, tabs=mpr_tabs))
output = s3_rest_controller('pr', 'person', main='first_name', extra='last_name', rheader=rheader)
return output |
def dvi_match_query(body_id):
'\n Get a query for candidate matches between the missing\n persons registry and a dead body\n\n @param body_id: the dvi_body record ID\n '
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((((ptable.deleted == False) & (ptable.missing == True)) & (ntable.pe_id == ptable.pe_id)) & (ntable.status == 1))
body = btable[body_id]
if (not body):
return query
if body.date_of_recovery:
q = ((ntable.timestmp <= body.date_of_recovery) | (ntable.timestmp == None))
query = (query & q)
if (body.age_group and (body.age_group != 1)):
q = (((ptable.age_group == None) | (ptable.age_group == 1)) | (ptable.age_group == body.age_group))
query = (query & q)
if (body.gender and (body.gender != 1)):
q = (((ptable.gender == None) | (ptable.gender == 1)) | (ptable.gender == body.gender))
return query | -398,216,051,305,224,960 | Get a query for candidate matches between the missing
persons registry and a dead body
@param body_id: the dvi_body record ID | controllers/dvi.py | dvi_match_query | andygimma/eden | python | def dvi_match_query(body_id):
'\n Get a query for candidate matches between the missing\n persons registry and a dead body\n\n @param body_id: the dvi_body record ID\n '
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((((ptable.deleted == False) & (ptable.missing == True)) & (ntable.pe_id == ptable.pe_id)) & (ntable.status == 1))
body = btable[body_id]
if (not body):
return query
if body.date_of_recovery:
q = ((ntable.timestmp <= body.date_of_recovery) | (ntable.timestmp == None))
query = (query & q)
if (body.age_group and (body.age_group != 1)):
q = (((ptable.age_group == None) | (ptable.age_group == 1)) | (ptable.age_group == body.age_group))
query = (query & q)
if (body.gender and (body.gender != 1)):
q = (((ptable.gender == None) | (ptable.gender == 1)) | (ptable.gender == body.gender))
return query |
def tooltip():
' Ajax Tooltips '
formfield = request.vars.get('formfield', None)
if formfield:
response.view = ('pr/ajaxtips/%s.html' % formfield)
return dict() | 1,023,201,173,793,866,200 | Ajax Tooltips | controllers/dvi.py | tooltip | andygimma/eden | python | def tooltip():
' '
formfield = request.vars.get('formfield', None)
if formfield:
response.view = ('pr/ajaxtips/%s.html' % formfield)
return dict() |
def _get_example_figures():
'Create two example figures.'
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2] | 6,730,110,490,721,703,000 | Create two example figures. | mne/tests/test_report.py | _get_example_figures | NataKozh/mne-python | python | def _get_example_figures():
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2] |
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
'Test rendering -*.fif files for mne report.'
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for (a, b) in [[raw_fname, raw_fname_new], [ms_fname, ms_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121'])
raw.del_proj()
epochs = Epochs(raw, read_events(event_fname), 1, (- 0.2), 0.2)
epochs.save(epochs_fname, overwrite=True)
epochs.average().crop(0.1, 0.2).save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != (- 1))
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert op.isfile(fname)
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert ('(MaxShield on)' in html)
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert op.isfile(op.join(tempdir, 'report2.html'))
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True)
assert op.isfile(op.join(tempdir, 'report.html'))
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert repr(report)
fnames = (glob.glob(op.join(tempdir, '*.raw')) + glob.glob(op.join(tempdir, '*.raw')))
for fname in fnames:
assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != (- 1))
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, image_format='svg')
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section') | 3,057,919,541,365,986,000 | Test rendering -*.fif files for mne report. | mne/tests/test_report.py | test_render_report | NataKozh/mne-python | python | @pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for (a, b) in [[raw_fname, raw_fname_new], [ms_fname, ms_fname_new], [event_fname, event_fname_new], [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121'])
raw.del_proj()
epochs = Epochs(raw, read_events(event_fname), 1, (- 0.2), 0.2)
epochs.save(epochs_fname, overwrite=True)
epochs.average().crop(0.1, 0.2).save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
assert (.join(report.html).find(op.basename(fname)) != (- 1))
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert op.isfile(fname)
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert ('(MaxShield on)' in html)
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert op.isfile(op.join(tempdir, 'report2.html'))
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False, overwrite=True)
assert op.isfile(op.join(tempdir, 'report.html'))
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert repr(report)
fnames = (glob.glob(op.join(tempdir, '*.raw')) + glob.glob(op.join(tempdir, '*.raw')))
for fname in fnames:
assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
assert (.join(report.html).find(op.basename(fname)) != (- 1))
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, image_format='svg')
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section') |
@testing.requires_testing_data
def test_report_raw_psd_and_date():
'Test report raw PSD and DATE_NONE functionality.'
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.0).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False, on_error='raise')
assert isinstance(report.html, list)
assert ('PSD' in ''.join(report.html))
assert ('GMT' in ''.join(report.html))
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False, on_error='raise')
assert isinstance(report.html, list)
assert ('GMT' not in ''.join(report.html)) | -7,349,168,060,396,265,000 | Test report raw PSD and DATE_NONE functionality. | mne/tests/test_report.py | test_report_raw_psd_and_date | NataKozh/mne-python | python | @testing.requires_testing_data
def test_report_raw_psd_and_date():
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.0).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False, on_error='raise')
assert isinstance(report.html, list)
assert ('PSD' in .join(report.html))
assert ('GMT' in .join(report.html))
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False, on_error='raise')
assert isinstance(report.html, list)
assert ('GMT' not in .join(report.html)) |
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
'Test adding figures/images to section.'
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, captions=['evoked response'], scale=1.2, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig], captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=1e-10, image_format='svg')
fig = plt.plot([1, 2], [1, 2])[0].figure
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname], captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname], captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section, fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section, fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory', baseline=((- 0.2), 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample', subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, captions='random image', scale=1.2)
assert repr(report) | -1,994,883,436,208,848,600 | Test adding figures/images to section. | mne/tests/test_report.py | test_render_add_sections | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, captions=['evoked response'], scale=1.2, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig], captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig, captions=['foo'], scale=1e-10, image_format='svg')
fig = plt.plot([1, 2], [1, 2])[0].figure
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname], captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname], captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section, fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section, fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory', baseline=((- 0.2), 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample', subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, captions='random image', scale=1.2)
assert repr(report) |
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
'Test rendering MRI for mne report.'
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for (a, b) in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo', subjects_dir=subjects_dir, decim=30)
report.save(op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) | -644,899,421,329,662,000 | Test rendering MRI for mne report. | mne/tests/test_report.py | test_render_mri | NataKozh/mne-python | python | @pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for (a, b) in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo', subjects_dir=subjects_dir, decim=30)
report.save(op.join(tempdir, 'report.html'), open_browser=False, overwrite=True) |
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
'Test rendering MRI without BEM for mne report.'
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=tempdir)
report.parse_folder(tempdir, render_bem=False)
report.save(op.join(tempdir, 'report.html'), open_browser=False) | 7,342,466,567,199,111,000 | Test rendering MRI without BEM for mne report. | mne/tests/test_report.py | test_render_mri_without_bem | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=tempdir)
report.parse_folder(tempdir, render_bem=False)
report.save(op.join(tempdir, 'report.html'), open_browser=False) |
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
'Test adding html str to mne report.'
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
(caption, section) = ('html', 'html_section')
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index(('report_' + section))
html_compare = report.html[idx]
assert (html in html_compare)
assert repr(report) | -7,173,137,651,665,221,000 | Test adding html str to mne report. | mne/tests/test_report.py | test_add_htmls_to_section | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
(caption, section) = ('html', 'html_section')
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index(('report_' + section))
html_compare = report.html[idx]
assert (html in html_compare)
assert repr(report) |
def test_add_slider_to_section():
'Test adding a slider with a series of images to mne report.'
tempdir = _TempDir()
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert (report.fnames[0] == 'my title-#-report_slider_section-#-custom')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section, [figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
report = Report()
(fig, ax) = plt.subplots()
ax.set_xlabel(u'μ')
report.add_slider_to_section(([fig] * 2), image_format='svg') | 890,989,121,652,424,200 | Test adding a slider with a series of images to mne report. | mne/tests/test_report.py | test_add_slider_to_section | NataKozh/mne-python | python | def test_add_slider_to_section():
tempdir = _TempDir()
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert (report.fnames[0] == 'my title-#-report_slider_section-#-custom')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section, [figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
report = Report()
(fig, ax) = plt.subplots()
ax.set_xlabel(u'μ')
report.add_slider_to_section(([fig] * 2), image_format='svg') |
def test_validate_input():
'Test Report input validation.'
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.', 'Second letter of the alphabet', 'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:(- 1)], section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section, comments=comments[:(- 1)])
values = report._validate_input(items, captions, section, comments=None)
(items_new, captions_new, comments_new) = values
assert_equal(len(comments_new), len(items)) | 7,231,881,813,550,490,000 | Test Report input validation. | mne/tests/test_report.py | test_validate_input | NataKozh/mne-python | python | def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.', 'Second letter of the alphabet', 'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:(- 1)], section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section, comments=comments[:(- 1)])
values = report._validate_input(items, captions, section, comments=None)
(items_new, captions_new, comments_new) = values
assert_equal(len(comments_new), len(items)) |
@requires_h5py
def test_open_report():
'Test the open_report function.'
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert (report.subjects_dir == subjects_dir)
assert (report._fname == hdf5)
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
assert op.exists(hdf5)
report2 = open_report(hdf5)
assert (report2._fname == hdf5)
assert (report2.subjects_dir == report.subjects_dir)
assert (report2.html == report.html)
assert (report2.__getstate__() == report.__getstate__())
assert ('_fname' not in report2.__getstate__())
pytest.raises(ValueError, open_report, hdf5, foo='bar')
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir)
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
(1 / 0) | -46,408,802,336,081,170 | Test the open_report function. | mne/tests/test_report.py | test_open_report | NataKozh/mne-python | python | @requires_h5py
def test_open_report():
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert (report.subjects_dir == subjects_dir)
assert (report._fname == hdf5)
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
assert op.exists(hdf5)
report2 = open_report(hdf5)
assert (report2._fname == hdf5)
assert (report2.subjects_dir == report.subjects_dir)
assert (report2.html == report.html)
assert (report2.__getstate__() == report.__getstate__())
assert ('_fname' not in report2.__getstate__())
pytest.raises(ValueError, open_report, hdf5, foo='bar')
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir)
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
(1 / 0) |
def test_remove():
'Test removing figures from a report.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1', section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert (removed_index == 2)
assert (len(r2.html) == 3)
assert (r2.html[0] == r.html[0])
assert (r2.html[1] == r.html[1])
assert (r2.html[2] == r.html[3])
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert (removed_index == 1)
assert (len(r2.html) == 3)
assert (r2.html[0] == r.html[0])
assert (r2.html[1] == r.html[2])
assert (r2.html[2] == r.html[3])
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert (r2.sections == ['mysection'])
assert (r2._sectionvars == {'mysection': 'report_mysection'}) | 66,209,351,956,649,530 | Test removing figures from a report. | mne/tests/test_report.py | test_remove | NataKozh/mne-python | python | def test_remove():
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1', section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert (removed_index == 2)
assert (len(r2.html) == 3)
assert (r2.html[0] == r.html[0])
assert (r2.html[1] == r.html[1])
assert (r2.html[2] == r.html[3])
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert (removed_index == 1)
assert (len(r2.html) == 3)
assert (r2.html[0] == r.html[0])
assert (r2.html[1] == r.html[2])
assert (r2.html[2] == r.html[3])
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert (r2.sections == ['mysection'])
assert (r2._sectionvars == {'mysection': 'report_mysection'}) |
def test_add_or_replace():
'Test replacing existing figures in a report.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
assert (len(r.html) == 4)
old_r = copy.deepcopy(r)
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert (len(r.html) == 4)
assert (r.html[1] != old_r.html[1])
assert (r.html[0] == old_r.html[0])
assert (r.html[2] == old_r.html[2])
assert (r.html[3] == old_r.html[3]) | -6,487,673,268,119,802,000 | Test replacing existing figures in a report. | mne/tests/test_report.py | test_add_or_replace | NataKozh/mne-python | python | def test_add_or_replace():
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
assert (len(r.html) == 4)
old_r = copy.deepcopy(r)
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert (len(r.html) == 4)
assert (r.html[1] != old_r.html[1])
assert (r.html[0] == old_r.html[0])
assert (r.html[2] == old_r.html[2])
assert (r.html[3] == old_r.html[3]) |
def test_scraper(tmpdir):
'Test report scraping.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
app = Bunch(builder=Bunch(srcdir=str(tmpdir), outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images', 'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]), example_globals=dict(a=1), target_file=target_file)
block = None
rst = scraper(block, block_vars, gallery_conf)
assert (rst == '')
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
assert (rst == '')
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert (not op.isfile(out_html))
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert (rst.count('"') == 6)
assert ('<iframe' in rst)
assert op.isfile(img_fname.replace('png', 'svg')) | 3,745,007,383,396,112,000 | Test report scraping. | mne/tests/test_report.py | test_scraper | NataKozh/mne-python | python | def test_scraper(tmpdir):
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
app = Bunch(builder=Bunch(srcdir=str(tmpdir), outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images', 'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]), example_globals=dict(a=1), target_file=target_file)
block = None
rst = scraper(block, block_vars, gallery_conf)
assert (rst == )
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
assert (rst == )
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert (not op.isfile(out_html))
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert (rst.count('"') == 6)
assert ('<iframe' in rst)
assert op.isfile(img_fname.replace('png', 'svg')) |
def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None):
'IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_ref = None
self._data_secret_name = None
self.discriminator = None
if (config_ref is not None):
self.config_ref = config_ref
if (data_secret_name is not None):
self.data_secret_name = data_secret_name | -7,458,339,454,898,926,000 | IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __init__ | mariusgheorghies/python | python | def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_ref = None
self._data_secret_name = None
self.discriminator = None
if (config_ref is not None):
self.config_ref = config_ref
if (data_secret_name is not None):
self.data_secret_name = data_secret_name |
@property
def config_ref(self):
'Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n\n :return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
return self._config_ref | -6,719,715,699,018,144,000 | Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | config_ref | mariusgheorghies/python | python | @property
def config_ref(self):
'Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n\n :return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
return self._config_ref |
@config_ref.setter
def config_ref(self, config_ref):
'Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n\n :param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
self._config_ref = config_ref | 4,942,017,845,807,972,000 | Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
:param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | config_ref | mariusgheorghies/python | python | @config_ref.setter
def config_ref(self, config_ref):
'Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n\n :param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
self._config_ref = config_ref |
@property
def data_secret_name(self):
'Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: str\n '
return self._data_secret_name | -2,117,952,424,190,382,300 | Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: str | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | data_secret_name | mariusgheorghies/python | python | @property
def data_secret_name(self):
'Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: str\n '
return self._data_secret_name |
@data_secret_name.setter
def data_secret_name(self, data_secret_name):
'Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: str\n '
self._data_secret_name = data_secret_name | 60,570,540,045,614,510 | Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: str | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | data_secret_name | mariusgheorghies/python | python | @data_secret_name.setter
def data_secret_name(self, data_secret_name):
'Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: str\n '
self._data_secret_name = data_secret_name |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | 8,442,519,487,048,767,000 | Returns the model properties as a dict | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | to_dict | mariusgheorghies/python | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | to_str | mariusgheorghies/python | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __repr__ | mariusgheorghies/python | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return False
return (self.to_dict() == other.to_dict()) | 7,289,287,128,892,294,000 | Returns true if both objects are equal | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __eq__ | mariusgheorghies/python | python | def __eq__(self, other):
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return True
return (self.to_dict() != other.to_dict()) | -403,530,282,509,678,700 | Returns true if both objects are not equal | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __ne__ | mariusgheorghies/python | python | def __ne__(self, other):
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return True
return (self.to_dict() != other.to_dict()) |
def register_wrapper(wrapper, cls_or_obj):
'register_wrapper\n\n :param wrapper: A wrapper of all kinds of providers\n :param cls_or_obj: A class or class name or object instance in data/data.py\n '
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = (cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj)
wrapper.register(obj) | -9,188,843,668,770,969,000 | register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py | qlib/data/data.py | register_wrapper | Tirbo06/qlib | python | def register_wrapper(wrapper, cls_or_obj):
'register_wrapper\n\n :param wrapper: A wrapper of all kinds of providers\n :param cls_or_obj: A class or class name or object instance in data/data.py\n '
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = (cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj)
wrapper.register(obj) |
def register_all_wrappers():
'register_all_wrappers'
logger = get_module_logger('data')
_calendar_provider = get_provider_obj(C.calendar_provider)
if (getattr(C, 'calendar_cache', None) is not None):
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f'registering Cal {C.calendar_provider}-{C.calenar_cache}')
register_wrapper(Inst, C.instrument_provider)
logger.debug(f'registering Inst {C.instrument_provider}')
if (getattr(C, 'feature_provider', None) is not None):
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f'registering FeatureD {C.feature_provider}')
if (getattr(C, 'expression_provider', None) is not None):
_eprovider = get_provider_obj(C.expression_provider)
if (getattr(C, 'expression_cache', None) is not None):
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f'registering ExpressioneD {C.expression_provider}-{C.expression_cache}')
_dprovider = get_provider_obj(C.dataset_provider)
if (getattr(C, 'dataset_cache', None) is not None):
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f'registering DataseteD {C.dataset_provider}-{C.dataset_cache}')
register_wrapper(D, C.provider)
logger.debug(f'registering D {C.provider}') | -5,182,649,215,750,496,000 | register_all_wrappers | qlib/data/data.py | register_all_wrappers | Tirbo06/qlib | python | def ():
logger = get_module_logger('data')
_calendar_provider = get_provider_obj(C.calendar_provider)
if (getattr(C, 'calendar_cache', None) is not None):
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f'registering Cal {C.calendar_provider}-{C.calenar_cache}')
register_wrapper(Inst, C.instrument_provider)
logger.debug(f'registering Inst {C.instrument_provider}')
if (getattr(C, 'feature_provider', None) is not None):
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f'registering FeatureD {C.feature_provider}')
if (getattr(C, 'expression_provider', None) is not None):
_eprovider = get_provider_obj(C.expression_provider)
if (getattr(C, 'expression_cache', None) is not None):
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f'registering ExpressioneD {C.expression_provider}-{C.expression_cache}')
_dprovider = get_provider_obj(C.dataset_provider)
if (getattr(C, 'dataset_cache', None) is not None):
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f'registering DataseteD {C.dataset_provider}-{C.dataset_cache}')
register_wrapper(D, C.provider)
logger.debug(f'registering D {C.provider}') |
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq='day', future=False):
'Get calendar of certain market in given time range.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n ----------\n list\n calendar list\n '
raise NotImplementedError('Subclass of CalendarProvider must implement `calendar` method') | 3,577,133,685,186,970,600 | Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list | qlib/data/data.py | calendar | Tirbo06/qlib | python | @abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq='day', future=False):
'Get calendar of certain market in given time range.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n ----------\n list\n calendar list\n '
raise NotImplementedError('Subclass of CalendarProvider must implement `calendar` method') |
def locate_index(self, start_time, end_time, freq, future):
'Locate the start time index and end time index in a calendar under certain frequency.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n -------\n pd.Timestamp\n the real start time\n pd.Timestamp\n the real end time\n int\n the index of start time\n int\n the index of end time\n '
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
(calendar, calendar_index) = self._get_calendar(freq=freq, future=future)
if (start_time not in calendar_index):
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError('`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`')
start_index = calendar_index[start_time]
if (end_time not in calendar_index):
end_time = calendar[(bisect.bisect_right(calendar, end_time) - 1)]
end_index = calendar_index[end_time]
return (start_time, end_time, start_index, end_index) | 1,755,067,797,088,190,200 | Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time | qlib/data/data.py | locate_index | Tirbo06/qlib | python | def locate_index(self, start_time, end_time, freq, future):
'Locate the start time index and end time index in a calendar under certain frequency.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n -------\n pd.Timestamp\n the real start time\n pd.Timestamp\n the real end time\n int\n the index of start time\n int\n the index of end time\n '
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
(calendar, calendar_index) = self._get_calendar(freq=freq, future=future)
if (start_time not in calendar_index):
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError('`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`')
start_index = calendar_index[start_time]
if (end_time not in calendar_index):
end_time = calendar[(bisect.bisect_right(calendar, end_time) - 1)]
end_index = calendar_index[end_time]
return (start_time, end_time, start_index, end_index) |
def _get_calendar(self, freq, future):
'Load calendar using memcache.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n future : bool\n whether including future trading day\n\n Returns\n -------\n list\n list of timestamps\n dict\n dict composed by timestamp as key and index as value for fast search\n '
flag = f'{freq}_future_{future}'
if (flag in H['c']):
(_calendar, _calendar_index) = H['c'][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for (i, x) in enumerate(_calendar)}
H['c'][flag] = (_calendar, _calendar_index)
return (_calendar, _calendar_index) | 2,789,606,126,967,036,400 | Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search | qlib/data/data.py | _get_calendar | Tirbo06/qlib | python | def _get_calendar(self, freq, future):
'Load calendar using memcache.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n future : bool\n whether including future trading day\n\n Returns\n -------\n list\n list of timestamps\n dict\n dict composed by timestamp as key and index as value for fast search\n '
flag = f'{freq}_future_{future}'
if (flag in H['c']):
(_calendar, _calendar_index) = H['c'][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for (i, x) in enumerate(_calendar)}
H['c'][flag] = (_calendar, _calendar_index)
return (_calendar, _calendar_index) |
def _uri(self, start_time, end_time, freq, future=False):
'Get the uri of calendar generation task.'
return hash_args(start_time, end_time, freq, future) | 5,857,268,485,102,351,000 | Get the uri of calendar generation task. | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, start_time, end_time, freq, future=False):
return hash_args(start_time, end_time, freq, future) |
@staticmethod
def instruments(market='all', filter_pipe=None):
"Get the general config dictionary for a base market adding several dynamic filters.\n\n Parameters\n ----------\n market : str\n market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500\n filter_pipe : list\n the list of dynamic filters\n\n Returns\n ----------\n dict\n dict of stockpool config\n {`market`=>base market name, `filter_pipe`=>list of filters}\n\n example :\n {'market': 'csi500',\n 'filter_pipe': [{'filter_type': 'ExpressionDFilter',\n 'rule_expression': '$open<40',\n 'filter_start_time': None,\n 'filter_end_time': None,\n 'keep': False},\n {'filter_type': 'NameDFilter',\n 'name_rule_re': 'SH[0-9]{4}55',\n 'filter_start_time': None,\n 'filter_end_time': None}]}\n "
if (filter_pipe is None):
filter_pipe = []
config = {'market': market, 'filter_pipe': []}
for filter_t in filter_pipe:
config['filter_pipe'].append(filter_t.to_config())
return config | 5,346,680,155,164,997,000 | Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]} | qlib/data/data.py | instruments | Tirbo06/qlib | python | @staticmethod
def instruments(market='all', filter_pipe=None):
"Get the general config dictionary for a base market adding several dynamic filters.\n\n Parameters\n ----------\n market : str\n market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500\n filter_pipe : list\n the list of dynamic filters\n\n Returns\n ----------\n dict\n dict of stockpool config\n {`market`=>base market name, `filter_pipe`=>list of filters}\n\n example :\n {'market': 'csi500',\n 'filter_pipe': [{'filter_type': 'ExpressionDFilter',\n 'rule_expression': '$open<40',\n 'filter_start_time': None,\n 'filter_end_time': None,\n 'keep': False},\n {'filter_type': 'NameDFilter',\n 'name_rule_re': 'SH[0-9]{4}55',\n 'filter_start_time': None,\n 'filter_end_time': None}]}\n "
if (filter_pipe is None):
filter_pipe = []
config = {'market': market, 'filter_pipe': []}
for filter_t in filter_pipe:
config['filter_pipe'].append(filter_t.to_config())
return config |
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq='day', as_list=False):
'List the instruments based on a certain stockpool config.\n\n Parameters\n ----------\n instruments : dict\n stockpool config\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n as_list : bool\n return instruments as list or dict\n\n Returns\n -------\n dict or list\n instruments list or dictionary with time spans\n '
raise NotImplementedError('Subclass of InstrumentProvider must implement `list_instruments` method') | -7,773,960,003,251,321,000 | List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans | qlib/data/data.py | list_instruments | Tirbo06/qlib | python | @abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq='day', as_list=False):
'List the instruments based on a certain stockpool config.\n\n Parameters\n ----------\n instruments : dict\n stockpool config\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n as_list : bool\n return instruments as list or dict\n\n Returns\n -------\n dict or list\n instruments list or dictionary with time spans\n '
raise NotImplementedError('Subclass of InstrumentProvider must implement `list_instruments` method') |
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
'Get feature data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain feature\n '
raise NotImplementedError('Subclass of FeatureProvider must implement `feature` method') | -4,321,086,077,266,639,400 | Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature | qlib/data/data.py | feature | Tirbo06/qlib | python | @abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
'Get feature data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain feature\n '
raise NotImplementedError('Subclass of FeatureProvider must implement `feature` method') |
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq='day'):
'Get Expression data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain expression\n '
raise NotImplementedError('Subclass of ExpressionProvider must implement `Expression` method') | -5,795,559,039,258,244,000 | Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression | qlib/data/data.py | expression | Tirbo06/qlib | python | @abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq='day'):
'Get Expression data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain expression\n '
raise NotImplementedError('Subclass of ExpressionProvider must implement `Expression` method') |
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq='day'):
'Get dataset data.\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n\n Returns\n ----------\n pd.DataFrame\n a pandas dataframe with <instrument, datetime> index\n '
raise NotImplementedError('Subclass of DatasetProvider must implement `Dataset` method') | 8,839,626,730,823,916,000 | Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index | qlib/data/data.py | dataset | Tirbo06/qlib | python | @abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq='day'):
'Get dataset data.\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n\n Returns\n ----------\n pd.DataFrame\n a pandas dataframe with <instrument, datetime> index\n '
raise NotImplementedError('Subclass of DatasetProvider must implement `Dataset` method') |
def _uri(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=1, **kwargs):
'Get task uri, used when generating rabbitmq task in qlib_server\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n '
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache) | 7,055,993,701,715,980,000 | Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=1, **kwargs):
'Get task uri, used when generating rabbitmq task in qlib_server\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n '
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache) |
@staticmethod
def get_instruments_d(instruments, freq):
'\n Parse different types of input instruments to output instruments_d\n Wrong format of input instruments will lead to exception.\n\n '
if isinstance(instruments, dict):
if ('market' in instruments):
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments_d = list(instruments)
else:
raise ValueError('Unsupported input type for param `instrument`')
return instruments_d | 6,823,630,086,228,314,000 | Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception. | qlib/data/data.py | get_instruments_d | Tirbo06/qlib | python | @staticmethod
def get_instruments_d(instruments, freq):
'\n Parse different types of input instruments to output instruments_d\n Wrong format of input instruments will lead to exception.\n\n '
if isinstance(instruments, dict):
if ('market' in instruments):
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments_d = list(instruments)
else:
raise ValueError('Unsupported input type for param `instrument`')
return instruments_d |
@staticmethod
def get_column_names(fields):
'\n Get column names from input fields\n\n '
if (len(fields) == 0):
raise ValueError('fields cannot be empty')
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names | 8,591,165,258,190,278,000 | Get column names from input fields | qlib/data/data.py | get_column_names | Tirbo06/qlib | python | @staticmethod
def get_column_names(fields):
'\n \n\n '
if (len(fields) == 0):
raise ValueError('fields cannot be empty')
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names |
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
'\n Load and process the data, return the data set.\n - default using multi-kernel method.\n\n '
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
if (C.maxtasksperchild is None):
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for (inst, spans) in instruments_d.items():
data[inst] = p.apply_async(DatasetProvider.expression_calculator, args=(inst, start_time, end_time, freq, normalize_column_names, spans, C))
else:
for inst in instruments_d:
data[inst] = p.apply_async(DatasetProvider.expression_calculator, args=(inst, start_time, end_time, freq, normalize_column_names, None, C))
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if (len(data[inst].get()) > 0):
new_data[inst] = data[inst].get()
if (len(new_data) > 0):
data = pd.concat(new_data, names=['instrument'], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data | -8,502,041,657,564,014,000 | Load and process the data, return the data set.
- default using multi-kernel method. | qlib/data/data.py | dataset_processor | Tirbo06/qlib | python | @staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
'\n Load and process the data, return the data set.\n - default using multi-kernel method.\n\n '
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
if (C.maxtasksperchild is None):
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for (inst, spans) in instruments_d.items():
data[inst] = p.apply_async(DatasetProvider.expression_calculator, args=(inst, start_time, end_time, freq, normalize_column_names, spans, C))
else:
for inst in instruments_d:
data[inst] = p.apply_async(DatasetProvider.expression_calculator, args=(inst, start_time, end_time, freq, normalize_column_names, None, C))
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if (len(data[inst].get()) > 0):
new_data[inst] = data[inst].get()
if (len(new_data) > 0):
data = pd.concat(new_data, names=['instrument'], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data |
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"\n Calculate the expressions for one instrument, return a df result.\n If the expression has been calculated before, load from cache.\n\n return value: A data frame with index 'datetime' and other data columns.\n\n "
if (getattr(ExpressionD, '_provider', None) is None):
register_all_wrappers()
obj = dict()
for field in column_names:
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(np.int)]
data.index.names = ['datetime']
if (spans is None):
return data
else:
mask = np.zeros(len(data), dtype=np.bool)
for (begin, end) in spans:
mask |= ((data.index >= begin) & (data.index <= end))
return data[mask] | 2,355,530,893,085,278,000 | Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns. | qlib/data/data.py | expression_calculator | Tirbo06/qlib | python | @staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"\n Calculate the expressions for one instrument, return a df result.\n If the expression has been calculated before, load from cache.\n\n return value: A data frame with index 'datetime' and other data columns.\n\n "
if (getattr(ExpressionD, '_provider', None) is None):
register_all_wrappers()
obj = dict()
for field in column_names:
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(np.int)]
data.index.names = ['datetime']
if (spans is None):
return data
else:
mask = np.zeros(len(data), dtype=np.bool)
for (begin, end) in spans:
mask |= ((data.index >= begin) & (data.index <= end))
return data[mask] |
@property
def _uri_cal(self):
'Calendar file uri.'
if self.remote:
return os.path.join(C.mount_path, 'calendars', '{}.txt')
else:
return os.path.join(C.provider_uri, 'calendars', '{}.txt') | 1,566,966,008,109,715,000 | Calendar file uri. | qlib/data/data.py | _uri_cal | Tirbo06/qlib | python | @property
def _uri_cal(self):
if self.remote:
return os.path.join(C.mount_path, 'calendars', '{}.txt')
else:
return os.path.join(C.provider_uri, 'calendars', '{}.txt') |
def _load_calendar(self, freq, future):
'Load original calendar timestamp from file.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n\n Returns\n ----------\n list\n list of timestamps\n '
if future:
fname = self._uri_cal.format((freq + '_future'))
if (not os.path.exists(fname)):
get_module_logger('data').warning(f'{freq}_future.txt not exists, return current calendar!')
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if (not os.path.exists(fname)):
raise ValueError(('calendar not exists for freq ' + freq))
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f] | -8,847,209,010,704,079,000 | Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps | qlib/data/data.py | _load_calendar | Tirbo06/qlib | python | def _load_calendar(self, freq, future):
'Load original calendar timestamp from file.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n\n Returns\n ----------\n list\n list of timestamps\n '
if future:
fname = self._uri_cal.format((freq + '_future'))
if (not os.path.exists(fname)):
get_module_logger('data').warning(f'{freq}_future.txt not exists, return current calendar!')
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if (not os.path.exists(fname)):
raise ValueError(('calendar not exists for freq ' + freq))
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f] |
@property
def _uri_inst(self):
'Instrument file uri.'
return os.path.join(C.provider_uri, 'instruments', '{}.txt') | 437,905,685,548,133,440 | Instrument file uri. | qlib/data/data.py | _uri_inst | Tirbo06/qlib | python | @property
def _uri_inst(self):
return os.path.join(C.provider_uri, 'instruments', '{}.txt') |
@property
def _uri_data(self):
'Static feature file uri.'
if self.remote:
return os.path.join(C.mount_path, 'features', '{}', '{}.{}.bin')
else:
return os.path.join(C.provider_uri, 'features', '{}', '{}.{}.bin') | 5,267,282,713,068,252,000 | Static feature file uri. | qlib/data/data.py | _uri_data | Tirbo06/qlib | python | @property
def _uri_data(self):
if self.remote:
return os.path.join(C.mount_path, 'features', '{}', '{}.{}.bin')
else:
return os.path.join(C.provider_uri, 'features', '{}', '{}.{}.bin') |
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq='day'):
'\n This method is used to prepare the expression cache for the client.\n Then the client will load the data from expression cache by itself.\n\n '
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if (len(cal) == 0):
return
start_time = cal[0]
end_time = cal[(- 1)]
if (C.maxtasksperchild is None):
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(LocalDatasetProvider.cache_walker, args=(inst, start_time, end_time, freq, column_names))
p.close()
p.join() | 4,493,967,503,467,954,700 | This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself. | qlib/data/data.py | multi_cache_walker | Tirbo06/qlib | python | @staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq='day'):
'\n This method is used to prepare the expression cache for the client.\n Then the client will load the data from expression cache by itself.\n\n '
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if (len(cal) == 0):
return
start_time = cal[0]
end_time = cal[(- 1)]
if (C.maxtasksperchild is None):
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(LocalDatasetProvider.cache_walker, args=(inst, start_time, end_time, freq, column_names))
p.close()
p.join() |
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"\n If the expressions of one instrument haven't been calculated before,\n calculate it and write it into expression cache.\n\n "
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq) | -864,899,502,299,588,600 | If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache. | qlib/data/data.py | cache_walker | Tirbo06/qlib | python | @staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"\n If the expressions of one instrument haven't been calculated before,\n calculate it and write it into expression cache.\n\n "
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq) |
def features(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=None):
'\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n This function will try to use cache method which has a keyword `disk_cache`,\n and will use provider method if a type error is raised because the DatasetD instance\n is a provider class.\n '
disk_cache = (C.default_disk_cache if (disk_cache is None) else disk_cache)
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq) | 2,457,201,586,131,732,500 | disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class. | qlib/data/data.py | features | Tirbo06/qlib | python | def features(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=None):
'\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n This function will try to use cache method which has a keyword `disk_cache`,\n and will use provider method if a type error is raised because the DatasetD instance\n is a provider class.\n '
disk_cache = (C.default_disk_cache if (disk_cache is None) else disk_cache)
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq) |
def _uri(self, type, **kwargs):
'_uri\n The server hope to get the uri of the request. The uri will be decided\n by the dataprovider. For ex, different cache layer has different uri.\n\n :param type: The type of resource for the uri\n :param **kwargs:\n '
if (type == 'calendar'):
return Cal._uri(**kwargs)
elif (type == 'instrument'):
return Inst._uri(**kwargs)
elif (type == 'feature'):
return DatasetD._uri(**kwargs) | 6,238,780,568,753,580,000 | _uri
The server hope to get the uri of the request. The uri will be decided
by the dataprovider. For ex, different cache layer has different uri.
:param type: The type of resource for the uri
:param **kwargs: | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, type, **kwargs):
'_uri\n The server hope to get the uri of the request. The uri will be decided\n by the dataprovider. For ex, different cache layer has different uri.\n\n :param type: The type of resource for the uri\n :param **kwargs:\n '
if (type == 'calendar'):
return Cal._uri(**kwargs)
elif (type == 'instrument'):
return Inst._uri(**kwargs)
elif (type == 'feature'):
return DatasetD._uri(**kwargs) |
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
'features_uri\n\n Return the uri of the generated cache of features/dataset\n\n :param disk_cache:\n :param instruments:\n :param fields:\n :param start_time:\n :param end_time:\n :param freq:\n '
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache) | -5,710,456,833,990,194,000 | features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq: | qlib/data/data.py | features_uri | Tirbo06/qlib | python | def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
'features_uri\n\n Return the uri of the generated cache of features/dataset\n\n :param disk_cache:\n :param instruments:\n :param fields:\n :param start_time:\n :param end_time:\n :param freq:\n '
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache) |
def load_in_chunks(path, chunk_size=1024):
'Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.'
with open(path, 'rb') as file_object:
while True:
data = file_object.read(chunk_size)
if (not data):
break
(yield data) | -2,405,785,985,499,538,400 | Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k. | conans/client/rest/uploader_downloader.py | load_in_chunks | AKhranovskiy/conan | python | def load_in_chunks(path, chunk_size=1024):
'Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.'
with open(path, 'rb') as file_object:
while True:
data = file_object.read(chunk_size)
if (not data):
break
(yield data) |
def download_chunks(file_handler=None, ret_buffer=None):
'Write to a buffer or to a file handler'
chunk_size = (1024 if (not file_path) else (1024 * 100))
download_size = 0
last_progress = None
for data in response.iter_content(chunk_size):
download_size += len(data)
if (ret_buffer is not None):
ret_buffer.extend(data)
if (file_handler is not None):
file_handler.write(to_file_bytes(data))
if self.output:
units = progress_units(download_size, total_length)
progress = human_readable_progress(download_size, total_length)
if (last_progress != units):
print_progress(self.output, units, progress)
last_progress = units
return download_size | -8,814,911,293,651,486,000 | Write to a buffer or to a file handler | conans/client/rest/uploader_downloader.py | download_chunks | AKhranovskiy/conan | python | def download_chunks(file_handler=None, ret_buffer=None):
chunk_size = (1024 if (not file_path) else (1024 * 100))
download_size = 0
last_progress = None
for data in response.iter_content(chunk_size):
download_size += len(data)
if (ret_buffer is not None):
ret_buffer.extend(data)
if (file_handler is not None):
file_handler.write(to_file_bytes(data))
if self.output:
units = progress_units(download_size, total_length)
progress = human_readable_progress(download_size, total_length)
if (last_progress != units):
print_progress(self.output, units, progress)
last_progress = units
return download_size |
def get(self, link_id):
'Get link resource.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /links/1 HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n "data": {\n "clicks": 0,\n "created": "2018-08-21T19:13:34.157470+00:00",\n "short_link": "b",\n "updated": null,\n "url": "https://www.google.com"\n },\n "id": 1,\n "type": "links",\n "url": "/links"\n }\n\n :jsonparam string url: url for which to create short link.\n :reqheader Accept: The response content type depends on\n :mailheader:`Accept` header\n :reqheader Authorization: Optional authentication token.\n :resheader Content-Type: this depends on :mailheader:`Accept`\n header of request\n :statuscode 201: Link created\n '
link = Link.query.filter_by(id=link_id).first()
(link_data, errors) = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {'id': link.id, 'data': link_data, 'url': '/links', 'type': 'link'}
return (response_out, 200) | -2,436,602,847,577,292,300 | Get link resource.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
GET /links/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
:jsonparam string url: url for which to create short link.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
:statuscode 201: Link created | slicr/resources/links.py | get | travisbyrum/slicr | python | def get(self, link_id):
'Get link resource.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /links/1 HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n "data": {\n "clicks": 0,\n "created": "2018-08-21T19:13:34.157470+00:00",\n "short_link": "b",\n "updated": null,\n "url": "https://www.google.com"\n },\n "id": 1,\n "type": "links",\n "url": "/links"\n }\n\n :jsonparam string url: url for which to create short link.\n :reqheader Accept: The response content type depends on\n :mailheader:`Accept` header\n :reqheader Authorization: Optional authentication token.\n :resheader Content-Type: this depends on :mailheader:`Accept`\n header of request\n :statuscode 201: Link created\n '
link = Link.query.filter_by(id=link_id).first()
(link_data, errors) = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {'id': link.id, 'data': link_data, 'url': '/links', 'type': 'link'}
return (response_out, 200) |
@use_args(link_args)
def post(self, args):
'Create shortened link.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /links HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n {\n "url": "https://www.google.com"\n }\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 201 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n "data": {\n "clicks": 0,\n "created": "2018-08-21T19:13:34.157470+00:00",\n "short_link": "b",\n "updated": null,\n "url": "https://www.google.com"\n },\n "id": 1,\n "type": "links",\n "url": "/links"\n }\n\n :jsonparam string url: url for which to create short link.\n :reqheader Accept: The response content type depends on\n :mailheader:`Accept` header\n :reqheader Authorization: Optional authentication token.\n :resheader Content-Type: this depends on :mailheader:`Accept`\n header of request\n :statuscode 201: Link created\n '
args = convert_args(args)
link = Link(url=args.url, domain_id=args.domain_id, salt=int(current_app.config.get('ENCODER_SALT'))).save()
(link_data, errors) = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {'id': link.id, 'data': link_data, 'url': '/links', 'type': 'link'}
return (response_out, 201) | -9,147,935,309,359,684,000 | Create shortened link.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
POST /links HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"url": "https://www.google.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
:jsonparam string url: url for which to create short link.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
:statuscode 201: Link created | slicr/resources/links.py | post | travisbyrum/slicr | python | @use_args(link_args)
def post(self, args):
'Create shortened link.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /links HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n {\n "url": "https://www.google.com"\n }\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 201 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n "data": {\n "clicks": 0,\n "created": "2018-08-21T19:13:34.157470+00:00",\n "short_link": "b",\n "updated": null,\n "url": "https://www.google.com"\n },\n "id": 1,\n "type": "links",\n "url": "/links"\n }\n\n :jsonparam string url: url for which to create short link.\n :reqheader Accept: The response content type depends on\n :mailheader:`Accept` header\n :reqheader Authorization: Optional authentication token.\n :resheader Content-Type: this depends on :mailheader:`Accept`\n header of request\n :statuscode 201: Link created\n '
args = convert_args(args)
link = Link(url=args.url, domain_id=args.domain_id, salt=int(current_app.config.get('ENCODER_SALT'))).save()
(link_data, errors) = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {'id': link.id, 'data': link_data, 'url': '/links', 'type': 'link'}
return (response_out, 201) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.