body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---
def registration_list_status_filter_sql():
'SQL to filter for whitelisted or null registration_list statuses.'
return sql.SQL("(status IS NULL OR status = 'whitelist')") | -2,651,173,938,659,543,600 | SQL to filter for whitelisted or null registration_list statuses. | src/dirbs/utils.py | registration_list_status_filter_sql | nealmadhu/DIRBS-Core | python | def registration_list_status_filter_sql():
return sql.SQL("(status IS NULL OR status = 'whitelist')") |
def compute_amnesty_flags(app_config, curr_date):
'Helper function to determine whether the date falls within amnesty eval or amnesty period.'
in_amnesty_eval_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date <= app_config.amnesty_config.evaluation_period_end_date)) else False)
in_amnesty_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date > app_config.amnesty_config.evaluation_period_end_date) and (curr_date <= app_config.amnesty_config.amnesty_period_end_date)) else False)
return (in_amnesty_eval_period, in_amnesty_period) | 4,268,033,186,415,836,700 | Helper function to determine whether the date falls within amnesty eval or amnesty period. | src/dirbs/utils.py | compute_amnesty_flags | nealmadhu/DIRBS-Core | python | def compute_amnesty_flags(app_config, curr_date):
in_amnesty_eval_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date <= app_config.amnesty_config.evaluation_period_end_date)) else False)
in_amnesty_period = (True if (app_config.amnesty_config.amnesty_enabled and (curr_date > app_config.amnesty_config.evaluation_period_end_date) and (curr_date <= app_config.amnesty_config.amnesty_period_end_date)) else False)
return (in_amnesty_eval_period, in_amnesty_period) |
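A hedged illustration of the flag pair this helper returns; the config objects below are stand-ins that only mimic the attributes the function reads, not the real DIRBS AppConfig:
import datetime
from collections import namedtuple

AmnestyConfig = namedtuple('AmnestyConfig', ['amnesty_enabled', 'evaluation_period_end_date', 'amnesty_period_end_date'])
AppConfig = namedtuple('AppConfig', ['amnesty_config'])

cfg = AppConfig(AmnestyConfig(True, datetime.date(2020, 6, 30), datetime.date(2020, 12, 31)))
in_eval, in_amnesty = compute_amnesty_flags(cfg, datetime.date(2020, 8, 1))
# Past the evaluation window but before the amnesty end date:
# in_eval is False, in_amnesty is True.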
def table_exists_sql(any_schema=False):
'SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True.'
if (not any_schema):
schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
else:
schema_filter_sql = sql.SQL('')
return sql.SQL('SELECT EXISTS (SELECT 1\n FROM pg_tables\n WHERE tablename = %s\n {schema_filter_sql})').format(schema_filter_sql=schema_filter_sql) | -2,982,755,632,233,627,000 | SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True. | src/dirbs/utils.py | table_exists_sql | nealmadhu/DIRBS-Core | python | def table_exists_sql(any_schema=False):
if (not any_schema):
schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
else:
schema_filter_sql = sql.SQL()
return sql.SQL('SELECT EXISTS (SELECT 1\n FROM pg_tables\n WHERE tablename = %s\n {schema_filter_sql})').format(schema_filter_sql=schema_filter_sql) |
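A sketch of the intended call pattern, assuming an open psycopg2 connection; the returned SQL carries a single %s placeholder for the table name:
def table_exists(conn, tbl_name, any_schema=False):
    with conn.cursor() as cursor:
        cursor.execute(table_exists_sql(any_schema=any_schema), [tbl_name])
        # The query returns a single boolean EXISTS column.
        return cursor.fetchone()[0]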
def is_table_partitioned(conn, tbl_name):
'Function to determine whether a table is partitioned.'
with conn.cursor() as cursor:
cursor.execute('SELECT EXISTS (SELECT 1\n FROM pg_class\n JOIN pg_partitioned_table\n ON pg_partitioned_table.partrelid = pg_class.oid\n WHERE pg_class.relname = %s)', [tbl_name])
return cursor.fetchone().exists | 5,915,633,380,111,043,000 | Function to determine whether a table is partitioned. | src/dirbs/utils.py | is_table_partitioned | nealmadhu/DIRBS-Core | python | def is_table_partitioned(conn, tbl_name):
with conn.cursor() as cursor:
cursor.execute('SELECT EXISTS (SELECT 1\n FROM pg_class\n JOIN pg_partitioned_table\n ON pg_partitioned_table.partrelid = pg_class.oid\n WHERE pg_class.relname = %s)', [tbl_name])
return cursor.fetchone().exists |
def __init__(self, msg):
'Constructor.'
super().__init__('DB schema check failure: {0}'.format(msg)) | -4,235,430,209,384,187,000 | Constructor. | src/dirbs/utils.py | __init__ | nealmadhu/DIRBS-Core | python | def __init__(self, msg):
super().__init__('DB schema check failure: {0}'.format(msg)) |
def __init__(self, msg):
'Constructor.'
super().__init__('DB role check failure: {0}'.format(msg)) | 4,190,923,084,369,278,000 | Constructor. | src/dirbs/utils.py | __init__ | nealmadhu/DIRBS-Core | python | def __init__(self, msg):
super().__init__('DB role check failure: {0}'.format(msg)) |
def default(self, obj):
'Overrides JSONEncoder.default.'
if isinstance(obj, datetime.date):
return obj.isoformat()
return JSONEncoder.default(self, obj) | 6,396,015,363,180,159,000 | Overrides JSONEncoder.default. | src/dirbs/utils.py | default | nealmadhu/DIRBS-Core | python | def default(self, obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
return JSONEncoder.default(self, obj) |
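This override makes date objects serialize as ISO strings; a small sketch, assuming the method lives on a JSONEncoder subclass (the class name here is hypothetical):
import datetime
import json
from json import JSONEncoder

class DateAwareJSONEncoder(JSONEncoder):  # hypothetical name for the enclosing class
    def default(self, obj):
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        return JSONEncoder.default(self, obj)

print(json.dumps({'when': datetime.date(2020, 1, 3)}, cls=DateAwareJSONEncoder))
# prints {"when": "2020-01-03"}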
def __init__(self, *args, **kwargs):
'Constructor.'
super().__init__(*args, **kwargs)
if (self.name is not None):
self.itersize = 100000 | 1,331,113,109,789,704,000 | Constructor. | src/dirbs/utils.py | __init__ | nealmadhu/DIRBS-Core | python | def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if (self.name is not None):
self.itersize = 100000 |
def execute(self, query, params=None):
'Overrides NamedTupleCursor.execute.'
try:
return super(LoggingNamedTupleCursor, self).execute(query, params)
finally:
if (self.query is not None):
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8')) | 8,880,579,946,259,191,000 | Overrides NamedTupleCursor.execute. | src/dirbs/utils.py | execute | nealmadhu/DIRBS-Core | python | def execute(self, query, params=None):
try:
return super(LoggingNamedTupleCursor, self).execute(query, params)
finally:
if (self.query is not None):
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8')) |
def callproc(self, procname, params=None):
'Overrides NamedTupleCursor.callproc.'
try:
return super(LoggingNamedTupleCursor, self).callproc(procname, params)
finally:
if (self.query is not None):
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8')) | 3,671,090,875,776,687,000 | Overrides NamedTupleCursor.callproc. | src/dirbs/utils.py | callproc | nealmadhu/DIRBS-Core | python | def callproc(self, procname, params=None):
try:
return super(LoggingNamedTupleCursor, self).callproc(procname, params)
finally:
if (self.query is not None):
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8')) |
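Both overrides simply mirror the parent call and log the executed statement at DEBUG level. A sketch of wiring the cursor class into a psycopg2 connection (the DSN is a placeholder):
import logging
import psycopg2

logging.basicConfig(level=logging.DEBUG)
conn = psycopg2.connect('dbname=dirbs user=dirbs')  # placeholder DSN
with conn.cursor(cursor_factory=LoggingNamedTupleCursor) as cursor:
    cursor.execute('SELECT 1 AS one')
    print(cursor.fetchone().one)  # columns are exposed as named-tuple attributes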
def __enter__(self):
'Python context manager support for use in with statement (on enter).'
self.start = time.time()
return self | -5,373,665,672,555,719,000 | Python context manager support for use in with statement (on enter). | src/dirbs/utils.py | __enter__ | nealmadhu/DIRBS-Core | python | def __enter__(self):
self.start = time.time()
return self |
def __exit__(self, *args):
'Python context manager support for use in with statement (on exit).'
self.duration = int(((time.time() - self.start) * 1000)) | 2,386,409,817,478,090,000 | Python context manager support for use in with statement (on exit). | src/dirbs/utils.py | __exit__ | nealmadhu/DIRBS-Core | python | def __exit__(self, *args):
self.duration = int(((time.time() - self.start) * 1000)) |
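__enter__ and __exit__ above implement a simple millisecond wall-clock timer; a minimal usage sketch, assuming they belong to a small timer class (the class name is hypothetical):
import time

class MillisecondTimer:  # hypothetical name for the class these methods belong to
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.duration = int((time.time() - self.start) * 1000)

with MillisecondTimer() as timer:
    time.sleep(0.25)
print('block took {0:d} ms'.format(timer.duration))  # roughly 250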
def test_get_backupdir_path(tmp_path):
'Returns backups Path named for default working directory.'
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
backdir = '_backups'
datestr = '2020-01-03_1646'
workingdir = Path('agenda')
workingdir.mkdir()
os.chdir(workingdir)
actual = get_backupdir_path(backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / str(workingdir)) / datestr)
expected_explicit = (((Path(tmp_path) / '_backups') / 'agenda') / '2020-01-03_1646')
assert (actual == expected)
assert (actual == expected_explicit) | 5,251,406,337,909,453,000 | Returns backups Path named for default working directory. | tests/returns/test_get_backupdir_path.py | test_get_backupdir_path | tombaker/mklists_old | python | def test_get_backupdir_path(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
backdir = '_backups'
datestr = '2020-01-03_1646'
workingdir = Path('agenda')
workingdir.mkdir()
os.chdir(workingdir)
actual = get_backupdir_path(backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / str(workingdir)) / datestr)
expected_explicit = (((Path(tmp_path) / '_backups') / 'agenda') / '2020-01-03_1646')
assert (actual == expected)
assert (actual == expected_explicit) |
def test_get_backupdir_path_given_datadir(tmp_path):
'Returns backups Path named for specified working directory.'
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
workingdir = Path(tmp_path).joinpath('todolists/a')
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = 'todolists_a'
backdir = '_backups'
datestr = '2020-01-03_1646_06488910'
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / workingdir_shortname_expected) / datestr)
assert (actual == expected) | -8,056,138,804,211,980,000 | Returns backups Path named for specified working directory. | tests/returns/test_get_backupdir_path.py | test_get_backupdir_path_given_datadir | tombaker/mklists_old | python | def test_get_backupdir_path_given_datadir(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
workingdir = Path(tmp_path).joinpath('todolists/a')
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = 'todolists_a'
backdir = '_backups'
datestr = '2020-01-03_1646_06488910'
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / workingdir_shortname_expected) / datestr)
assert (actual == expected) |
def test_get_backupdir_path_given_datadir_with_slash(tmp_path):
'Returns backups Path named for specified working directory ending with slash.'
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
workingdir = Path(tmp_path).joinpath('todolists/a/')
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = 'todolists_a'
backdir = '_backups'
datestr = '2020-01-03_1646_06488910'
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / workingdir_shortname_expected) / datestr)
assert (actual == expected) | -3,118,882,342,985,879,600 | Returns backups Path named for specified working directory ending with slash. | tests/returns/test_get_backupdir_path.py | test_get_backupdir_path_given_datadir_with_slash | tombaker/mklists_old | python | def test_get_backupdir_path_given_datadir_with_slash(tmp_path):
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text('config stuff')
workingdir = Path(tmp_path).joinpath('todolists/a/')
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = 'todolists_a'
backdir = '_backups'
datestr = '2020-01-03_1646_06488910'
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = (((Path(tmp_path) / backdir) / workingdir_shortname_expected) / datestr)
assert (actual == expected) |
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path):
'Raises exception if no rootdir is found (rootdir is None).'
os.chdir(tmp_path)
with pytest.raises(SystemExit):
get_backupdir_path() | 1,102,953,894,251,443,700 | Raises exception if no rootdir is found (rootdir is None). | tests/returns/test_get_backupdir_path.py | test_get_backupdir_path_raise_exception_if_rootdir_not_found | tombaker/mklists_old | python | def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path):
os.chdir(tmp_path)
with pytest.raises(SystemExit):
get_backupdir_path() |
def __init__(self, host='127.0.0.1', port=9200):
'Create an Elasticsearch client.'
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if (self.user and self.password):
self.client = Elasticsearch([{'host': host, 'port': port}], http_auth=(self.user, self.password), use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}], use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get('TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT) | 3,857,736,299,582,721,500 | Create an Elasticsearch client. | timesketch/lib/datastores/elastic.py | __init__ | stevengoossensB/timesketch | python | def __init__(self, host='127.0.0.1', port=9200):
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if (self.user and self.password):
self.client = Elasticsearch([{'host': host, 'port': port}], http_auth=(self.user, self.password), use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}], use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get('TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT) |
@staticmethod
def _build_labels_query(sketch_id, labels):
'Build Elasticsearch query for Timesketch labels.\n\n Args:\n sketch_id: Integer of sketch primary key.\n labels: List of label names.\n\n Returns:\n Elasticsearch query as a dictionary.\n '
label_query = {'bool': {'must': []}}
for label in labels:
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {'nested': {'query': {'bool': {'must': [{'term': {'timesketch_label.name.keyword': label}}, {'term': {'timesketch_label.sketch_id': sketch_id}}]}}, 'path': 'timesketch_label'}}
label_query['bool']['must'].append(nested_query)
return label_query | -5,654,028,270,528,403,000 | Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary. | timesketch/lib/datastores/elastic.py | _build_labels_query | stevengoossensB/timesketch | python | @staticmethod
def _build_labels_query(sketch_id, labels):
'Build Elasticsearch query for Timesketch labels.\n\n Args:\n sketch_id: Integer of sketch primary key.\n labels: List of label names.\n\n Returns:\n Elasticsearch query as a dictionary.\n '
label_query = {'bool': {'must': []}}
for label in labels:
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {'nested': {'query': {'bool': {'must': [{'term': {'timesketch_label.name.keyword': label}}, {'term': {'timesketch_label.sketch_id': sketch_id}}]}}, 'path': 'timesketch_label'}}
label_query['bool']['must'].append(nested_query)
return label_query |
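For reference, a hedged illustration of the query shape this helper yields for a single label; the sketch id and label value are made up, and the datastore class name is assumed:
single_label_query = ElasticsearchDataStore._build_labels_query(sketch_id=1, labels=['star'])  # assumed class name
# Expected shape (illustrative):
# {'bool': {'must': [
#     {'nested': {
#         'query': {'bool': {'must': [
#             {'term': {'timesketch_label.name.keyword': 'star'}},
#             {'term': {'timesketch_label.sketch_id': 1}}]}},
#         'path': 'timesketch_label'}}]}}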
@staticmethod
def _build_events_query(events):
'Build Elasticsearch query for one or more document ids.\n\n Args:\n events: List of Elasticsearch document IDs.\n\n Returns:\n Elasticsearch query as a dictionary.\n '
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict | 8,328,508,765,477,211,000 | Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary. | timesketch/lib/datastores/elastic.py | _build_events_query | stevengoossensB/timesketch | python | @staticmethod
def _build_events_query(events):
'Build Elasticsearch query for one or more document ids.\n\n Args:\n events: List of Elasticsearch document IDs.\n\n Returns:\n Elasticsearch query as a dictionary.\n '
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict |
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
'Build Elastic Search DSL query by adding in timeline filtering.\n\n Args:\n query_dsl: A dict with the current query_dsl\n timeline_ids: Either a list of timeline IDs (int) or None.\n\n Returns:\n Elasticsearch query DSL as a dictionary.\n '
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if (not timeline_ids):
return query_dsl
if (not isinstance(timeline_ids, (list, tuple))):
es_logger.error('Attempting to pass in timelines to a query DSL, but the passed timelines are not a list.')
return query_dsl
if (not all([isinstance(x, int) for x in timeline_ids])):
es_logger.error('All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if (not old_query):
return query_dsl
query_dsl['query'] = {'bool': {'must': [], 'should': [{'bool': {'must': old_query, 'must_not': [{'exists': {'field': '__ts_timeline_id'}}]}}, {'bool': {'must': [{'terms': {'__ts_timeline_id': timeline_ids}}, old_query], 'must_not': [], 'filter': [{'exists': {'field': '__ts_timeline_id'}}]}}], 'must_not': [], 'filter': []}}
return query_dsl | -3,096,211,081,514,344,400 | Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary. | timesketch/lib/datastores/elastic.py | _build_query_dsl | stevengoossensB/timesketch | python | @staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
'Build Elastic Search DSL query by adding in timeline filtering.\n\n Args:\n query_dsl: A dict with the current query_dsl\n timeline_ids: Either a list of timeline IDs (int) or None.\n\n Returns:\n Elasticsearch query DSL as a dictionary.\n '
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if (not timeline_ids):
return query_dsl
if (not isinstance(timeline_ids, (list, tuple))):
es_logger.error('Attempting to pass in timelines to a query DSL, but the passed timelines are not a list.')
return query_dsl
if (not all([isinstance(x, int) for x in timeline_ids])):
es_logger.error('All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if (not old_query):
return query_dsl
query_dsl['query'] = {'bool': {'must': [], 'should': [{'bool': {'must': old_query, 'must_not': [{'exists': {'field': '__ts_timeline_id'}}]}}, {'bool': {'must': [{'terms': {'__ts_timeline_id': timeline_ids}}, old_query], 'must_not': [], 'filter': [{'exists': {'field': '__ts_timeline_id'}}]}}], 'must_not': [], 'filter': []}}
return query_dsl |
@staticmethod
def _convert_to_time_range(interval):
'Convert an interval timestamp into start and end dates.\n\n Args:\n interval: Time frame representation\n\n Returns:\n Start timestamp in string format.\n End timestamp in string format.\n '
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = (lambda s: int(''.join(filter(str.isdigit, s))))
get_alpha = (lambda s: ''.join(filter(str.isalpha, s)))
ts_parts = interval.split(' ')
start = ' '.join(ts_parts[0:(len(ts_parts) - 2)])
minus = get_digits(ts_parts[(- 2)])
plus = get_digits(ts_parts[(- 1)])
interval = get_alpha(ts_parts[(- 1)])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if (interval == 's'):
start_range = (start_ts - rd(seconds=minus))
end_range = (start_ts + rd(seconds=plus))
elif (interval == 'm'):
start_range = (start_ts - rd(minutes=minus))
end_range = (start_ts + rd(minutes=plus))
elif (interval == 'h'):
start_range = (start_ts - rd(hours=minus))
end_range = (start_ts + rd(hours=plus))
elif (interval == 'd'):
start_range = (start_ts - rd(days=minus))
end_range = (start_ts + rd(days=plus))
else:
raise RuntimeError(('Unable to parse the timestamp: ' + str(interval)))
return (start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)) | 4,055,374,866,093,789,000 | Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format. | timesketch/lib/datastores/elastic.py | _convert_to_time_range | stevengoossensB/timesketch | python | @staticmethod
def _convert_to_time_range(interval):
'Convert an interval timestamp into start and end dates.\n\n Args:\n interval: Time frame representation\n\n Returns:\n Start timestamp in string format.\n End timestamp in string format.\n '
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = (lambda s: int(''.join(filter(str.isdigit, s))))
get_alpha = (lambda s: ''.join(filter(str.isalpha, s)))
ts_parts = interval.split(' ')
start = ' '.join(ts_parts[0:(len(ts_parts) - 2)])
minus = get_digits(ts_parts[(- 2)])
plus = get_digits(ts_parts[(- 1)])
interval = get_alpha(ts_parts[(- 1)])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if (interval == 's'):
start_range = (start_ts - rd(seconds=minus))
end_range = (start_ts + rd(seconds=plus))
elif (interval == 'm'):
start_range = (start_ts - rd(minutes=minus))
end_range = (start_ts + rd(minutes=plus))
elif (interval == 'h'):
start_range = (start_ts - rd(hours=minus))
end_range = (start_ts + rd(hours=plus))
elif (interval == 'd'):
start_range = (start_ts - rd(days=minus))
end_range = (start_ts + rd(days=plus))
else:
raise RuntimeError(('Unable to parse the timestamp: ' + str(interval)))
return (start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)) |
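The interval string is parsed as a base timestamp followed by a minus and a plus offset, with the unit taken from the last token; a hedged example of the round trip this implies (the input format is inferred from the parsing code, and the datastore class name is assumed):
start, end = ElasticsearchDataStore._convert_to_time_range('2020-06-01T12:00:00 -5m +5m')  # assumed class name
# start == '2020-06-01T11:55:00'
# end   == '2020-06-01T12:05:00'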
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None, aggregations=None, timeline_ids=None):
'Build Elasticsearch DSL query.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n aggregations: Dict of Elasticsearch aggregations\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Elasticsearch DSL query as a dictionary\n '
if query_dsl:
if (not isinstance(query_dsl, dict)):
query_dsl = json.loads(query_dsl)
if (not query_dsl):
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {'query': {'bool': {'must': [], 'must_not': [], 'filter': []}}}
if query_string:
query_dsl['query']['bool']['must'].append({'query_string': {'query': query_string}})
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {'bool': {'should': [], 'minimum_should_match': 1}}
for chip in query_filter['chips']:
if (not chip.get('active', True)):
continue
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if (chip['type'] == 'label'):
labels.append(chip['value'])
elif (chip['type'] == 'term'):
term_filter = {'match_phrase': {'{}'.format(chip['field']): {'query': '{}'.format(chip['value'])}}}
if (chip['operator'] == 'must'):
must_filters.append(term_filter)
elif (chip['operator'] == 'must_not'):
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = (lambda start, end: {'range': {'datetime': {'gte': start, 'lte': end}}})
if (chip['type'] == 'datetime_range'):
(start, end) = chip['value'].split(',')
elif (chip['type'] == 'datetime_interval'):
(start, end) = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
if (not query_dsl.get('sort', None)):
query_dsl['sort'] = {'datetime': query_filter.get('order', 'asc')}
if aggregations:
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl['post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
if (timeline_ids and isinstance(timeline_ids, (list, tuple))):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({'exists': {'field': '__ts_timeline_id'}})
must_filters_post.append({'terms': {'__ts_timeline_id': timeline_ids}})
query_dsl['query'] = {'bool': {'must': [], 'should': [{'bool': {'must': must_filters_pre, 'must_not': must_not_filters_pre}}, {'bool': {'must': must_filters_post, 'must_not': must_not_filters_post, 'filter': [{'exists': {'field': '__ts_timeline_id'}}]}}], 'must_not': [], 'filter': []}}
return query_dsl | 8,189,367,095,946,872,000 | Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary | timesketch/lib/datastores/elastic.py | build_query | stevengoossensB/timesketch | python | def build_query(self, sketch_id, query_string, query_filter, query_dsl=None, aggregations=None, timeline_ids=None):
'Build Elasticsearch DSL query.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n aggregations: Dict of Elasticsearch aggregations\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Elasticsearch DSL query as a dictionary\n '
if query_dsl:
if (not isinstance(query_dsl, dict)):
query_dsl = json.loads(query_dsl)
if (not query_dsl):
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {'query': {'bool': {'must': [], 'must_not': [], 'filter': []}}}
if query_string:
query_dsl['query']['bool']['must'].append({'query_string': {'query': query_string}})
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {'bool': {'should': [], 'minimum_should_match': 1}}
for chip in query_filter['chips']:
if (not chip.get('active', True)):
continue
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if (chip['type'] == 'label'):
labels.append(chip['value'])
elif (chip['type'] == 'term'):
term_filter = {'match_phrase': {'{}'.format(chip['field']): {'query': '{}'.format(chip['value'])}}}
if (chip['operator'] == 'must'):
must_filters.append(term_filter)
elif (chip['operator'] == 'must_not'):
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = (lambda start, end: {'range': {'datetime': {'gte': start, 'lte': end}}})
if (chip['type'] == 'datetime_range'):
(start, end) = chip['value'].split(',')
elif (chip['type'] == 'datetime_interval'):
(start, end) = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
if (not query_dsl.get('sort', None)):
query_dsl['sort'] = {'datetime': query_filter.get('order', 'asc')}
if aggregations:
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl['post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
if (timeline_ids and isinstance(timeline_ids, (list, tuple))):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({'exists': {'field': '__ts_timeline_id'}})
must_filters_post.append({'terms': {'__ts_timeline_id': timeline_ids}})
query_dsl['query'] = {'bool': {'must': [], 'should': [{'bool': {'must': must_filters_pre, 'must_not': must_not_filters_pre}}, {'bool': {'must': must_filters_post, 'must_not': must_not_filters_post, 'filter': [{'exists': {'field': '__ts_timeline_id'}}]}}], 'must_not': [], 'filter': []}}
return query_dsl |
def search(self, sketch_id, query_string, query_filter, query_dsl, indices, count=False, aggregations=None, return_fields=None, enable_scroll=False, timeline_ids=None):
'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n search request on ElasticSearch and get result back.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n indices: List of indices to query\n count: Boolean indicating if we should only return result count\n aggregations: Dict of Elasticsearch aggregations\n return_fields: List of fields to return\n enable_scroll: If Elasticsearch scroll API should be used\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Set of event documents in JSON format\n '
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m'
if (not indices):
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
if query_filter.get('events', None):
indices = {event['index'] for event in query_filter['events'] if (event['index'] in indices)}
query_dsl = self.build_query(sketch_id=sketch_id, query_string=query_string, query_filter=query_filter, query_dsl=query_dsl, aggregations=aggregations, timeline_ids=timeline_ids)
search_type = 'query_then_fetch'
if count:
if ('sort' in query_dsl):
del query_dsl['sort']
try:
count_result = self.client.count(body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error('Unable to count due to an index not found: {0:s}'.format(','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if (not return_fields):
return self.client.search(body=query_dsl, index=list(indices), search_type=search_type, scroll=scroll_timeout)
try:
if self.version.startswith('6'):
_search_result = self.client.search(body=query_dsl, index=list(indices), search_type=search_type, _source_include=return_fields, scroll=scroll_timeout)
else:
_search_result = self.client.search(body=query_dsl, index=list(indices), search_type=search_type, _source_includes=return_fields, scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append('[{0:s}] {1:s}'.format(cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error('Unable to run search query: {0:s}'.format(cause), exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result | -7,302,113,754,087,591,000 | Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format | timesketch/lib/datastores/elastic.py | search | stevengoossensB/timesketch | python | def search(self, sketch_id, query_string, query_filter, query_dsl, indices, count=False, aggregations=None, return_fields=None, enable_scroll=False, timeline_ids=None):
'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n search request on ElasticSearch and get result back.\n\n Args:\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n indices: List of indices to query\n count: Boolean indicating if we should only return result count\n aggregations: Dict of Elasticsearch aggregations\n return_fields: List of fields to return\n enable_scroll: If Elasticsearch scroll API should be used\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Set of event documents in JSON format\n '
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m'
if (not indices):
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
if query_filter.get('events', None):
indices = {event['index'] for event in query_filter['events'] if (event['index'] in indices)}
query_dsl = self.build_query(sketch_id=sketch_id, query_string=query_string, query_filter=query_filter, query_dsl=query_dsl, aggregations=aggregations, timeline_ids=timeline_ids)
search_type = 'query_then_fetch'
if count:
if ('sort' in query_dsl):
del query_dsl['sort']
try:
count_result = self.client.count(body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error('Unable to count due to an index not found: {0:s}'.format(','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if (not return_fields):
return self.client.search(body=query_dsl, index=list(indices), search_type=search_type, scroll=scroll_timeout)
try:
if self.version.startswith('6'):
_search_result = self.client.search(body=query_dsl, index=list(indices), search_type=search_type, _source_include=return_fields, scroll=scroll_timeout)
else:
_search_result = self.client.search(body=query_dsl, index=list(indices), search_type=search_type, _source_includes=return_fields, scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append('[{0:s}] {1:s}'.format(cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error('Unable to run search query: {0:s}'.format(cause), exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result |
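A hedged sketch of asking only for a hit count with count=True; the datastore instance, sketch id and index name are placeholders, and constructing the datastore requires an active Flask application context:
hit_count = datastore.search(
    sketch_id=1,
    query_string='message:login',
    query_filter={},
    query_dsl=None,
    indices=['abc123'],
    count=True)
print('matching events: {0:d}'.format(hit_count))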
def search_stream(self, sketch_id=None, query_string=None, query_filter=None, query_dsl=None, indices=None, return_fields=None, enable_scroll=True, timeline_ids=None):
'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n search request on ElasticSearch and get result back.\n\n Args :\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n indices: List of indices to query\n return_fields: List of fields to return\n enable_scroll: Boolean determining whether scrolling is enabled.\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Generator of event documents in JSON format\n '
METRICS['search_requests'].labels(type='streaming').inc()
if (not query_filter.get('size')):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if (not query_filter.get('terminate_after')):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(sketch_id=sketch_id, query_string=query_string, query_dsl=query_dsl, query_filter=query_filter, indices=indices, return_fields=return_fields, enable_scroll=enable_scroll, timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
(yield event)
while (scroll_size > 0):
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
(yield event) | -2,000,918,080,028,975,000 | Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args :
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format | timesketch/lib/datastores/elastic.py | search_stream | stevengoossensB/timesketch | python | def search_stream(self, sketch_id=None, query_string=None, query_filter=None, query_dsl=None, indices=None, return_fields=None, enable_scroll=True, timeline_ids=None):
'Search ElasticSearch. This will take a query string from the UI\n together with a filter definition. Based on this it will execute the\n search request on ElasticSearch and get result back.\n\n Args :\n sketch_id: Integer of sketch primary key\n query_string: Query string\n query_filter: Dictionary containing filters to apply\n query_dsl: Dictionary containing Elasticsearch DSL query\n indices: List of indices to query\n return_fields: List of fields to return\n enable_scroll: Boolean determining whether scrolling is enabled.\n timeline_ids: Optional list of IDs of Timeline objects that should\n be queried as part of the search.\n\n Returns:\n Generator of event documents in JSON format\n '
METRICS['search_requests'].labels(type='streaming').inc()
if (not query_filter.get('size')):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if (not query_filter.get('terminate_after')):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(sketch_id=sketch_id, query_string=query_string, query_dsl=query_dsl, query_filter=query_filter, indices=indices, return_fields=return_fields, enable_scroll=enable_scroll, timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
(yield event)
while (scroll_size > 0):
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
(yield event) |
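A hedged sketch of consuming the streaming generator; the identifiers are placeholders and the constructor needs an active Flask application context for its config lookups:
datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)  # assumed class name
for event in datastore.search_stream(
        sketch_id=1,
        query_string='data_type:"fs:stat"',
        query_filter={'size': 100},
        indices=['abc123'],
        return_fields=['datetime', 'message']):
    print(event['_id'], event['_source'].get('message'))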
def get_filter_labels(self, sketch_id, indices):
'Aggregate labels for a sketch.\n\n Args:\n sketch_id: The Sketch ID\n indices: List of indices to aggregate on\n\n Returns:\n List with label names.\n '
max_labels = 10000
aggregation = {'aggs': {'nested': {'nested': {'path': 'timesketch_label'}, 'aggs': {'inner': {'filter': {'bool': {'must': [{'term': {'timesketch_label.sketch_id': sketch_id}}]}}, 'aggs': {'labels': {'terms': {'size': max_labels, 'field': 'timesketch_label.name.keyword'}}}}}}}}
labels = []
try:
result = self.client.search(index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(','.join(indices)))
return labels
buckets = result.get('aggregations', {}).get('nested', {}).get('inner', {}).get('labels', {}).get('buckets', [])
for bucket in buckets:
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels | 714,276,077,707,961,900 | Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names. | timesketch/lib/datastores/elastic.py | get_filter_labels | stevengoossensB/timesketch | python | def get_filter_labels(self, sketch_id, indices):
'Aggregate labels for a sketch.\n\n Args:\n sketch_id: The Sketch ID\n indices: List of indices to aggregate on\n\n Returns:\n List with label names.\n '
max_labels = 10000
aggregation = {'aggs': {'nested': {'nested': {'path': 'timesketch_label'}, 'aggs': {'inner': {'filter': {'bool': {'must': [{'term': {'timesketch_label.sketch_id': sketch_id}}]}}, 'aggs': {'labels': {'terms': {'size': max_labels, 'field': 'timesketch_label.name.keyword'}}}}}}}}
labels = []
try:
result = self.client.search(index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(','.join(indices)))
return labels
buckets = result.get('aggregations', {}).get('nested', {}).get('inner', {}).get('labels', {}).get('buckets', [])
for bucket in buckets:
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels |
def get_event(self, searchindex_id, event_id):
'Get one event from the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n\n Returns:\n Event document in JSON format\n '
METRICS['search_get_event'].inc()
try:
if self.version.startswith('6'):
event = self.client.get(index=searchindex_id, id=event_id, doc_type='_all', _source_exclude=['timesketch_label'])
else:
event = self.client.get(index=searchindex_id, id=event_id, doc_type='_all', _source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND) | 4,496,177,488,117,825,500 | Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format | timesketch/lib/datastores/elastic.py | get_event | stevengoossensB/timesketch | python | def get_event(self, searchindex_id, event_id):
'Get one event from the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n\n Returns:\n Event document in JSON format\n '
METRICS['search_get_event'].inc()
try:
if self.version.startswith('6'):
event = self.client.get(index=searchindex_id, id=event_id, doc_type='_all', _source_exclude=['timesketch_label'])
else:
event = self.client.get(index=searchindex_id, id=event_id, doc_type='_all', _source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND) |
def count(self, indices):
'Count number of documents.\n\n Args:\n indices: List of indices.\n\n Returns:\n Tuple containing number of documents and size on disk.\n '
if (not indices):
return (0, 0)
try:
es_stats = self.client.indices.stats(index=indices, metric='docs, store')
except NotFoundError:
es_logger.error('Unable to count indices (index not found)')
return (0, 0)
except RequestError:
es_logger.error('Unable to count indices (request error)', exc_info=True)
return (0, 0)
doc_count_total = es_stats.get('_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get('_all', {}).get('primaries', {}).get('store', {}).get('size_in_bytes', 0)
return (doc_count_total, doc_bytes_total) | 6,281,411,345,004,881,000 | Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk. | timesketch/lib/datastores/elastic.py | count | stevengoossensB/timesketch | python | def count(self, indices):
'Count number of documents.\n\n Args:\n indices: List of indices.\n\n Returns:\n Tuple containing number of documents and size on disk.\n '
if (not indices):
return (0, 0)
try:
es_stats = self.client.indices.stats(index=indices, metric='docs, store')
except NotFoundError:
es_logger.error('Unable to count indices (index not found)')
return (0, 0)
except RequestError:
es_logger.error('Unable to count indices (request error)', exc_info=True)
return (0, 0)
doc_count_total = es_stats.get('_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get('_all', {}).get('primaries', {}).get('store', {}).get('size_in_bytes', 0)
return (doc_count_total, doc_bytes_total) |
def set_label(self, searchindex_id, event_id, event_type, sketch_id, user_id, label, toggle=False, remove=False, single_update=True):
'Set label on event in the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n event_type: String of ElasticSearch document type\n sketch_id: Integer of sketch primary key\n user_id: Integer of user primary key\n label: String with the name of the label\n remove: Optional boolean value if the label should be removed\n toggle: Optional boolean value if the label should be toggled\n single_update: Boolean if the label should be indexed immediately.\n\n Returns:\n Dict with updated document body, or None if this is a single update.\n '
update_body = {'script': {'lang': 'painless', 'source': UPDATE_LABEL_SCRIPT, 'params': {'timesketch_label': {'name': str(label), 'user_id': user_id, 'sketch_id': sketch_id}, 'remove': remove}}}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if (not single_update):
script = update_body['script']
return dict(source=script['source'], lang=script['lang'], params=script['params'])
doc = self.client.get(index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(index=searchindex_id, doc_type=event_type, id=event_id, body=doc)
self.client.update(index=searchindex_id, id=event_id, doc_type=event_type, body=update_body)
return None | -3,900,731,638,094,000,600 | Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update. | timesketch/lib/datastores/elastic.py | set_label | stevengoossensB/timesketch | python | def set_label(self, searchindex_id, event_id, event_type, sketch_id, user_id, label, toggle=False, remove=False, single_update=True):
'Set label on event in the datastore.\n\n Args:\n searchindex_id: String of ElasticSearch index id\n event_id: String of ElasticSearch event id\n event_type: String of ElasticSearch document type\n sketch_id: Integer of sketch primary key\n user_id: Integer of user primary key\n label: String with the name of the label\n remove: Optional boolean value if the label should be removed\n toggle: Optional boolean value if the label should be toggled\n single_update: Boolean if the label should be indexed immediately.\n\n Returns:\n Dict with updated document body, or None if this is a single update.\n '
update_body = {'script': {'lang': 'painless', 'source': UPDATE_LABEL_SCRIPT, 'params': {'timesketch_label': {'name': str(label), 'user_id': user_id, 'sketch_id': sketch_id}, 'remove': remove}}}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if (not single_update):
script = update_body['script']
return dict(source=script['source'], lang=script['lang'], params=script['params'])
doc = self.client.get(index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(index=searchindex_id, doc_type=event_type, id=event_id, body=doc)
self.client.update(index=searchindex_id, id=event_id, doc_type=event_type, body=update_body)
return None |
def create_index(self, index_name=uuid4().hex, doc_type='generic_event', mappings=None):
'Create index with Timesketch settings.\n\n Args:\n index_name: Name of the index. Default is a generated UUID.\n doc_type: Name of the document type. Default id generic_event.\n mappings: Optional dict with the document mapping for Elastic.\n\n Returns:\n Index name in string format.\n Document type in string format.\n '
if mappings:
_document_mapping = mappings
else:
_document_mapping = {'properties': {'timesketch_label': {'type': 'nested'}, 'datetime': {'type': 'date'}}}
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if (not self.client.indices.exists(index_name)):
try:
self.client.indices.create(index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError('Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning('Attempting to create an index that already exists ({0:s} - {1:s})'.format(index_name, str(index_exists)))
return (index_name, doc_type) | -8,882,026,856,317,529,000 | Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default id generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format. | timesketch/lib/datastores/elastic.py | create_index | stevengoossensB/timesketch | python | def create_index(self, index_name=uuid4().hex, doc_type='generic_event', mappings=None):
'Create index with Timesketch settings.\n\n Args:\n index_name: Name of the index. Default is a generated UUID.\n doc_type: Name of the document type. Default id generic_event.\n mappings: Optional dict with the document mapping for Elastic.\n\n Returns:\n Index name in string format.\n Document type in string format.\n '
if mappings:
_document_mapping = mappings
else:
_document_mapping = {'properties': {'timesketch_label': {'type': 'nested'}, 'datetime': {'type': 'date'}}}
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if (not self.client.indices.exists(index_name)):
try:
self.client.indices.create(index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError('Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning('Attempting to create an index that already exists ({0:s} - {1:s})'.format(index_name, str(index_exists)))
return (index_name, doc_type) |
def delete_index(self, index_name):
'Delete Elasticsearch index.\n\n Args:\n index_name: Name of the index to delete.\n '
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError('Unable to connect to Timesketch backend: {}'.format(e)) from e | 8,613,442,976,308,407,000 | Delete Elasticsearch index.
Args:
index_name: Name of the index to delete. | timesketch/lib/datastores/elastic.py | delete_index | stevengoossensB/timesketch | python | def delete_index(self, index_name):
'Delete Elasticsearch index.\n\n Args:\n index_name: Name of the index to delete.\n '
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError('Unable to connect to Timesketch backend: {}'.format(e)) from e |
def import_event(self, index_name, event_type, event=None, event_id=None, flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
'Add event to Elasticsearch.\n\n Args:\n index_name: Name of the index in Elasticsearch\n event_type: Type of event (e.g. plaso_event)\n event: Event dictionary\n event_id: Event Elasticsearch ID\n flush_interval: Number of events to queue up before indexing\n timeline_id: Optional ID number of a Timeline object this event\n belongs to. If supplied an additional field will be added to\n the store indicating the timeline this belongs to.\n '
if event:
for (k, v) in event.items():
if (not isinstance(k, six.text_type)):
k = codecs.decode(k, 'utf8')
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
header = {'index': {'_index': index_name}}
update_header = {'update': {'_index': index_name, '_id': event_id}}
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if ((self.import_counter['events'] % int(flush_interval)) == 0):
_ = self.flush_queued_events()
self.import_events = []
elif self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events'] | 8,753,995,590,469,953,000 | Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to. | timesketch/lib/datastores/elastic.py | import_event | stevengoossensB/timesketch | python | def import_event(self, index_name, event_type, event=None, event_id=None, flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
'Add event to Elasticsearch.\n\n Args:\n index_name: Name of the index in Elasticsearch\n event_type: Type of event (e.g. plaso_event)\n event: Event dictionary\n event_id: Event Elasticsearch ID\n flush_interval: Number of events to queue up before indexing\n timeline_id: Optional ID number of a Timeline object this event\n belongs to. If supplied an additional field will be added to\n the store indicating the timeline this belongs to.\n '
if event:
for (k, v) in event.items():
if (not isinstance(k, six.text_type)):
k = codecs.decode(k, 'utf8')
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
header = {'index': {'_index': index_name}}
update_header = {'update': {'_index': index_name, '_id': event_id}}
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if ((self.import_counter['events'] % int(flush_interval)) == 0):
_ = self.flush_queued_events()
self.import_events = []
elif self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events'] |
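A hedged sketch of the bulk-import pattern the code above supports: queue events via import_event and force a final flush by calling it once more with no event (which hits the elif branch); the datastore instance, index and timeline identifiers are placeholders:
index_name, doc_type = datastore.create_index(index_name='sketch-1-demo')
for i in range(1000):
    event = {'message': 'event number {0:d}'.format(i),
             'datetime': '2020-01-01T00:00:00',
             'timestamp_desc': 'Illustrative Time'}
    datastore.import_event(index_name, doc_type, event=event, timeline_id=1)
# A trailing call without an event flushes whatever is still queued.
total = datastore.import_event(index_name, doc_type)
print('queued/imported {0:d} events'.format(total))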
def flush_queued_events(self, retry_count=0):
'Flush all queued events.\n\n Returns:\n dict: A dict object that contains the number of events\n that were sent to Elastic as well as information\n on whether there were any errors, and what the\n details of these errors if any.\n retry_count: optional int indicating whether this is a retry.\n '
if (not self.import_events):
return {}
return_dict = {'number_of_events': (len(self.import_events) / 2), 'total_events': self.import_counter['events']}
try:
results = self.client.bulk(body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if (retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT):
es_logger.error('Unable to add events, reached recount max.', exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events((retry_count + 1))
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(index_name, {'errors': [], 'types': Counter(), 'details': Counter()})
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name]['details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get('reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(caused_by.get('type', 'Unknown Detailed Type'), ' '.join(caused_reason.split()[:5]))
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(error.get('type', 'Unknown Type'), error.get('reason', 'No reason given'), caused_by.get('type', 'Unknown Type'), caused_reason)
error_list.append(error_msg)
try:
es_logger.error('Unable to upload document: {0:s} to index {1:s} - [{2:d}] {3:s}'.format(doc_id, index_name, status_code, error_msg))
except Exception:
es_logger.error('Unable to upload document, and unable to log the error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict | -8,373,796,784,467,723,000 | Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic, as well as information
on whether there were any errors and the details
of those errors, if any.
retry_count: optional int indicating whether this is a retry. | timesketch/lib/datastores/elastic.py | flush_queued_events | stevengoossensB/timesketch | python | def flush_queued_events(self, retry_count=0):
'Flush all queued events.\n\n Returns:\n dict: A dict object that contains the number of events\n that were sent to Elastic as well as information\n on whether there were any errors, and what the\n details of these errors if any.\n retry_count: optional int indicating whether this is a retry.\n '
if (not self.import_events):
return {}
return_dict = {'number_of_events': (len(self.import_events) / 2), 'total_events': self.import_counter['events']}
try:
results = self.client.bulk(body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if (retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT):
es_logger.error('Unable to add events, reached retry count max.', exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events((retry_count + 1))
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(index_name, {'errors': [], 'types': Counter(), 'details': Counter()})
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name]['details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get('reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(caused_by.get('type', 'Unknown Detailed Type'), ' '.join(caused_reason.split()[:5]))
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(error.get('type', 'Unknown Type'), error.get('reason', 'No reason given'), caused_by.get('type', 'Unknown Type'), caused_reason)
error_list.append(error_msg)
try:
es_logger.error('Unable to upload document: {0:s} to index {1:s} - [{2:d}] {3:s}'.format(doc_id, index_name, status_code, error_msg))
except Exception:
es_logger.error('Unable to upload document, and unable to log the error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict |
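A hedged sketch of inspecting the dictionary returned above; the key names come straight from the function body, while the datastore instance is assumed to exist (see the previous sketch).

# `datastore` is the hypothetical instance from the sketch above.
results = datastore.flush_queued_events()
if results.get('errors_in_upload'):
    for index_name, info in results['error_container'].items():
        # 'types' and 'details' are Counter objects keyed by error type / detail message.
        print(index_name, info['types'].most_common(3))
else:
    print('Flushed {0} events without errors'.format(results.get('number_of_events', 0)))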
@property
def version(self):
'Get Elasticsearch version.\n\n Returns:\n Version number as a string.\n '
version_info = self.client.info().get('version')
return version_info.get('number') | 2,982,666,308,491,461,600 | Get Elasticsearch version.
Returns:
Version number as a string. | timesketch/lib/datastores/elastic.py | version | stevengoossensB/timesketch | python | @property
def version(self):
'Get Elasticsearch version.\n\n Returns:\n Version number as a string.\n '
version_info = self.client.info().get('version')
return version_info.get('number') |
def render(game, current):
' Displays the current room '
print(('You are in the ' + game['rooms'][current]['name']))
print(game['rooms'][current]['desc']) | 3,437,695,610,613,276,000 | Displays the current room | main.py | render | BraffordHunter/03-Text-Adventure-2 | python | def render(game, current):
' '
print(('You are in the ' + game['rooms'][current]['name']))
print(game['rooms'][current]['desc']) |
def getInput():
' Asks the user for input and returns a stripped, uppercase version of what they typed '
response = input('What would you like to do? ').strip().upper()
return response | -8,819,435,133,751,094,000 | Asks the user for input and returns a stripped, uppercase version of what they typed | main.py | getInput | BraffordHunter/03-Text-Adventure-2 | python | def getInput():
' '
response = input('What would you like to do? ').strip().upper()
return response |
def update(response, game, current):
' Process the input and update the state of the world '
for e in game['rooms'][current]['exits']:
if (response == e['verb']):
current = e['target']
return current | 4,104,156,395,958,741,000 | Process the input and update the state of the world | main.py | update | BraffordHunter/03-Text-Adventure-2 | python | def update(response, game, current):
' '
for e in game['rooms'][current]['exits']:
if (response == e['verb']):
current = e['target']
return current |
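The three helpers above expect a game dictionary with a 'rooms' list whose entries carry 'name', 'desc' and 'exits' (verb/target pairs). A minimal, made-up game definition and main loop might look like this.

# Made-up two-room game; verbs are uppercase because getInput() uppercases input.
game = {
    'rooms': [
        {'name': 'foyer', 'desc': 'A small entrance hall.',
         'exits': [{'verb': 'NORTH', 'target': 1}]},
        {'name': 'library', 'desc': 'Shelves of dusty books.',
         'exits': [{'verb': 'SOUTH', 'target': 0}]},
    ]
}

current = 0
while True:
    render(game, current)
    response = getInput()
    if response == 'QUIT':
        break
    current = update(response, game, current)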
def _post_clients(self, client, user_ids, token_generator):
'\n Helper function that creates (and tests creating) a collection of Clients.\n '
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
client_ids = []
for (i, api_client) in enumerate(CLIENTS):
api_client['user_id'] = user_ids[i]
response = client.post('/clients', data=json.dumps(api_client), headers=headers)
expect(response.status_code).to(equal(201))
client_ids.append(response.json['response'][0]['id'])
expect(len(client_ids)).to(equal(8))
return client_ids | -5,197,643,749,388,798,000 | Helper function that creates (and tests creating) a collection of Clients. | tests/api/test_all_apis.py | _post_clients | brighthive/authserver | python | def _post_clients(self, client, user_ids, token_generator):
'\n \n '
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
client_ids = []
for (i, api_client) in enumerate(CLIENTS):
api_client['user_id'] = user_ids[i]
response = client.post('/clients', data=json.dumps(api_client), headers=headers)
expect(response.status_code).to(equal(201))
client_ids.append(response.json['response'][0]['id'])
expect(len(client_ids)).to(equal(8))
return client_ids |
def multicrop_collate_fn(samples):
'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))]
for batch_ele in samples:
multi_crop_imgs = batch_ele[DefaultDataKeys.INPUT]
for (idx, crop) in enumerate(multi_crop_imgs):
inputs[idx].append(crop)
for (idx, ele) in enumerate(inputs):
inputs[idx] = torch.stack(ele)
result[DefaultDataKeys.INPUT] = inputs
return result | 4,826,671,954,298,192,000 | Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT | flash/image/embedding/vissl/transforms/utilities.py | multicrop_collate_fn | Darktex/lightning-flash | python | def multicrop_collate_fn(samples):
'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))]
for batch_ele in samples:
multi_crop_imgs = batch_ele[DefaultDataKeys.INPUT]
for (idx, crop) in enumerate(multi_crop_imgs):
inputs[idx].append(crop)
for (idx, ele) in enumerate(inputs):
inputs[idx] = torch.stack(ele)
result[DefaultDataKeys.INPUT] = inputs
return result |
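A hedged illustration of the expected input and output shapes for the multi-crop collate above; the tensor sizes are arbitrary and it assumes vissl_collate_helper simply collates any remaining keys.

import torch

# Two samples, each with three augmented crops of shape (3, 32, 32); sizes are arbitrary.
samples = [
    {DefaultDataKeys.INPUT: [torch.rand(3, 32, 32) for _ in range(3)]},
    {DefaultDataKeys.INPUT: [torch.rand(3, 32, 32) for _ in range(3)]},
]
batch = multicrop_collate_fn(samples)
# batch[DefaultDataKeys.INPUT] is a list of 3 tensors, one per crop/view,
# each stacked across the batch to shape (2, 3, 32, 32).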
def simclr_collate_fn(samples):
'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = []
num_views = len(samples[0][DefaultDataKeys.INPUT])
view_idx = 0
while (view_idx < num_views):
for batch_ele in samples:
imgs = batch_ele[DefaultDataKeys.INPUT]
inputs.append(imgs[view_idx])
view_idx += 1
result[DefaultDataKeys.INPUT] = torch.stack(inputs)
return result | 1,590,668,760,028,334,600 | Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT | flash/image/embedding/vissl/transforms/utilities.py | simclr_collate_fn | Darktex/lightning-flash | python | def simclr_collate_fn(samples):
'Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = []
num_views = len(samples[0][DefaultDataKeys.INPUT])
view_idx = 0
while (view_idx < num_views):
for batch_ele in samples:
imgs = batch_ele[DefaultDataKeys.INPUT]
inputs.append(imgs[view_idx])
view_idx += 1
result[DefaultDataKeys.INPUT] = torch.stack(inputs)
return result |
def moco_collate_fn(samples):
'MOCO collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = []
for batch_ele in samples:
inputs.append(torch.stack(batch_ele[DefaultDataKeys.INPUT]))
result[DefaultDataKeys.INPUT] = torch.stack(inputs).squeeze()[:, 0, :, :, :].squeeze()
result['data_momentum'] = torch.stack(inputs).squeeze()[:, 1, :, :, :].squeeze()
return result | -102,752,008,453,979,340 | MOCO collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT | flash/image/embedding/vissl/transforms/utilities.py | moco_collate_fn | Darktex/lightning-flash | python | def moco_collate_fn(samples):
'MOCO collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n '
result = vissl_collate_helper(samples)
inputs = []
for batch_ele in samples:
inputs.append(torch.stack(batch_ele[DefaultDataKeys.INPUT]))
result[DefaultDataKeys.INPUT] = torch.stack(inputs).squeeze()[:, 0, :, :, :].squeeze()
result['data_momentum'] = torch.stack(inputs).squeeze()[:, 1, :, :, :].squeeze()
return result |
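Similarly for the MoCo variant above, a hedged sketch: each sample is expected to carry exactly two augmented views, and the sizes here are arbitrary.

import torch

# Four samples, each with exactly two views (query and key); sizes are arbitrary.
samples = [
    {DefaultDataKeys.INPUT: [torch.rand(3, 32, 32), torch.rand(3, 32, 32)]}
    for _ in range(4)
]
batch = moco_collate_fn(samples)
# batch[DefaultDataKeys.INPUT] holds the first view of every sample, shape (4, 3, 32, 32);
# batch['data_momentum'] holds the second view, fed to the momentum encoder.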
@abstractmethod
def __call__(self, location):
'Evaluate the time-continuous posterior for a given location\n\n Parameters\n ----------\n location : float\n Location, or time, at which to evaluate the posterior.\n\n Returns\n -------\n rv : `RandomVariable`\n '
raise NotImplementedError | 2,588,504,303,512,299,000 | Evaluate the time-continuous posterior for a given location
Parameters
----------
location : float
Location, or time, at which to evaluate the posterior.
Returns
-------
rv : `RandomVariable` | src/probnum/filtsmooth/filtsmoothposterior.py | __call__ | admdev8/probnum | python | @abstractmethod
def __call__(self, location):
'Evaluate the time-continuous posterior for a given location\n\n Parameters\n ----------\n location : float\n Location, or time, at which to evaluate the posterior.\n\n Returns\n -------\n rv : `RandomVariable`\n '
raise NotImplementedError |
@abstractmethod
def __len__(self):
'Length of the discrete-time solution\n\n Corresponds to the number of filtering/smoothing steps\n '
raise NotImplementedError | 7,496,453,161,260,714,000 | Length of the discrete-time solution
Corresponds to the number of filtering/smoothing steps | src/probnum/filtsmooth/filtsmoothposterior.py | __len__ | admdev8/probnum | python | @abstractmethod
def __len__(self):
'Length of the discrete-time solution\n\n Corresponds to the number of filtering/smoothing steps\n '
raise NotImplementedError |
@abstractmethod
def __getitem__(self, idx):
'Return the corresponding index/slice of the discrete-time solution'
raise NotImplementedError | -1,963,588,614,465,622,800 | Return the corresponding index/slice of the discrete-time solution | src/probnum/filtsmooth/filtsmoothposterior.py | __getitem__ | admdev8/probnum | python | @abstractmethod
def __getitem__(self, idx):
raise NotImplementedError |
def sample(self, locations=None, size=()):
'\n Draw samples from the filtering/smoothing posterior.\n\n If nothing is specified, a single sample is drawn (supported on self.locations).\n If locations are specified, the samples are drawn on those locations.\n If size is specified, more than a single sample is drawn.\n\n Parameters\n ----------\n locations : array_like, optional\n Locations on which the samples are wanted. Default is none, which implies that\n self.location is used.\n size : int or tuple of ints, optional\n Indicates how many samples are drawn. Default is an empty tuple, in which case\n a single sample is returned.\n\n Returns\n -------\n numpy.ndarray\n Drawn samples. If size has shape (A1, ..., Z1), locations have shape (L,),\n and the state space model has shape (A2, ..., Z2), the output has\n shape (A1, ..., Z1, L, A2, ..., Z2).\n For example: size=4, len(locations)=4, dim=3 gives shape (4, 4, 3).\n\n '
raise NotImplementedError('Sampling not implemented.') | 4,466,780,101,186,818,600 | Draw samples from the filtering/smoothing posterior.
If nothing is specified, a single sample is drawn (supported on self.locations).
If locations are specified, the samples are drawn on those locations.
If size is specified, more than a single sample is drawn.
Parameters
----------
locations : array_like, optional
Locations on which the samples are wanted. Default is none, which implies that
self.location is used.
size : int or tuple of ints, optional
Indicates how many samples are drawn. Default is an empty tuple, in which case
a single sample is returned.
Returns
-------
numpy.ndarray
Drawn samples. If size has shape (A1, ..., Z1), locations have shape (L,),
and the state space model has shape (A2, ..., Z2), the output has
shape (A1, ..., Z1, L, A2, ..., Z2).
For example: size=4, len(locations)=4, dim=3 gives shape (4, 4, 3). | src/probnum/filtsmooth/filtsmoothposterior.py | sample | admdev8/probnum | python | def sample(self, locations=None, size=()):
'\n Draw samples from the filtering/smoothing posterior.\n\n If nothing is specified, a single sample is drawn (supported on self.locations).\n If locations are specified, the samples are drawn on those locations.\n If size is specified, more than a single sample is drawn.\n\n Parameters\n ----------\n locations : array_like, optional\n Locations on which the samples are wanted. Default is none, which implies that\n self.location is used.\n size : int or tuple of ints, optional\n Indicates how many samples are drawn. Default is an empty tuple, in which case\n a single sample is returned.\n\n Returns\n -------\n numpy.ndarray\n Drawn samples. If size has shape (A1, ..., Z1), locations have shape (L,),\n and the state space model has shape (A2, ..., Z2), the output has\n shape (A1, ..., Z1, L, A2, ..., Z2).\n For example: size=4, len(locations)=4, dim=3 gives shape (4, 4, 3).\n\n '
raise NotImplementedError('Sampling not implemented.') |
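A minimal, illustrative subclass of the abstract interface above; the class name and the nearest-location lookup are inventions for this sketch, and a real posterior would return RandomVariable objects rather than raw arrays.

import numpy as np

class DiscretePosterior(FiltSmoothPosterior):
    # Toy posterior backed by precomputed means at fixed locations (illustrative only).

    def __init__(self, locations, means):
        self.locations = np.asarray(locations)
        self.means = np.asarray(means)

    def __call__(self, location):
        # Return the stored value at the closest location (no interpolation).
        idx = np.argmin(np.abs(self.locations - location))
        return self.means[idx]

    def __len__(self):
        return len(self.locations)

    def __getitem__(self, idx):
        return self.means[idx]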
def create_app(config_object='code_runner.settings'):
'Creates and returns flask app instance as well as register all the extensions and blueprints'
app = Flask(__name__)
register_environment()
app.config.from_object(config_object)
register_blueprints(app=app)
register_views(app=app)
register_extensions(app=app)
configure_logger(app=app)
return app | 3,818,114,167,602,064,400 | Creates and returns flask app instance as well as register all the extensions and blueprints | code_runner/app.py | create_app | thephilomaths/code-runner-as-a-service | python | def create_app(config_object='code_runner.settings'):
app = Flask(__name__)
register_environment()
app.config.from_object(config_object)
register_blueprints(app=app)
register_views(app=app)
register_extensions(app=app)
configure_logger(app=app)
return app |
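A hedged sketch of using the factory above for local development; the host, port and debug flag are arbitrary choices, not part of the project configuration.

app = create_app()

if __name__ == '__main__':
    # Arbitrary host/port/debug choices for local development.
    app.run(host='0.0.0.0', port=5000, debug=True)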
def register_blueprints(app):
'Registers the blueprints'
app.register_blueprint(code.views.blueprint) | -6,392,716,567,037,836,000 | Registers the blueprints | code_runner/app.py | register_blueprints | thephilomaths/code-runner-as-a-service | python | def register_blueprints(app):
app.register_blueprint(code.views.blueprint) |
def register_views(app):
'Registers the pluggable views'
run_view = code.views.RunCode.as_view('run')
run_async_view = code.views.RunCodeAsync.as_view('run-async')
app.add_url_rule('/run', view_func=run_view, methods=['POST'])
app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST'])
app.add_url_rule('/get-result/<string:task_id>', view_func=run_async_view, methods=['GET']) | 2,637,482,684,825,603,000 | Registers the pluggable views | code_runner/app.py | register_views | thephilomaths/code-runner-as-a-service | python | def register_views(app):
run_view = code.views.RunCode.as_view('run')
run_async_view = code.views.RunCodeAsync.as_view('run-async')
app.add_url_rule('/run', view_func=run_view, methods=['POST'])
app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST'])
app.add_url_rule('/get-result/<string:task_id>', view_func=run_async_view, methods=['GET']) |
def register_extensions(app):
'Register Flask extensions'
with app.app_context():
db.init_app(app=app)
db.create_all()
limiter.init_app(app=app) | 1,989,962,585,448,259,600 | Register Flask extensions | code_runner/app.py | register_extensions | thephilomaths/code-runner-as-a-service | python | def register_extensions(app):
with app.app_context():
db.init_app(app=app)
db.create_all()
limiter.init_app(app=app) |
def register_environment():
'Register environment'
dotenv_path = (Path('./') / '.env.development.local')
load_dotenv(dotenv_path=dotenv_path) | 4,229,727,122,486,207,000 | Register environment | code_runner/app.py | register_environment | thephilomaths/code-runner-as-a-service | python | def register_environment():
dotenv_path = (Path('./') / '.env.development.local')
load_dotenv(dotenv_path=dotenv_path) |
def configure_logger(app):
'Configure loggers.'
handler = logging.StreamHandler(sys.stdout)
if (not app.logger.handlers):
app.logger.addHandler(handler) | 3,422,815,523,629,059,000 | Configure loggers. | code_runner/app.py | configure_logger | thephilomaths/code-runner-as-a-service | python | def configure_logger(app):
handler = logging.StreamHandler(sys.stdout)
if (not app.logger.handlers):
app.logger.addHandler(handler) |
def computeLPPTransitMetric(data, mapInfo):
'\n This function takes a data class with light curve info\n and the mapInfo with information about the mapping to use.\n It then returns a lpp metric value.\n '
(binFlux, binPhase) = foldBinLightCurve(data, mapInfo.ntrfr, mapInfo.npts)
(rawTLpp, transformedTransit) = computeRawLPPTransitMetric(binFlux, mapInfo)
normTLpp = periodNormalLPPTransitMetric(rawTLpp, np.array([data.period, data.mes]), mapInfo)
return (normTLpp, rawTLpp, transformedTransit) | 7,073,059,742,202,364,000 | This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
It then returns an LPP metric value. | lpp/newlpp/lppTransform.py | computeLPPTransitMetric | barentsen/dave | python | def computeLPPTransitMetric(data, mapInfo):
'\n This function takes a data class with light curve info\n and the mapInfo with information about the mapping to use.\n It then returns a lpp metric value.\n '
(binFlux, binPhase) = foldBinLightCurve(data, mapInfo.ntrfr, mapInfo.npts)
(rawTLpp, transformedTransit) = computeRawLPPTransitMetric(binFlux, mapInfo)
normTLpp = periodNormalLPPTransitMetric(rawTLpp, np.array([data.period, data.mes]), mapInfo)
return (normTLpp, rawTLpp, transformedTransit) |
def runningMedian(t, y, dt, runt):
'\n Take a running median of size dt\n Return values at times given in runt\n '
newy = np.zeros(len(y))
newt = np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy = []
for i in range(len(runt)):
tmp = []
for j in range(len(newt)):
if ((newt[j] >= (runt[i] - dt)) and (newt[j] <= (runt[i] + dt))):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))):
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return (list(runt), runy) | -5,922,158,501,723,082,000 | Take a running median of size dt
Return values at times given in runt | lpp/newlpp/lppTransform.py | runningMedian | barentsen/dave | python | def runningMedian(t, y, dt, runt):
'\n Take a running median of size dt\n Return values at times given in runt\n '
newy = np.zeros(len(y))
newt = np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy = []
for i in range(len(runt)):
tmp = []
for j in range(len(newt)):
if ((newt[j] >= (runt[i] - dt)) and (newt[j] <= (runt[i] + dt))):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))):
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return (list(runt), runy) |
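A small numeric illustration of the running median above; the signal, window size and evaluation grid are made up.

import numpy as np

t = np.arange(0.0, 10.0, 0.1)                     # made-up time samples
y = np.sin(t) + 0.1 * np.random.randn(len(t))     # noisy made-up signal
runt = np.linspace(0.5, 9.5, 19)                  # locations where the median is evaluated

times, medians = runningMedian(t, y, dt=0.5, runt=runt)
# medians[i] is the median of all y values whose time lies within +/- 0.5 of runt[i].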
def foldBinLightCurve(data, ntrfr, npts):
'\n Fold and bin light curve for input to LPP metric calculation\n \n data contains time, tzero, dur, priod,mes and flux (centered around zero)\n \n ntrfr -- number of transit fraction for binning around transit ~1.5\n npts -- number of points in the final binning.\n \n '
phaselc = np.mod(((data.time - (data.tzero - (0.5 * data.period))) / data.period), 1)
flux = data.flux
mes = data.mes
if ((~ np.isnan(data.dur)) & (data.dur > 0)):
transit_dur = data.dur
else:
transit_dur = ((0.2 * data.period) / 24.0)
transit_fr = ((transit_dur / 24.0) / data.period)
if ((transit_fr * ntrfr) > 0.5):
transit_fr = (0.5 / ntrfr)
binover = 1.3
if (mes <= 20):
binover = (((- (1 / 8.0)) * mes) + 3.8)
endfr = 0.03
midfr = 0.11
a = np.concatenate((np.arange(endfr, (0.5 - midfr), (1 / npts)), np.arange((0.5 + midfr), (1 - endfr), (1 / npts))), axis=None)
ovsamp = 4.0
b_num = 41
b = np.linspace((0.5 - (ntrfr * transit_fr)), (0.5 + (ntrfr * transit_fr)), b_num)
[runta, runya] = runningMedian(phaselc, flux, (binover / npts), a)
[runtb, runyb] = runningMedian(phaselc, flux, ((((binover * ovsamp) * ntrfr) * transit_fr) / npts), b)
runymess = np.array((runya + runyb))
runtmess = np.array((runta + runtb))
srt = np.argsort(runtmess)
runy = runymess[srt]
runt = runtmess[srt]
scale = ((- 1) * np.min(runyb))
if (scale != 0):
scaledFlux = (runy / scale)
else:
scaledFlux = runy
binnedFlux = scaledFlux
phasebins = runt
return (binnedFlux, phasebins) | 281,194,665,893,503,000 | Fold and bin light curve for input to LPP metric calculation
data contains time, tzero, dur, period, mes and flux (centered around zero)
ntrfr -- number of transit durations to bin around the transit, ~1.5
npts -- number of points in the final binning. | lpp/newlpp/lppTransform.py | foldBinLightCurve | barentsen/dave | python | def foldBinLightCurve(data, ntrfr, npts):
'\n Fold and bin light curve for input to LPP metric calculation\n \n data contains time, tzero, dur, priod,mes and flux (centered around zero)\n \n ntrfr -- number of transit fraction for binning around transit ~1.5\n npts -- number of points in the final binning.\n \n '
phaselc = np.mod(((data.time - (data.tzero - (0.5 * data.period))) / data.period), 1)
flux = data.flux
mes = data.mes
if ((~ np.isnan(data.dur)) & (data.dur > 0)):
transit_dur = data.dur
else:
transit_dur = ((0.2 * data.period) / 24.0)
transit_fr = ((transit_dur / 24.0) / data.period)
if ((transit_fr * ntrfr) > 0.5):
transit_fr = (0.5 / ntrfr)
binover = 1.3
if (mes <= 20):
binover = (((- (1 / 8.0)) * mes) + 3.8)
endfr = 0.03
midfr = 0.11
a = np.concatenate((np.arange(endfr, (0.5 - midfr), (1 / npts)), np.arange((0.5 + midfr), (1 - endfr), (1 / npts))), axis=None)
ovsamp = 4.0
b_num = 41
b = np.linspace((0.5 - (ntrfr * transit_fr)), (0.5 + (ntrfr * transit_fr)), b_num)
[runta, runya] = runningMedian(phaselc, flux, (binover / npts), a)
[runtb, runyb] = runningMedian(phaselc, flux, ((((binover * ovsamp) * ntrfr) * transit_fr) / npts), b)
runymess = np.array((runya + runyb))
runtmess = np.array((runta + runtb))
srt = np.argsort(runtmess)
runy = runymess[srt]
runt = runtmess[srt]
scale = ((- 1) * np.min(runyb))
if (scale != 0):
scaledFlux = (runy / scale)
else:
scaledFlux = runy
binnedFlux = scaledFlux
phasebins = runt
return (binnedFlux, phasebins) |
def computeRawLPPTransitMetric(binFlux, mapInfo):
'\n Perform the matrix transformation with LPP\n Do the knn test to get a raw LPP transit metric number.\n '
Yorig = mapInfo.YmapMapped
lpp = LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_ = mapInfo.YmapM
normBinFlux = (binFlux - mapInfo.YmapMean)
inputY = lpp.transform(normBinFlux.reshape(1, (- 1)))
knownTransitsY = Yorig[mapInfo.knnGood, :]
(dist, ind) = knnDistance_fromKnown(knownTransitsY, inputY, mapInfo.knn)
rawLppTrMetric = np.mean(dist)
return (rawLppTrMetric, inputY) | 8,917,899,535,312,045,000 | Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number. | lpp/newlpp/lppTransform.py | computeRawLPPTransitMetric | barentsen/dave | python | def computeRawLPPTransitMetric(binFlux, mapInfo):
'\n Perform the matrix transformation with LPP\n Do the knn test to get a raw LPP transit metric number.\n '
Yorig = mapInfo.YmapMapped
lpp = LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_ = mapInfo.YmapM
normBinFlux = (binFlux - mapInfo.YmapMean)
inputY = lpp.transform(normBinFlux.reshape(1, (- 1)))
knownTransitsY = Yorig[mapInfo.knnGood, :]
(dist, ind) = knnDistance_fromKnown(knownTransitsY, inputY, mapInfo.knn)
rawLppTrMetric = np.mean(dist)
return (rawLppTrMetric, inputY) |
def knnDistance_fromKnown(knownTransits, new, knn):
'\n For a group of known transits and a new one.\n Use knn to determine how close the new one is to the known transits\n using knn minkowski p = 3 ()\n Using scipy signal to do this.\n '
nbrs = NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
(distances, indices) = nbrs.kneighbors(new)
return (distances, indices) | -6,694,463,733,298,679,000 | For a group of known transits and a new one.
Use knn to determine how close the new one is to the known transits
using knn with a Minkowski metric (p = 2 in the code)
Using sklearn's NearestNeighbors to do this. | lpp/newlpp/lppTransform.py | knnDistance_fromKnown | barentsen/dave | python | def knnDistance_fromKnown(knownTransits, new, knn):
'\n For a group of known transits and a new one.\n Use knn to determine how close the new one is to the known transits\n using knn minkowski p = 3 ()\n Using scipy signal to do this.\n '
nbrs = NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
(distances, indices) = nbrs.kneighbors(new)
return (distances, indices) |
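A toy illustration of the distance helper above; the two-dimensional points are random stand-ins for the mapped transit representations.

import numpy as np

known = np.random.rand(10, 2)          # ten known transits in the mapped space (random stand-ins)
new = np.array([[0.5, 0.5]])           # one new point, shape (1, n_dim)

dist, ind = knnDistance_fromKnown(known, new, knn=3)
# dist has shape (1, 3): distances to the three nearest known transits;
# ind holds their row indices in `known`.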
def periodNormalLPPTransitMetric(rawTLpp, newPerMes, mapInfo):
'\n Normalize the rawTransitMetric value by those with the closest period.\n This part removes the period dependence of the metric at short periods.\n Plus it makes a value near one be the threshold between good and bad.\n \n newPerMes is the np.array([period, mes]) of the new sample\n '
knownTrPeriods = mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes = mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp = mapInfo.dymeans[mapInfo.knnGood]
nPercentil = mapInfo.nPercentil
nPsample = mapInfo.nPsample
logPeriods = np.log10(knownTrPeriods)
logMes = np.log10(knownTrMes)
knownPerMes = np.stack((logPeriods, logMes), axis=(- 1))
np.shape(knownPerMes)
logNew = np.log10(newPerMes).reshape(1, (- 1))
(dist, ind) = knnDistance_fromKnown(knownPerMes, logNew, nPsample)
nearPeriodLpp = knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp, nPercentil)
NormLppTransitMetric = (rawTLpp / LppNPercentile)
return NormLppTransitMetric | -4,829,747,934,316,969,000 | Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample | lpp/newlpp/lppTransform.py | periodNormalLPPTransitMetric | barentsen/dave | python | def periodNormalLPPTransitMetric(rawTLpp, newPerMes, mapInfo):
'\n Normalize the rawTransitMetric value by those with the closest period.\n This part removes the period dependence of the metric at short periods.\n Plus it makes a value near one be the threshold between good and bad.\n \n newPerMes is the np.array([period, mes]) of the new sample\n '
knownTrPeriods = mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes = mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp = mapInfo.dymeans[mapInfo.knnGood]
nPercentil = mapInfo.nPercentil
nPsample = mapInfo.nPsample
logPeriods = np.log10(knownTrPeriods)
logMes = np.log10(knownTrMes)
knownPerMes = np.stack((logPeriods, logMes), axis=(- 1))
np.shape(knownPerMes)
logNew = np.log10(newPerMes).reshape(1, (- 1))
(dist, ind) = knnDistance_fromKnown(knownPerMes, logNew, nPsample)
nearPeriodLpp = knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp, nPercentil)
NormLppTransitMetric = (rawTLpp / LppNPercentile)
return NormLppTransitMetric |
def lpp_onetransit(tcedata, mapInfo, ntransit):
'\n Chop down the full time series to one orbital period.\n Then gather the lpp value for that one transit.\n '
startTime = (tcedata.time[0] + (ntransit * tcedata.period))
endTime = ((tcedata.time[0] + ((ntransit + 1) * tcedata.period)) + (3 / 24.0))
want = ((tcedata.time >= startTime) & (tcedata.time <= endTime))
newtime = tcedata.time[want]
newflux = tcedata.flux[want]
nExpCad = ((tcedata.time[(- 1)] - tcedata.time[0]) / tcedata.period)
if len((newtime > (nExpCad * 0.75))):
onetransit = copy.deepcopy(tcedata)
onetransit.time = newtime
onetransit.flux = newflux
(normTLpp, rawTLpp, transformedTr) = computeLPPTransitMetric(onetransit, mapInfo)
else:
normTLpp = np.nan
rawTLpp = np.nan
return (normTLpp, rawTLpp) | -5,569,252,872,100,213,000 | Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit. | lpp/newlpp/lppTransform.py | lpp_onetransit | barentsen/dave | python | def lpp_onetransit(tcedata, mapInfo, ntransit):
'\n Chop down the full time series to one orbital period.\n Then gather the lpp value for that one transit.\n '
startTime = (tcedata.time[0] + (ntransit * tcedata.period))
endTime = ((tcedata.time[0] + ((ntransit + 1) * tcedata.period)) + (3 / 24.0))
want = ((tcedata.time >= startTime) & (tcedata.time <= endTime))
newtime = tcedata.time[want]
newflux = tcedata.flux[want]
nExpCad = ((tcedata.time[(- 1)] - tcedata.time[0]) / tcedata.period)
if len((newtime > (nExpCad * 0.75))):
onetransit = copy.deepcopy(tcedata)
onetransit.time = newtime
onetransit.flux = newflux
(normTLpp, rawTLpp, transformedTr) = computeLPPTransitMetric(onetransit, mapInfo)
else:
normTLpp = np.nan
rawTLpp = np.nan
return (normTLpp, rawTLpp) |
def lpp_averageIndivTransit(tcedata, mapInfo):
'\n \n Create the loop over individual transits and return \n array normalized lpp values, mean and std.\n Input TCE object and mapInfo object.\n \n It is unclear that this individual transit approach\n separates out several new false positives.\n It probably would require retuning for low SNR signals.\n \n '
length = (tcedata.time[(- 1)] - tcedata.time[0])
ntransits = int(np.floor((length / tcedata.period)))
lppNorms = np.ones(ntransits)
lppRaws = np.ones(ntransits)
nExpCad = ((tcedata.time[(- 1)] - tcedata.time[0]) / tcedata.period)
for i in range(ntransits):
(lppNorms[i], lppRaws[i]) = lpp_onetransit(tcedata, mapInfo, i)
lppMed = np.nanmedian(lppNorms)
lppStd = np.nanstd(lppNorms)
return (lppNorms, lppMed, lppStd, ntransits) | 4,539,365,381,711,080,400 | Create the loop over individual transits and return
an array of normalized lpp values, their median and std.
Input TCE object and mapInfo object.
It is unclear whether this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals. | lpp/newlpp/lppTransform.py | lpp_averageIndivTransit | barentsen/dave | python | def lpp_averageIndivTransit(tcedata, mapInfo):
'\n \n Create the loop over individual transits and return \n array normalized lpp values, mean and std.\n Input TCE object and mapInfo object.\n \n It is unclear that this individual transit approach\n separates out several new false positives.\n It probably would require retuning for low SNR signals.\n \n '
length = (tcedata.time[(- 1)] - tcedata.time[0])
ntransits = int(np.floor((length / tcedata.period)))
lppNorms = np.ones(ntransits)
lppRaws = np.ones(ntransits)
nExpCad = ((tcedata.time[(- 1)] - tcedata.time[0]) / tcedata.period)
for i in range(ntransits):
(lppNorms[i], lppRaws[i]) = lpp_onetransit(tcedata, mapInfo, i)
lppMed = np.nanmedian(lppNorms)
lppStd = np.nanstd(lppNorms)
return (lppNorms, lppMed, lppStd, ntransits) |
def get_pkg_details(in_file):
'For the new pkg format, we return the size and hashes of the inner pkg part of the file'
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file)
break
else:
raise ValueError("Don't know what to do with file {}".format(in_file))
return details | -1,385,206,209,404,265,200 | For the new pkg format, we return the size and hashes of the inner pkg part of the file | src/conda_package_handling/api.py | get_pkg_details | katietz/conda-package-handling | python | def get_pkg_details(in_file):
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file)
break
else:
raise ValueError("Don't know what to do with file {}".format(in_file))
return details |
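A hedged usage sketch: SUPPORTED_EXTENSIONS is assumed to map the '.tar.bz2' and '.conda' suffixes to their format handlers, and the filename below is invented.

# Invented filename; either supported package format could be passed in.
details = get_pkg_details('numpy-1.21.0-py39_0.conda')
# Expected to describe the size and hash values of the inner pkg component;
# the exact keys depend on the format handler.
print(details)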
def __init__(self, cfg, vis_highest_scoring=True, output_dir='./vis'):
'\n Args:\n cfg (CfgNode):\n vis_highest_scoring (bool): If set to True visualizes only\n the highest scoring prediction\n '
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device('cpu')
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir | 281,400,471,534,412,000 | Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction | demo/demo.py | __init__ | ishanic/MeshRCNN-keypoints | python | def __init__(self, cfg, vis_highest_scoring=True, output_dir='./vis'):
'\n Args:\n cfg (CfgNode):\n vis_highest_scoring (bool): If set to True visualizes only\n the highest scoring prediction\n '
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device('cpu')
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir |
def run_on_image(self, image, focal_length=10.0):
'\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n focal_length (float): the focal_length of the image\n\n Returns:\n predictions (dict): the output of the model.\n '
predictions = self.predictor(image)
image = image[:, :, ::(- 1)]
imsize = [image.shape[0], image.shape[1]]
focal_length = ((image.shape[1] / 32) * focal_length)
K = [focal_length, (image.shape[1] / 2), (image.shape[0] / 2)]
if ('instances' in predictions):
instances = predictions['instances'].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(verts=[mesh[0] for mesh in instances.pred_meshes], faces=[mesh[1] for mesh in instances.pred_meshes])
pred_dz = (instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1]))
tc = (pred_dz.abs().max() + 1.0)
zranges = torch.stack([torch.stack([(tc - (((tc * pred_dz[i]) / 2.0) / focal_length)), (tc + (((tc * pred_dz[i]) / 2.0) / focal_length))]) for i in range(len(meshes))], dim=0)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(meshes, boxes.tensor, zranges, Ks, imsize)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(det_id, image, boxes.tensor[det_id], labels[det_id], scores[det_id], masks[det_id], meshes[det_id])
return predictions | 7,762,340,422,223,548,000 | Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model. | demo/demo.py | run_on_image | ishanic/MeshRCNN-keypoints | python | def run_on_image(self, image, focal_length=10.0):
'\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n focal_length (float): the focal_length of the image\n\n Returns:\n predictions (dict): the output of the model.\n '
predictions = self.predictor(image)
image = image[:, :, ::(- 1)]
imsize = [image.shape[0], image.shape[1]]
focal_length = ((image.shape[1] / 32) * focal_length)
K = [focal_length, (image.shape[1] / 2), (image.shape[0] / 2)]
if ('instances' in predictions):
instances = predictions['instances'].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(verts=[mesh[0] for mesh in instances.pred_meshes], faces=[mesh[1] for mesh in instances.pred_meshes])
pred_dz = (instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1]))
tc = (pred_dz.abs().max() + 1.0)
zranges = torch.stack([torch.stack([(tc - (((tc * pred_dz[i]) / 2.0) / focal_length)), (tc + (((tc * pred_dz[i]) / 2.0) / focal_length))]) for i in range(len(meshes))], dim=0)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(meshes, boxes.tensor, zranges, Ks, imsize)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(det_id, image, boxes.tensor[det_id], labels[det_id], scores[det_id], masks[det_id], meshes[det_id])
return predictions |
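A hedged sketch of driving the demo class above; cfg is assumed to be a detectron2 CfgNode already set up for Mesh R-CNN, and the image path is arbitrary.

import cv2

# cfg is assumed to be a configured detectron2 CfgNode; 'input.jpg' is arbitrary.
demo = VisualizationDemo(cfg, vis_highest_scoring=True, output_dir='./vis')
img = cv2.imread('input.jpg')              # BGR image, as run_on_image expects
predictions = demo.run_on_image(img, focal_length=10.0)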
def __init__(self, host, port=9000, schema=hdfs_schema):
' Currently only host and port are needed '
self.host = host
self.port = port
self.schema = schema
self._path = '/'
self._status = None | 4,427,913,885,585,468,000 | Currently only host and port are needed | hdfshell/cluster.py | __init__ | alingse/hdfshell | python | def __init__(self, host, port=9000, schema=hdfs_schema):
' '
self.host = host
self.port = port
self.schema = schema
self._path = '/'
self._status = None |
@property
def uri_head(self):
' Return the head of the uri'
head = (self.schema + '{}:{}'.format(self.host, self.port))
return head | -5,477,964,233,584,933,000 | Return the head of the uri | hdfshell/cluster.py | uri_head | alingse/hdfshell | python | @property
def uri_head(self):
' '
head = (self.schema + '{}:{}'.format(self.host, self.port))
return head |
@property
def uri(self):
' Return the current path'
_uri = (self.schema + '{}:{}{}'.format(self.host, self.port, self._path))
return _uri | -1,669,485,123,415,073,300 | Return the current path | hdfshell/cluster.py | uri | alingse/hdfshell | python | @property
def uri(self):
' '
_uri = (self.schema + '{}:{}{}'.format(self.host, self.port, self._path))
return _uri |
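A hedged sketch of the two properties above; the Cluster class name is a guess from the module path, and hdfs_schema is assumed to be 'hdfs://'.

# Class name guessed from hdfshell/cluster.py; hdfs_schema assumed to be 'hdfs://'.
cluster = Cluster('namenode.example.com', port=9000)
print(cluster.uri_head)   # e.g. hdfs://namenode.example.com:9000
print(cluster.uri)        # e.g. hdfs://namenode.example.com:9000/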
@click.command(epilog='\x08\nExamples:\n bdt gitlab update-bob -vv\n bdt gitlab update-bob -vv --stable\n')
@click.option('--stable/--beta', help='To use the stable versions in the list and pin packages.')
@verbosity_option()
@bdt.raise_on_error
def update_bob(stable):
'Updates the Bob meta package with new packages.'
import tempfile
from ..ci import read_packages
from ..release import download_path, get_gitlab_instance, get_latest_tag_name
gl = get_gitlab_instance()
nightlies = gl.projects.get('bob/nightlies')
with tempfile.NamedTemporaryFile() as f:
download_path(nightlies, 'order.txt', f.name, ref='master')
packages = read_packages(f.name)
(public_packages, private_packages) = ([], [])
for (n, (package, branch)) in enumerate(packages):
if (package == 'bob/bob'):
continue
use_package = gl.projects.get(package)
is_public = (use_package.attributes['visibility'] == 'public')
if is_public:
public_packages.append(package.replace('bob/', ''))
else:
private_packages.append(package.replace('bob/', ''))
logger.debug('%s is %s', package, ('public' if is_public else 'not public'))
logger.info('Found %d public packages', len(public_packages))
logger.info('The following packages were not public:\n%s', '\n'.join(private_packages))
if stable:
logger.info('Getting latest tag names for the public packages')
tags = [get_latest_tag_name(gl.projects.get(f'bob/{pkg}')) for pkg in public_packages]
public_packages = [f'{pkg} =={tag}' for (pkg, tag) in zip(public_packages, tags)]
logger.info('Updating conda/meta.yaml')
start_tag = '# LIST OF BOB PACKAGES - START'
end_tag = '# LIST OF BOB PACKAGES - END'
with open('conda/meta.yaml') as f:
lines = f.read()
i1 = (lines.find(start_tag) + len(start_tag))
i2 = lines.find(end_tag)
lines = (((lines[:i1] + '\n - '.join(([''] + public_packages))) + '\n ') + lines[i2:])
with open('conda/meta.yaml', 'w') as f:
f.write(lines)
logger.info('Updating requirements.txt')
with open('requirements.txt', 'w') as f:
f.write(('\n'.join(public_packages) + '\n'))
click.echo('You may need to add the ` # [linux]` tag in front of linux only packages in conda/meta.yaml') | -5,205,953,134,817,273,000 | Updates the Bob meta package with new packages. | bob/devtools/scripts/update_bob.py | update_bob | bioidiap/bob.devtools | python | @click.command(epilog='\x08\nExamples:\n bdt gitlab update-bob -vv\n bdt gitlab update-bob -vv --stable\n')
@click.option('--stable/--beta', help='To use the stable versions in the list and pin packages.')
@verbosity_option()
@bdt.raise_on_error
def update_bob(stable):
import tempfile
from ..ci import read_packages
from ..release import download_path, get_gitlab_instance, get_latest_tag_name
gl = get_gitlab_instance()
nightlies = gl.projects.get('bob/nightlies')
with tempfile.NamedTemporaryFile() as f:
download_path(nightlies, 'order.txt', f.name, ref='master')
packages = read_packages(f.name)
(public_packages, private_packages) = ([], [])
for (n, (package, branch)) in enumerate(packages):
if (package == 'bob/bob'):
continue
use_package = gl.projects.get(package)
is_public = (use_package.attributes['visibility'] == 'public')
if is_public:
public_packages.append(package.replace('bob/', ''))
else:
private_packages.append(package.replace('bob/', ''))
logger.debug('%s is %s', package, ('public' if is_public else 'not public'))
logger.info('Found %d public packages', len(public_packages))
logger.info('The following packages were not public:\n%s', '\n'.join(private_packages))
if stable:
logger.info('Getting latest tag names for the public packages')
tags = [get_latest_tag_name(gl.projects.get(f'bob/{pkg}')) for pkg in public_packages]
public_packages = [f'{pkg} =={tag}' for (pkg, tag) in zip(public_packages, tags)]
logger.info('Updating conda/meta.yaml')
start_tag = '# LIST OF BOB PACKAGES - START'
end_tag = '# LIST OF BOB PACKAGES - END'
with open('conda/meta.yaml') as f:
lines = f.read()
i1 = (lines.find(start_tag) + len(start_tag))
i2 = lines.find(end_tag)
lines = (((lines[:i1] + '\n - '.join(([] + public_packages))) + '\n ') + lines[i2:])
with open('conda/meta.yaml', 'w') as f:
f.write(lines)
logger.info('Updating requirements.txt')
with open('requirements.txt', 'w') as f:
f.write(('\n'.join(public_packages) + '\n'))
click.echo('You may need to add the ` # [linux]` tag in front of linux only packages in conda/meta.yaml') |
def _testUploadFileToItem(self, item, name, user, contents):
'\n Uploads a non-empty file to the server.\n '
resp = self.request(path='/file', method='POST', user=user, params={'parentType': 'item', 'parentId': item['_id'], 'name': name, 'size': len(contents)})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
resp = self.request(path='/file/chunk', method='POST', body=contents, user=user, params={'uploadId': uploadId}, type='application/octet-stream')
self.assertStatusOk(resp) | 7,241,848,628,900,935,000 | Uploads a non-empty file to the server. | tests/cases/item_test.py | _testUploadFileToItem | RemiCecchinato/girder | python | def _testUploadFileToItem(self, item, name, user, contents):
'\n \n '
resp = self.request(path='/file', method='POST', user=user, params={'parentType': 'item', 'parentId': item['_id'], 'name': name, 'size': len(contents)})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
resp = self.request(path='/file/chunk', method='POST', body=contents, user=user, params={'uploadId': uploadId}, type='application/octet-stream')
self.assertStatusOk(resp) |
def _testDownloadSingleFileItem(self, item, user, contents):
'\n Downloads a single-file item from the server\n :param item: The item to download.\n :type item: dict\n :param contents: The expected contents.\n :type contents: str\n '
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'], 'attachment; filename="file_1"')
params = {'contentDisposition': 'inline'}
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False, params=params)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'], 'inline; filename="file_1"')
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False, params={'offset': 1})
self.assertStatus(resp, 206)
self.assertEqual(contents[1:], self.getBody(resp)) | -7,198,199,060,866,721,000 | Downloads a single-file item from the server
:param item: The item to download.
:type item: dict
:param contents: The expected contents.
:type contents: str | tests/cases/item_test.py | _testDownloadSingleFileItem | RemiCecchinato/girder | python | def _testDownloadSingleFileItem(self, item, user, contents):
'\n Downloads a single-file item from the server\n :param item: The item to download.\n :type item: dict\n :param contents: The expected contents.\n :type contents: str\n '
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'], 'attachment; filename="file_1"')
params = {'contentDisposition': 'inline'}
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False, params=params)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'], 'inline; filename="file_1"')
resp = self.request(path=('/item/%s/download' % item['_id']), method='GET', user=user, isJson=False, params={'offset': 1})
self.assertStatus(resp, 206)
self.assertEqual(contents[1:], self.getBody(resp)) |
def testItemCrud(self):
'\n Test Create, Read, Update, and Delete of items.\n '
self.ensureRequiredParams(path='/item', method='POST', required=('folderId',), user=self.users[1])
params = {'name': ' ', 'description': ' a description ', 'folderId': self.publicFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertValidationError(resp, 'name')
params['name'] = ' my item name'
params['folderId'] = self.privateFolder['_id']
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['name'], params['name'].strip())
self.assertEqual(item['description'], params['description'].strip())
params = {'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='GET', user=self.users[1], params=params)
self.assertStatus(resp, 403)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], item['_id'])
self.assertEqual(resp.json['_modelType'], 'item')
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['text'] = 'my item name'
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
del params['folderId']
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['limit'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['offset'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
resp = self.request(path='/item', method='GET', user=self.users[0], params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
params = {'name': 'changed name', 'description': 'new description'}
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', params=params, user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], params['name'])
self.assertEqual(resp.json['description'], params['description'])
item = Item().load(item['_id'], force=True)
self.assertFalse(Item().hasAccess(item))
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', user=self.users[0], params={'folderId': self.publicFolder['_id']})
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
self.assertTrue(Item().hasAccess(item))
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', user=self.users[1], params={'folderId': self.privateFolder['_id']})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith('Write access denied for folder'))
resp = self.request(path='/item/', method='PUT', params=params, user=self.users[0])
self.assertStatus(resp, 400)
resp = self.request(path=('/item/%s/blurgh' % item['_id']), method='GET', user=self.users[1])
self.assertStatus(resp, 400)
resp = self.request(path='/item/', method='DELETE', user=self.users[1])
self.assertStatus(resp, 400)
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.READ, save=True)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='DELETE', user=self.users[1])
self.assertStatus(resp, 403)
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='DELETE', user=self.users[1])
self.assertStatusOk(resp)
item = Item().load(item['_id'])
self.assertEqual(item, None) | 648,732,367,353,630,800 | Test Create, Read, Update, and Delete of items. | tests/cases/item_test.py | testItemCrud | RemiCecchinato/girder | python | def testItemCrud(self):
'\n \n '
self.ensureRequiredParams(path='/item', method='POST', required=('folderId',), user=self.users[1])
params = {'name': ' ', 'description': ' a description ', 'folderId': self.publicFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertValidationError(resp, 'name')
params['name'] = ' my item name'
params['folderId'] = self.privateFolder['_id']
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['name'], params['name'].strip())
self.assertEqual(item['description'], params['description'].strip())
params = {'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='GET', user=self.users[1], params=params)
self.assertStatus(resp, 403)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], item['_id'])
self.assertEqual(resp.json['_modelType'], 'item')
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['text'] = 'my item name'
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
del params['folderId']
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['limit'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['offset'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0], params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
resp = self.request(path='/item', method='GET', user=self.users[0], params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
params = {'name': 'changed name', 'description': 'new description'}
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', params=params, user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], params['name'])
self.assertEqual(resp.json['description'], params['description'])
item = Item().load(item['_id'], force=True)
self.assertFalse(Item().hasAccess(item))
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', user=self.users[0], params={'folderId': self.publicFolder['_id']})
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
self.assertTrue(Item().hasAccess(item))
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path=('/item/%s' % item['_id']), method='PUT', user=self.users[1], params={'folderId': self.privateFolder['_id']})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith('Write access denied for folder'))
resp = self.request(path='/item/', method='PUT', params=params, user=self.users[0])
self.assertStatus(resp, 400)
resp = self.request(path=('/item/%s/blurgh' % item['_id']), method='GET', user=self.users[1])
self.assertStatus(resp, 400)
resp = self.request(path='/item/', method='DELETE', user=self.users[1])
self.assertStatus(resp, 400)
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.READ, save=True)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='DELETE', user=self.users[1])
self.assertStatus(resp, 403)
self.publicFolder = Folder().setUserAccess(self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='DELETE', user=self.users[1])
self.assertStatusOk(resp)
item = Item().load(item['_id'])
self.assertEqual(item, None) |
def testItemMetadataCrud(self):
'\n Test CRUD of metadata.\n '
params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foobar']), type='application/json')
item = resp.json
self.assertStatusOk(resp)
self.assertEqual(item['meta'], {})
metadata = {'foo': 'bar', 'test': 2}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
body = '{"key": {"foo": Infinity}}'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=body, type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Error: "Infinity" is not valid JSON.')
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(item['meta'], ['test'])
metadata['nullVal'] = None
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), params={'allowNull': True}, type='application/json')
item = resp.json
self.assertEqual(item['meta']['nullVal'], None)
del metadata['nullVal']
metadata['other'] = 'macguffin'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['other'], metadata['other'])
self.assertEqual(item['meta']['nullVal'], None)
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['other']), type='application/json')
item = resp.json
self.assertNotHasKeys(item['meta'], ['other'])
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foo', 'foo.bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foo', '$bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key $bar: keys must not start with the "$" character.')
metadata = {'test': 'allowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata).replace('"', "'"), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid JSON passed in request body.')
metadata = {'foo.bar': 'notallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
metadata = {'$foobar': 'alsonotallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key $foobar: keys must not start with the "$" character.')
metadata = {'': 'stillnotallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Key names must not be empty.') | -7,451,827,177,975,639,000 | Test CRUD of metadata. | tests/cases/item_test.py | testItemMetadataCrud | RemiCecchinato/girder | python | def testItemMetadataCrud(self):
'\n \n '
params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foobar']), type='application/json')
item = resp.json
self.assertStatusOk(resp)
self.assertEqual(item['meta'], {})
metadata = {'foo': 'bar', 'test': 2}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
body = '{"key": {"foo": Infinity}}'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=body, type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Error: "Infinity" is not valid JSON.')
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(item['meta'], ['test'])
metadata['nullVal'] = None
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), params={'allowNull': True}, type='application/json')
item = resp.json
self.assertEqual(item['meta']['nullVal'], None)
del metadata['nullVal']
metadata['other'] = 'macguffin'
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['other'], metadata['other'])
self.assertEqual(item['meta']['nullVal'], None)
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['other']), type='application/json')
item = resp.json
self.assertNotHasKeys(item['meta'], ['other'])
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foo', 'foo.bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='DELETE', user=self.users[0], body=json.dumps(['foo', '$bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key $bar: keys must not start with the "$" character.')
metadata = {'test': 'allowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata).replace('"', "'"), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid JSON passed in request body.')
metadata = {'foo.bar': 'notallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
metadata = {'$foobar': 'alsonotallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid key $foobar: keys must not start with the "$" character.')
metadata = {'': 'stillnotallowed'}
resp = self.request(path=('/item/%s/metadata' % item['_id']), method='PUT', user=self.users[0], body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Key names must not be empty.') |
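A side note on the metadata endpoint exercised above: the assertions show the server's key rules: keys must not be empty, must not contain '.', and must not start with '$'. A small client-side pre-check mirroring those rules (a hypothetical helper written for illustration, not part of Girder's API) could look like:

def validate_metadata_keys(metadata):
    # Mirrors the key validation asserted in the test above (illustrative only).
    for key in metadata:
        if key == '':
            raise ValueError('Key names must not be empty.')
        if '.' in key:
            raise ValueError('Invalid key %s: keys must not contain the "." character.' % key)
        if key.startswith('$'):
            raise ValueError('Invalid key %s: keys must not start with the "$" character.' % key)

validate_metadata_keys({'foo': 'bar'})      # passes silently
# validate_metadata_keys({'$bad': 1})       # would raise ValueError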
def testItemFiltering(self):
'\n Test filtering private metadata from items.\n '
params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
item['private'] = 'very secret metadata'
item = Item().save(item)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertNotHasKeys(resp.json, ['private']) | -5,949,870,336,204,574,000 | Test filtering private metadata from items. | tests/cases/item_test.py | testItemFiltering | RemiCecchinato/girder | python | def testItemFiltering(self):
'\n \n '
params = {'name': 'item with metadata', 'description': ' a description ', 'folderId': self.privateFolder['_id']}
resp = self.request(path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
item['private'] = 'very secret metadata'
item = Item().save(item)
resp = self.request(path=('/item/%s' % str(item['_id'])), method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertNotHasKeys(resp.json, ['private']) |
def testLazyFieldComputation(self):
'\n Demonstrate that an item that is saved in the database without\n derived fields (like lowerName or baseParentId) get those values\n computed at load() time.\n '
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
del item['lowerName']
del item['baseParentType']
item = Item().save(item, validate=False)
item = Item().find({'_id': item['_id']})[0]
self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))
Item().load(item['_id'], force=True)
item = Item().find({'_id': item['_id']})[0]
self.assertHasKeys(item, ('lowerName', 'baseParentType'))
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentType'], 'user')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder, description=None)
self.assertEqual(item['description'], '')
item['description'] = 1
item = Item().save(item)
item = Item().findOne({'_id': item['_id']})
self.assertEqual(item['description'], '1')
self.assertEqual(item['lowerName'], 'my item name (1)')
del item['lowerName']
item = Item().save(item, validate=False)
item = Item().findOne({'_id': item['_id']})
self.assertNotHasKeys(item, ('lowerName',))
Item().load(item['_id'], force=True)
item = Item().findOne({'_id': item['_id']})
self.assertHasKeys(item, ('lowerName',))
self.assertEqual(item['lowerName'], 'my item name (1)') | 5,206,396,976,131,922,000 | Demonstrate that an item that is saved in the database without
derived fields (like lowerName or baseParentId) get those values
computed at load() time. | tests/cases/item_test.py | testLazyFieldComputation | RemiCecchinato/girder | python | def testLazyFieldComputation(self):
'\n Demonstrate that an item that is saved in the database without\n derived fields (like lowerName or baseParentId) get those values\n computed at load() time.\n '
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
del item['lowerName']
del item['baseParentType']
item = Item().save(item, validate=False)
item = Item().find({'_id': item['_id']})[0]
self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))
Item().load(item['_id'], force=True)
item = Item().find({'_id': item['_id']})[0]
self.assertHasKeys(item, ('lowerName', 'baseParentType'))
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentType'], 'user')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder, description=None)
self.assertEqual(item['description'], '')
item['description'] = 1
item = Item().save(item)
item = Item().findOne({'_id': item['_id']})
self.assertEqual(item['description'], '1')
self.assertEqual(item['lowerName'], 'my item name (1)')
del item['lowerName']
item = Item().save(item, validate=False)
item = Item().findOne({'_id': item['_id']})
self.assertNotHasKeys(item, ('lowerName',))
Item().load(item['_id'], force=True)
item = Item().findOne({'_id': item['_id']})
self.assertHasKeys(item, ('lowerName',))
self.assertEqual(item['lowerName'], 'my item name (1)') |
def testParentsToRoot(self):
'\n Demonstrate that forcing parentsToRoot will cause it to skip the\n filtering process.\n '
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
parents = Item().parentsToRoot(item, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Item().parentsToRoot(item)
for parent in parents:
self.assertIn('_accessLevel', parent['object']) | 4,241,321,206,045,767,700 | Demonstrate that forcing parentsToRoot will cause it to skip the
filtering process. | tests/cases/item_test.py | testParentsToRoot | RemiCecchinato/girder | python | def testParentsToRoot(self):
'\n Demonstrate that forcing parentsToRoot will cause it to skip the\n filtering process.\n '
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
parents = Item().parentsToRoot(item, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Item().parentsToRoot(item)
for parent in parents:
self.assertIn('_accessLevel', parent['object']) |
def testCookieAuth(self):
"\n We make sure a cookie is sufficient for authentication for the item\n download endpoint. Also, while we're at it, we make sure it's not\n sufficient for other endpoints.\n "
item = self._createItem(self.privateFolder['_id'], 'cookie_auth_download', '', self.users[0])
self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
token = Token().createToken(self.users[0])
cookie = ('girderToken=%s' % token['_id'])
resp = self.request(path=('/item/%s/download' % item['_id']), isJson=False, cookie=cookie)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'foo')
resp = self.request(path=('/item/%s' % item['_id']), cookie=cookie)
self.assertStatus(resp, 401)
resp = self.request(path=('/item/%s/download' % item['_id']), cookie='girderToken=invalid_token')
self.assertStatus(resp, 401) | -6,878,945,270,122,662,000 | We make sure a cookie is sufficient for authentication for the item
download endpoint. Also, while we're at it, we make sure it's not
sufficient for other endpoints. | tests/cases/item_test.py | testCookieAuth | RemiCecchinato/girder | python | def testCookieAuth(self):
"\n We make sure a cookie is sufficient for authentication for the item\n download endpoint. Also, while we're at it, we make sure it's not\n sufficient for other endpoints.\n "
item = self._createItem(self.privateFolder['_id'], 'cookie_auth_download', '', self.users[0])
self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
token = Token().createToken(self.users[0])
cookie = ('girderToken=%s' % token['_id'])
resp = self.request(path=('/item/%s/download' % item['_id']), isJson=False, cookie=cookie)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'foo')
resp = self.request(path=('/item/%s' % item['_id']), cookie=cookie)
self.assertStatus(resp, 401)
resp = self.request(path=('/item/%s/download' % item['_id']), cookie='girderToken=invalid_token')
self.assertStatus(resp, 401) |
def subtract_signal(t, signal, fit_params=3):
'\n\n Returns the subtracted signal\n\n '
coef = np.polynomial.polynomial.polyfit(t, signal, (fit_params - 1))
delta_signal = np.einsum('n,nj->j', coef, np.asarray([np.power(t, n) for n in range(fit_params)]))
ht = (signal - delta_signal)
return ht | -3,028,313,951,607,885,000 | Returns the subtracted signal | src/signals.py | subtract_signal | delos/dm-pta-mc | python | def subtract_signal(t, signal, fit_params=3):
'\n\n \n\n '
coef = np.polynomial.polynomial.polyfit(t, signal, (fit_params - 1))
delta_signal = np.einsum('n,nj->j', coef, np.asarray([np.power(t, n) for n in range(fit_params)]))
ht = (signal - delta_signal)
return ht |
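For context, subtract_signal above fits a low-order polynomial (fit_params coefficients, i.e. degree fit_params - 1) to the timing signal and returns the residual. A self-contained usage sketch on synthetic data (the cadence and amplitudes are made up for illustration):

import numpy as np

t = np.linspace(0.0, 20.0, 1000)                                     # observation times, assumed units of years
signal = 3e-6 * t + 2e-7 * t**2 + 1e-7 * np.sin(2 * np.pi * t / 7)   # toy signal with a smooth trend

# the same fit-and-subtract steps as subtract_signal(t, signal, fit_params=3)
coef = np.polynomial.polynomial.polyfit(t, signal, 2)                # coefficients, lowest order first
trend = np.polynomial.polynomial.polyval(t, coef)                    # equivalent to the einsum in the function
residual = signal - trend                                            # what subtract_signal returns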
def dphi_dop_chunked(t, profile, r0_vec, v_vec, d_hat, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf):
'\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n '
num_objects = len(list(profile.items())[0][1])
dphi = np.zeros(len(t))
if (use_chunk == True):
if ((num_objects % chunk_size) == 0):
num_chunks = (num_objects // chunk_size)
else:
num_chunks = ((num_objects // chunk_size) + 1)
if verbose:
print((' Chunking data (%d chunks) ... ' % num_chunks))
print()
for i in range(num_chunks):
if (time() > time_end):
raise TimeoutError
r0_c = r0_vec[(i * chunk_size):((i + 1) * chunk_size)]
v_c = v_vec[(i * chunk_size):((i + 1) * chunk_size)]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][(i * chunk_size):((i + 1) * chunk_size)]
dphi += dphi_dop(t, profile_c, r0_c, v_c, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
else:
dphi += dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi | -6,609,646,367,769,294,000 | Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to
store in memory | src/signals.py | dphi_dop_chunked | delos/dm-pta-mc | python | def dphi_dop_chunked(t, profile, r0_vec, v_vec, d_hat, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf):
'\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n '
num_objects = len(list(profile.items())[0][1])
dphi = np.zeros(len(t))
if (use_chunk == True):
if ((num_objects % chunk_size) == 0):
num_chunks = (num_objects // chunk_size)
else:
num_chunks = ((num_objects // chunk_size) + 1)
if verbose:
print((' Chunking data (%d chunks) ... ' % num_chunks))
print()
for i in range(num_chunks):
if (time() > time_end):
raise TimeoutError
r0_c = r0_vec[(i * chunk_size):((i + 1) * chunk_size)]
v_c = v_vec[(i * chunk_size):((i + 1) * chunk_size)]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][(i * chunk_size):((i + 1) * chunk_size)]
dphi += dphi_dop(t, profile_c, r0_c, v_c, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
else:
dphi += dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi |
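The chunking in dphi_dop_chunked is purely a memory-control pattern: instead of materializing the full Nt x N array of per-subhalo contributions, subhalos are processed chunk_size at a time and summed into a running total. A stripped-down sketch of the same accumulation loop, with a generic placeholder compute() standing in for dphi_dop:

import numpy as np

def accumulate_in_chunks(t, r0_vec, v_vec, compute, chunk_size=10000):
    # compute(t, r0_chunk, v_chunk) must return an array of shape (len(t),)
    total = np.zeros(len(t))
    for start in range(0, len(r0_vec), chunk_size):
        stop = start + chunk_size
        total += compute(t, r0_vec[start:stop], v_vec[start:stop])
    return total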
def dphi_dop_chunked_vec(t, profile, r0_vec, v_vec, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf):
'\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n '
num_objects = len(list(profile.items())[0][1])
dphi_vec = np.zeros((len(t), 3))
if (use_chunk == True):
if verbose:
print(' Chunking data ... ')
print()
if ((num_objects % chunk_size) == 0):
num_chunks = (num_objects // chunk_size)
else:
num_chunks = ((num_objects // chunk_size) + 1)
for i in range(num_chunks):
if (time() > time_end):
raise TimeoutError
r0_c = r0_vec[(i * chunk_size):((i + 1) * chunk_size)]
v_c = v_vec[(i * chunk_size):((i + 1) * chunk_size)]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][(i * chunk_size):((i + 1) * chunk_size)]
dphi_vec += dphi_dop_vec(t, profile_c, r0_c, v_c, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
else:
dphi_vec += dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi_vec | 2,014,472,338,040,164,900 | Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to
store in memory | src/signals.py | dphi_dop_chunked_vec | delos/dm-pta-mc | python | def dphi_dop_chunked_vec(t, profile, r0_vec, v_vec, use_form=False, use_chunk=False, chunk_size=10000, verbose=False, form_fun=None, interp_table=None, time_end=np.inf):
'\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n '
num_objects = len(list(profile.items())[0][1])
dphi_vec = np.zeros((len(t), 3))
if (use_chunk == True):
if verbose:
print(' Chunking data ... ')
print()
if ((num_objects % chunk_size) == 0):
num_chunks = (num_objects // chunk_size)
else:
num_chunks = ((num_objects // chunk_size) + 1)
for i in range(num_chunks):
if (time() > time_end):
raise TimeoutError
r0_c = r0_vec[(i * chunk_size):((i + 1) * chunk_size)]
v_c = v_vec[(i * chunk_size):((i + 1) * chunk_size)]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][(i * chunk_size):((i + 1) * chunk_size)]
dphi_vec += dphi_dop_vec(t, profile_c, r0_c, v_c, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
else:
dphi_vec += dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi_vec |
def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None, interp_table=None):
'\n\n Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.\n Dot with d_hat to get dphi_I\n\n TODO: add use_closest option\n\n '
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum('ij, ij -> i', r0_vec, v_vec)
t0 = ((- r0_v) / np.square(v_mag))
b_vec = (r0_vec + (v_vec * t0[:, np.newaxis]))
b_mag = np.linalg.norm(b_vec, axis=1)
tau = (b_mag / v_mag)
b_hat = (b_vec / b_mag[:, np.newaxis])
v_hat = (v_vec / v_mag[:, np.newaxis])
x = (np.subtract.outer(t, t0) / tau)
x0 = ((- t0) / tau)
prefactor = ((const.yr_to_s * const.GN) / ((const.km_s_to_kpc_yr * const.c_light) * np.square(v_mag)))
if (interp_table is None):
bd_term = ((np.sqrt((1 + (x ** 2))) + x) - (np.sqrt((1 + (x0 ** 2))) + x0))
vd_term = (np.arcsinh(x) - np.arcsinh(x0))
if ('M' in list(profile)):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
rv = (((((3 * profile['M']) / (4 * np.pi)) * (1 / 200)) * (1 / const.rho_crit)) ** (1 / 3))
form_func = np.where((r_cl < rv), form((r_cl / rv), profile['c']), 1)
bd_term *= (prefactor * form_func)
vd_term *= (prefactor * form_func)
else:
bd_term = (prefactor * bd_term)
vd_term = (prefactor * vd_term)
elif (form_fun is not None):
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
bd_term *= (prefactor * form_func)
vd_term *= (prefactor * form_func)
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = (b_mag / profile['rs'])
(bd_term0, vd_term0) = interp_table.bd_vd_terms(x0, y)
y.shape = (1, (- 1))
y = np.broadcast_to(y, x.shape)
(bd_term, vd_term) = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
bd_term *= ((prefactor * profile['rhos']) * (profile['rs'] ** 3))
vd_term *= ((prefactor * profile['rhos']) * (profile['rs'] ** 3))
sig = (np.einsum('to, oi -> ti', bd_term, b_hat) - np.einsum('to, oi -> ti', vd_term, v_hat))
return sig | -1,128,212,848,609,852,800 | Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.
Dot with d_hat to get dphi_I
TODO: add use_closest option | src/signals.py | dphi_dop_vec | delos/dm-pta-mc | python | def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None, interp_table=None):
'\n\n Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.\n Dot with d_hat to get dphi_I\n\n TODO: add use_closest option\n\n '
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum('ij, ij -> i', r0_vec, v_vec)
t0 = ((- r0_v) / np.square(v_mag))
b_vec = (r0_vec + (v_vec * t0[:, np.newaxis]))
b_mag = np.linalg.norm(b_vec, axis=1)
tau = (b_mag / v_mag)
b_hat = (b_vec / b_mag[:, np.newaxis])
v_hat = (v_vec / v_mag[:, np.newaxis])
x = (np.subtract.outer(t, t0) / tau)
x0 = ((- t0) / tau)
prefactor = ((const.yr_to_s * const.GN) / ((const.km_s_to_kpc_yr * const.c_light) * np.square(v_mag)))
if (interp_table is None):
bd_term = ((np.sqrt((1 + (x ** 2))) + x) - (np.sqrt((1 + (x0 ** 2))) + x0))
vd_term = (np.arcsinh(x) - np.arcsinh(x0))
if ('M' in list(profile)):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
rv = (((((3 * profile['M']) / (4 * np.pi)) * (1 / 200)) * (1 / const.rho_crit)) ** (1 / 3))
form_func = np.where((r_cl < rv), form((r_cl / rv), profile['c']), 1)
bd_term *= (prefactor * form_func)
vd_term *= (prefactor * form_func)
else:
bd_term = (prefactor * bd_term)
vd_term = (prefactor * vd_term)
elif (form_fun is not None):
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
bd_term *= (prefactor * form_func)
vd_term *= (prefactor * form_func)
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = (b_mag / profile['rs'])
(bd_term0, vd_term0) = interp_table.bd_vd_terms(x0, y)
y.shape = (1, (- 1))
y = np.broadcast_to(y, x.shape)
(bd_term, vd_term) = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
bd_term *= ((prefactor * profile['rhos']) * (profile['rs'] ** 3))
vd_term *= ((prefactor * profile['rhos']) * (profile['rs'] ** 3))
sig = (np.einsum('to, oi -> ti', bd_term, b_hat) - np.einsum('to, oi -> ti', vd_term, v_hat))
return sig |
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None, interp_table=None):
'\n\n Returns the phase shift due to the Doppler delay for subhalos of mass, mass\n\n TODO: add use_closest option\n\n '
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum('ij, ij -> i', r0_vec, v_vec)
t0 = ((- r0_v) / np.square(v_mag))
b_vec = (r0_vec + (v_vec * t0[:, np.newaxis]))
b_mag = np.linalg.norm(b_vec, axis=1)
tau = (b_mag / v_mag)
b_hat = (b_vec / b_mag[:, np.newaxis])
v_hat = (v_vec / v_mag[:, np.newaxis])
b_d = np.dot(b_hat, d_hat)
v_d = np.dot(v_hat, d_hat)
x = (np.subtract.outer(t, t0) / tau)
x0 = ((- t0) / tau)
prefactor = ((const.yr_to_s * const.GN) / ((const.km_s_to_kpc_yr * const.c_light) * np.square(v_mag)))
if (interp_table is None):
bd_term = ((np.sqrt((1 + (x ** 2))) + x) - (np.sqrt((1 + (x0 ** 2))) + x0))
vd_term = (np.arcsinh(x) - np.arcsinh(x0))
sig = ((bd_term * b_d) - (vd_term * v_d))
if ('M' in list(profile)):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
rv = (((((3 * profile['M']) / (4 * np.pi)) * (1 / 200)) * (1 / const.rho_crit)) ** (1 / 3))
form_func = np.where((r_cl < rv), form((r_cl / rv), profile['c']), 1)
sig = (form_func * sig)
elif (form_fun is not None):
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
sig = (form_func * sig)
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = (b_mag / profile['rs'])
(bd_term0, vd_term0) = interp_table.bd_vd_terms(x0, y)
y.shape = (1, (- 1))
y = np.broadcast_to(y, x.shape)
(bd_term, vd_term) = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
sig = ((profile['rhos'] * (profile['rs'] ** 3)) * ((bd_term * b_d) + (vd_term * v_d)))
sig = (prefactor * sig)
return np.sum(sig, axis=(- 1)) | 1,391,388,544,967,027,700 | Returns the phase shift due to the Doppler delay for subhalos of mass, mass
TODO: add use_closest option | src/signals.py | dphi_dop | delos/dm-pta-mc | python | def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None, interp_table=None):
'\n\n Returns the phase shift due to the Doppler delay for subhalos of mass, mass\n\n TODO: add use_closest option\n\n '
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum('ij, ij -> i', r0_vec, v_vec)
t0 = ((- r0_v) / np.square(v_mag))
b_vec = (r0_vec + (v_vec * t0[:, np.newaxis]))
b_mag = np.linalg.norm(b_vec, axis=1)
tau = (b_mag / v_mag)
b_hat = (b_vec / b_mag[:, np.newaxis])
v_hat = (v_vec / v_mag[:, np.newaxis])
b_d = np.dot(b_hat, d_hat)
v_d = np.dot(v_hat, d_hat)
x = (np.subtract.outer(t, t0) / tau)
x0 = ((- t0) / tau)
prefactor = ((const.yr_to_s * const.GN) / ((const.km_s_to_kpc_yr * const.c_light) * np.square(v_mag)))
if (interp_table is None):
bd_term = ((np.sqrt((1 + (x ** 2))) + x) - (np.sqrt((1 + (x0 ** 2))) + x0))
vd_term = (np.arcsinh(x) - np.arcsinh(x0))
sig = ((bd_term * b_d) - (vd_term * v_d))
if ('M' in list(profile)):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
rv = (((((3 * profile['M']) / (4 * np.pi)) * (1 / 200)) * (1 / const.rho_crit)) ** (1 / 3))
form_func = np.where((r_cl < rv), form((r_cl / rv), profile['c']), 1)
sig = (form_func * sig)
elif (form_fun is not None):
t_cl = np.maximum(np.minimum(t0, t[(- 1)]), 0)
x_cl = ((t_cl - t0) / tau)
r_cl = ((tau * v_mag) * np.sqrt((1 + (x_cl ** 2))))
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
sig = (form_func * sig)
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = (b_mag / profile['rs'])
(bd_term0, vd_term0) = interp_table.bd_vd_terms(x0, y)
y.shape = (1, (- 1))
y = np.broadcast_to(y, x.shape)
(bd_term, vd_term) = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
sig = ((profile['rhos'] * (profile['rs'] ** 3)) * ((bd_term * b_d) + (vd_term * v_d)))
sig = (prefactor * sig)
return np.sum(sig, axis=(- 1)) |
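Reading the geometry off dphi_dop and dphi_dop_vec (notation introduced here for illustration, inferred from the code rather than taken from an accompanying reference): each subhalo trajectory $\vec r(t) = \vec r_0 + \vec v\,t$ is re-parametrized around its closest approach,

$t_0 = -\dfrac{\vec r_0 \cdot \vec v}{v^2}, \qquad \vec b = \vec r_0 + \vec v\,t_0, \qquad \tau = \dfrac{b}{v}, \qquad x = \dfrac{t - t_0}{\tau}, \qquad x_0 = -\dfrac{t_0}{\tau},$

and the point-mass branch of the Doppler signal is, up to the unit-conversion prefactor $\propto G M / (c\, v^2)$,

$\delta\phi(t) \propto \Big[\big(\sqrt{1+x^2}+x\big)-\big(\sqrt{1+x_0^2}+x_0\big)\Big]\,(\hat b\cdot\hat d) \;-\; \big[\operatorname{arcsinh} x - \operatorname{arcsinh} x_0\big]\,(\hat v\cdot\hat d),$

with the finite-size branches (use_form, form_fun, interp_table) weighting these kernels by a density-profile correction evaluated at the closest-approach distance clamped to the observation window.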
@property
def inserted(self):
'Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement\n\n MySQL\'s ON DUPLICATE KEY UPDATE clause allows reference to the row\n that would be inserted, via a special function called ``VALUES()``.\n This attribute provides all columns in this row to be referenceable\n such that they will render within a ``VALUES()`` function inside the\n ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``\n so as not to conflict with the existing\n :meth:`_expression.Insert.values` method.\n\n .. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance\n of :class:`_expression.ColumnCollection`, which provides an\n interface the same as that of the :attr:`_schema.Table.c`\n collection described at :ref:`metadata_tables_and_columns`.\n With this collection, ordinary names are accessible like attributes\n (e.g. ``stmt.inserted.some_column``), but special names and\n dictionary method names should be accessed using indexed access,\n such as ``stmt.inserted["column name"]`` or\n ``stmt.inserted["values"]``. See the docstring for\n :class:`_expression.ColumnCollection` for further examples.\n\n .. seealso::\n\n :ref:`mysql_insert_on_duplicate_key_update` - example of how\n to use :attr:`_expression.Insert.inserted`\n\n '
return self.inserted_alias.columns | -8,385,649,932,417,646,000 | Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement
MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
that would be inserted, via a special function called ``VALUES()``.
This attribute provides all columns in this row to be referenceable
such that they will render within a ``VALUES()`` function inside the
ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``
so as not to conflict with the existing
:meth:`_expression.Insert.values` method.
.. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance
of :class:`_expression.ColumnCollection`, which provides an
interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.inserted.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.inserted["column name"]`` or
``stmt.inserted["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` - example of how
to use :attr:`_expression.Insert.inserted` | virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | inserted | Ag-nes/Blog | python | @property
def inserted(self):
'Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement\n\n MySQL\'s ON DUPLICATE KEY UPDATE clause allows reference to the row\n that would be inserted, via a special function called ``VALUES()``.\n This attribute provides all columns in this row to be referenceable\n such that they will render within a ``VALUES()`` function inside the\n ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``\n so as not to conflict with the existing\n :meth:`_expression.Insert.values` method.\n\n .. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance\n of :class:`_expression.ColumnCollection`, which provides an\n interface the same as that of the :attr:`_schema.Table.c`\n collection described at :ref:`metadata_tables_and_columns`.\n With this collection, ordinary names are accessible like attributes\n (e.g. ``stmt.inserted.some_column``), but special names and\n dictionary method names should be accessed using indexed access,\n such as ``stmt.inserted["column name"]`` or\n ``stmt.inserted["values"]``. See the docstring for\n :class:`_expression.ColumnCollection` for further examples.\n\n .. seealso::\n\n :ref:`mysql_insert_on_duplicate_key_update` - example of how\n to use :attr:`_expression.Insert.inserted`\n\n '
return self.inserted_alias.columns |
@_generative
@_exclusive_against('_post_values_clause', msgs={'_post_values_clause': 'This Insert construct already has an ON DUPLICATE KEY clause present'})
def on_duplicate_key_update(self, *args, **kw):
'\n Specifies the ON DUPLICATE KEY UPDATE clause.\n\n :param \\**kw: Column keys linked to UPDATE values. The\n values may be any SQL expression or supported literal Python\n values.\n\n .. warning:: This dictionary does **not** take into account\n Python-specified default UPDATE values or generation functions,\n e.g. those specified using :paramref:`_schema.Column.onupdate`.\n These values will not be exercised for an ON DUPLICATE KEY UPDATE\n style of UPDATE, unless values are manually specified here.\n\n :param \\*args: As an alternative to passing key/value parameters,\n a dictionary or list of 2-tuples can be passed as a single positional\n argument.\n\n Passing a single dictionary is equivalent to the keyword argument\n form::\n\n insert().on_duplicate_key_update({"name": "some name"})\n\n Passing a list of 2-tuples indicates that the parameter assignments\n in the UPDATE clause should be ordered as sent, in a manner similar\n to that described for the :class:`_expression.Update`\n construct overall\n in :ref:`updates_order_parameters`::\n\n insert().on_duplicate_key_update(\n [("name", "some name"), ("value", "some value")])\n\n .. versionchanged:: 1.3 parameters can be specified as a dictionary\n or list of 2-tuples; the latter form provides for parameter\n ordering.\n\n\n .. versionadded:: 1.2\n\n .. seealso::\n\n :ref:`mysql_insert_on_duplicate_key_update`\n\n '
if (args and kw):
raise exc.ArgumentError("Can't pass kwargs and positional arguments simultaneously")
if args:
if (len(args) > 1):
raise exc.ArgumentError('Only a single dictionary or list of tuples is accepted positionally.')
values = args[0]
else:
values = kw
inserted_alias = getattr(self, 'inserted_alias', None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values) | 7,189,407,818,811,196,000 | Specifies the ON DUPLICATE KEY UPDATE clause.
:param \**kw: Column keys linked to UPDATE values. The
values may be any SQL expression or supported literal Python
values.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY UPDATE
style of UPDATE, unless values are manually specified here.
:param \*args: As an alternative to passing key/value parameters,
a dictionary or list of 2-tuples can be passed as a single positional
argument.
Passing a single dictionary is equivalent to the keyword argument
form::
insert().on_duplicate_key_update({"name": "some name"})
Passing a list of 2-tuples indicates that the parameter assignments
in the UPDATE clause should be ordered as sent, in a manner similar
to that described for the :class:`_expression.Update`
construct overall
in :ref:`updates_order_parameters`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
ordering.
.. versionadded:: 1.2
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` | virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | on_duplicate_key_update | Ag-nes/Blog | python | @_generative
@_exclusive_against('_post_values_clause', msgs={'_post_values_clause': 'This Insert construct already has an ON DUPLICATE KEY clause present'})
def on_duplicate_key_update(self, *args, **kw):
'\n Specifies the ON DUPLICATE KEY UPDATE clause.\n\n :param \\**kw: Column keys linked to UPDATE values. The\n values may be any SQL expression or supported literal Python\n values.\n\n .. warning:: This dictionary does **not** take into account\n Python-specified default UPDATE values or generation functions,\n e.g. those specified using :paramref:`_schema.Column.onupdate`.\n These values will not be exercised for an ON DUPLICATE KEY UPDATE\n style of UPDATE, unless values are manually specified here.\n\n :param \\*args: As an alternative to passing key/value parameters,\n a dictionary or list of 2-tuples can be passed as a single positional\n argument.\n\n Passing a single dictionary is equivalent to the keyword argument\n form::\n\n insert().on_duplicate_key_update({"name": "some name"})\n\n Passing a list of 2-tuples indicates that the parameter assignments\n in the UPDATE clause should be ordered as sent, in a manner similar\n to that described for the :class:`_expression.Update`\n construct overall\n in :ref:`updates_order_parameters`::\n\n insert().on_duplicate_key_update(\n [("name", "some name"), ("value", "some value")])\n\n .. versionchanged:: 1.3 parameters can be specified as a dictionary\n or list of 2-tuples; the latter form provides for parameter\n ordering.\n\n\n .. versionadded:: 1.2\n\n .. seealso::\n\n :ref:`mysql_insert_on_duplicate_key_update`\n\n '
if (args and kw):
raise exc.ArgumentError("Can't pass kwargs and positional arguments simultaneously")
if args:
if (len(args) > 1):
raise exc.ArgumentError('Only a single dictionary or list of tuples is accepted positionally.')
values = args[0]
else:
values = kw
inserted_alias = getattr(self, 'inserted_alias', None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values) |
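Putting the two pieces of this dialect construct together (the inserted namespace and on_duplicate_key_update), a typical upsert reads as below; the table definition and connection handling are assumptions made for illustration, not part of this module:

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.dialects.mysql import insert

metadata = MetaData()
my_table = Table(                                   # hypothetical table with a primary key on `id`
    'my_table', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', String(50)),
)

stmt = insert(my_table).values(id=1, data='some value')
stmt = stmt.on_duplicate_key_update(data=stmt.inserted.data)   # renders VALUES(data) in the UPDATE clause
# execution depends on your environment, e.g.:
# with engine.begin() as conn:
#     conn.execute(stmt)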
def _compare_text_filters(self, first: TextFilter, second: TextFilter):
'\n\n :param first: TextFilter\n :param second: TextFilter\n :return: bool\n '
self.assertEqual(str(first.x), str(second.x))
self.assertEqual(str(first.y), str(second.y))
self.assertEqual(first.text, second.text)
self.assertEqual(first.timecode, second.timecode)
self.assertEqual(first.shadowY, second.shadowX)
self.assertEqual(first.shadowX, second.shadowX)
self.assertEqual(first.shadowColor, second.shadowColor)
self.assertEqual(first.alpha, second.alpha)
self.assertEqual(first.fontSize, second.fontSize)
self.assertEqual(first.font, second.font)
self.assertEqual(first.fontColor, second.fontColor)
self.assertEqual(first.fixBounds, second.fixBounds)
self.assertEqual(first.borderWidth, second.borderWidth)
self.assertEqual(first.lineSpacing, second.lineSpacing)
self.assertEqual(first.boxColor, second.boxColor)
self.assertEqual(first.boxBorderWidth, second.boxBorderWidth)
self.assertEqual(first.box, second.box)
self.assertEqual(first.description, second.description)
self.assertEqual(first.name, second.name)
return True | 2,669,470,008,177,207,300 | :param first: TextFilter
:param second: TextFilter
:return: bool | tests/bitmovin/services/filters/text_filter_tests.py | _compare_text_filters | bitmovin/bitmovin-python | python | def _compare_text_filters(self, first: TextFilter, second: TextFilter):
'\n\n :param first: TextFilter\n :param second: TextFilter\n :return: bool\n '
self.assertEqual(str(first.x), str(second.x))
self.assertEqual(str(first.y), str(second.y))
self.assertEqual(first.text, second.text)
self.assertEqual(first.timecode, second.timecode)
self.assertEqual(first.shadowY, second.shadowX)
self.assertEqual(first.shadowX, second.shadowX)
self.assertEqual(first.shadowColor, second.shadowColor)
self.assertEqual(first.alpha, second.alpha)
self.assertEqual(first.fontSize, second.fontSize)
self.assertEqual(first.font, second.font)
self.assertEqual(first.fontColor, second.fontColor)
self.assertEqual(first.fixBounds, second.fixBounds)
self.assertEqual(first.borderWidth, second.borderWidth)
self.assertEqual(first.lineSpacing, second.lineSpacing)
self.assertEqual(first.boxColor, second.boxColor)
self.assertEqual(first.boxBorderWidth, second.boxBorderWidth)
self.assertEqual(first.box, second.box)
self.assertEqual(first.description, second.description)
self.assertEqual(first.name, second.name)
return True |
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
'\n Shows what keys are cached\n '
if (isNone(path) and isNone(learn)):
print('path and learn cannot be None at the same time')
return
elif isNone(path):
path = learn.path
name = f'{model_name}_part_dep'
folder = 'cache'
path = (path / folder)
if (not Path(f'{(path / name)}.pkl').exists()):
print(f'No chache file')
else:
f = open((path / f'{name}.pkl'), 'rb')
var = load(f)
f.close()
for k in var.keys():
print(k) | 208,750,910,187,950,180 | Shows what keys are cached | fastinference/tabular/pd.py | what_cached | floleuerer/fastinference | python | @classmethod
def what_cached(self, model_name: str, path=None, learn=None):
'\n \n '
if (isNone(path) and isNone(learn)):
print('path and learn cannot be None at the same time')
return
elif isNone(path):
path = learn.path
name = f'{model_name}_part_dep'
folder = 'cache'
path = (path / folder)
if (not Path(f'{(path / name)}.pkl').exists()):
print(f'No chache file')
else:
f = open((path / f'{name}.pkl'), 'rb')
var = load(f)
f.close()
for k in var.keys():
print(k) |
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
'\n deletes the cache file\n '
if (isNone(path) and isNone(learn)):
print('path and learn cannot be None at the same time')
return
elif isNone(path):
path = learn.path
name = f'{model_name}_part_dep'
folder = 'cache'
path = (path / folder)
files = (Path(f'{(path / name)}.pkl'), Path((path / 'pd_interm.pkl')))
for file in files:
if (not file.exists()):
print(f'No chache file {file}')
else:
file.unlink() | 5,048,076,303,994,254,000 | deletes the cache file | fastinference/tabular/pd.py | empty_cache | floleuerer/fastinference | python | @classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
'\n \n '
if (isNone(path) and isNone(learn)):
print('path and learn cannot be None at the same time')
return
elif isNone(path):
path = learn.path
name = f'{model_name}_part_dep'
folder = 'cache'
path = (path / folder)
files = (Path(f'{(path / name)}.pkl'), Path((path / 'pd_interm.pkl')))
for file in files:
if (not file.exists()):
print(f'No chache file {file}')
else:
file.unlink() |
def _cont_into_buckets(self, df_init, CONT_COLS):
"\n Categorical values can be easily distiguished one from another\n But that doesn't work with continious values, we have to divede it's\n values into buckets and then use all values in a bucket as a single value\n that avarages the bucket. This way we convert cont feture into pseudo categorical\n and are able to apply partial dependense analysis to it\n "
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for (x, y) in zip(edges[:], edges[1:]):
df.loc[(((df[col] > x) & (df[col] < y)), col)] = ((x + y) / 2)
return df | 2,281,857,222,200,926,000 | Categorical values can be easily distiguished one from another
But that doesn't work with continious values, we have to divede it's
values into buckets and then use all values in a bucket as a single value
that avarages the bucket. This way we convert cont feture into pseudo categorical
and are able to apply partial dependense analysis to it | fastinference/tabular/pd.py | _cont_into_buckets | floleuerer/fastinference | python | def _cont_into_buckets(self, df_init, CONT_COLS):
"\n Categorical values can be easily distiguished one from another\n But that doesn't work with continious values, we have to divede it's\n values into buckets and then use all values in a bucket as a single value\n that avarages the bucket. This way we convert cont feture into pseudo categorical\n and are able to apply partial dependense analysis to it\n "
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for (x, y) in zip(edges[:], edges[1:]):
df.loc[(((df[col] > x) & (df[col] < y)), col)] = ((x + y) / 2)
return df |
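To make the bucketing step concrete: np.histogram_bin_edges chooses edges automatically, and every value strictly inside a bin is replaced by that bin's midpoint, turning the continuous column into a small set of pseudo-categories. A standalone sketch of the same idea on toy data (the column name and distribution are made up):

import numpy as np
import pandas as pd

df = pd.DataFrame({'price': np.random.default_rng(0).normal(100, 15, 1000)})

edges = np.histogram_bin_edges(df['price'].dropna(), bins='auto')
for lo, hi in zip(edges[:-1], edges[1:]):
    df.loc[(df['price'] > lo) & (df['price'] < hi), 'price'] = (lo + hi) / 2

print(df['price'].nunique())   # far fewer distinct values than the original 1000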
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
"\n This function outputs threshold to number of occurrences different variants of list of columns (fields)\n In short if coef for ex. is 0.9, then function outputs number of occurrences for all but least 10%\n of the least used\n If coef is more 1.0, then 'coef' itself is used as threshold\n "
if (coef > 1):
return math.ceil(coef)
coef = (0.0 if (coef < 0) else coef)
occs = df.groupby(fields).size().reset_index(name='Times').sort_values(['Times'], ascending=False)
num = math.ceil((coef * len(occs)))
if (num <= 0):
return (occs.iloc[0]['Times'] + 1)
else:
return occs.iloc[(num - 1)]['Times'] | 5,400,113,811,807,088,000 | This function outputs threshold to number of occurrences different variants of list of columns (fields)
In short if coef for ex. is 0.9, then function outputs number of occurrences for all but least 10%
of the least used
If coef is more 1.0, then 'coef' itself is used as threshold | fastinference/tabular/pd.py | _get_field_uniq_x_coef | floleuerer/fastinference | python | def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
"\n This function outputs threshold to number of occurrences different variants of list of columns (fields)\n In short if coef for ex. is 0.9, then function outputs number of occurrences for all but least 10%\n of the least used\n If coef is more 1.0, then 'coef' itself is used as threshold\n "
if (coef > 1):
return math.ceil(coef)
coef = (0.0 if (coef < 0) else coef)
occs = df.groupby(fields).size().reset_index(name='Times').sort_values(['Times'], ascending=False)
num = math.ceil((coef * len(occs)))
if (num <= 0):
return (occs.iloc[0]['Times'] + 1)
else:
return occs.iloc[(num - 1)]['Times'] |
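A quick worked example of the threshold logic above: suppose a field has 10 distinct values with occurrence counts [50, 30, 10, 5, 5, 4, 3, 2, 2, 1] (sorted descending) and coef=0.9. Then num = ceil(0.9 * 10) = 9 and the function returns the 9th count, 2, so any variant occurring fewer than 2 times (here, the least-used tenth of variants) is dropped by the caller's Times >= threshold filter; a coef greater than 1 is instead used directly (rounded up) as the occurrence threshold.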
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
"\n Function calculate partial dependency for column in fields.\n Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.\n For ex. fields = [['Store','StoreType']] mean that Store and StoreType is treated as one entity\n (it's values are substitute as a pair, not as separate values)\n coef is useful when we don't want to deal with all the variants, but only with most common\n "
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
(coef, is_sorted, use_log, use_int) = (self.coef, self.is_sorted, self.use_log, self.use_int)
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST)
occs = df.groupby(fields).size().reset_index(name='Times').sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan)
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan)
occs = occs[(occs['Times'] >= field_min_occ)]
df_copy = df.merge(occs[fields]).copy()
frame = []
ln = len(occs)
if (ln > 0):
for (_, row) in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = (np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds))
preds = (int(preds) if (use_int == True) else preds)
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
out = pd.DataFrame(frame, columns=(fields + [dep_name, 'times']))
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out | 2,957,460,990,702,026,000 | Function calculate partial dependency for column in fields.
Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.
For ex. fields = [['Store','StoreType']] mean that Store and StoreType is treated as one entity
(it's values are substitute as a pair, not as separate values)
coef is useful when we don't want to deal with all the variants, but only with most common | fastinference/tabular/pd.py | _get_part_dep_one | floleuerer/fastinference | python | def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
"\n Function calculate partial dependency for column in fields.\n Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.\n For ex. fields = [['Store','StoreType']] mean that Store and StoreType is treated as one entity\n (it's values are substitute as a pair, not as separate values)\n coef is useful when we don't want to deal with all the variants, but only with most common\n "
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
(coef, is_sorted, use_log, use_int) = (self.coef, self.is_sorted, self.use_log, self.use_int)
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST)
occs = df.groupby(fields).size().reset_index(name='Times').sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan)
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan)
occs = occs[(occs['Times'] >= field_min_occ)]
df_copy = df.merge(occs[fields]).copy()
frame = []
ln = len(occs)
if (ln > 0):
for (_, row) in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = (np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds))
preds = (int(preds) if (use_int == True) else preds)
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
out = pd.DataFrame(frame, columns=(fields + [dep_name, 'times']))
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out |
def _get_part_dep(self):
'\n Makes a datafreme with partial dependencies for every pair of columns in fields\n '
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
if (is_continue == True):
if Path((cache_path / 'pd_interm.pkl')).exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): 'value'})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path((cache_path / 'pd_interm.pkl')).exists():
Path((cache_path / 'pd_interm.pkl')).unlink()
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result | -488,242,411,160,210,560 | Makes a datafreme with partial dependencies for every pair of columns in fields | fastinference/tabular/pd.py | _get_part_dep | floleuerer/fastinference | python | def _get_part_dep(self):
'\n \n '
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
if (is_continue == True):
if Path((cache_path / 'pd_interm.pkl')).exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): 'value'})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path((cache_path / 'pd_interm.pkl')).exists():
Path((cache_path / 'pd_interm.pkl')).unlink()
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result |
def _save_cached(self):
'\n Saves calculated PartDep df into path.\n Can be saved more than one with as an dict with fields as key\n '
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key((self.fields + [self.coef]))
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path) | 7,031,681,797,881,425,000 | Saves calculated PartDep df into path.
Can be saved more than one with as an dict with fields as key | fastinference/tabular/pd.py | _save_cached | floleuerer/fastinference | python | def _save_cached(self):
'\n Saves calculated PartDep df into path.\n Can be saved more than one with as an dict with fields as key\n '
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key((self.fields + [self.coef]))
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path) |
def _load_cached(self):
'\n Load calculated PartDep df if hash exist.\n '
name = self.save_name
path = self.cache_path
if (not Path(f'{(path / name)}.pkl').exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key((self.fields + [self.coef]))
if (key not in ld_dict):
return None
return ld_dict[key] | -5,927,804,199,348,323,000 | Load calculated PartDep df if hash exists. | fastinference/tabular/pd.py | _load_cached | floleuerer/fastinference | python | def _load_cached(self):
'\n \n '
name = self.save_name
path = self.cache_path
if (not Path(f'{(path / name)}.pkl').exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key((self.fields + [self.coef]))
if (key not in ld_dict):
return None
return ld_dict[key] |
def _load_or_calculate(self):
'\n Calculates part dep or load it from cache if possible\n '
if ((self.is_use_cache == False) or isNone(self._load_cached())):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached() | 4,629,582,466,019,069,000 | Calculates part dep or loads it from cache if possible | fastinference/tabular/pd.py | _load_or_calculate | floleuerer/fastinference | python | def _load_or_calculate(self):
'\n \n '
if ((self.is_use_cache == False) or isNone(self._load_cached())):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached() |
def plot_raw(self, field, sample=1.0):
'\n Plot dependency graph from data itself\n field must be list of exactly one feature\n sample is a coef to len(df). Lower if kernel use to shut down on that\n '
df = self.df
df = df.sample(int((len(df) * sample)))
field = field[0]
dep_var = (f'{self._get_dep_var()}_orig' if (self.use_log == True) else self._get_dep_var())
return (ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess')) | -5,214,618,900,920,584,000 | Plot dependency graph from data itself
field must be a list of exactly one feature
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down | fastinference/tabular/pd.py | plot_raw | floleuerer/fastinference | python | def plot_raw(self, field, sample=1.0):
'\n Plot dependency graph from data itself\n field must be list of exactly one feature\n sample is a coef to len(df). Lower if kernel use to shut down on that\n '
df = self.df
df = df.sample(int((len(df) * sample)))
field = field[0]
dep_var = (f'{self._get_dep_var()}_orig' if (self.use_log == True) else self._get_dep_var())
return (ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess')) |
def plot_model(self, field, strict_recalc=False, sample=1.0):
'\n Plot dependency graph from the model.\n It also take into account times, so plot becomes much more resilient, cause not every value treats as equal\n (more occurences means more power)\n field must be list of exactly one feature\n strict_recalc=True ignores precalculated `part_dep_df` and calculate it anyway\n sample is a coef to len(df). Lower if kernel use to shut down on that\n '
cached = self.get_pd(feature=self._list_to_key(field))
if ((strict_recalc == False) and isNotNone(cached)):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f'{self._get_dep_var()}'
rearr = []
for (var, fee, times) in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int((len(rearr) * sample)))
return (ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess')) | 1,374,911,187,912,990,700 | Plot dependency graph from the model.
It also takes into account times, so the plot becomes much more resilient, because not every value is treated as equal
(more occurrences means more power)
field must be a list of exactly one feature
strict_recalc=True ignores the precalculated `part_dep_df` and calculates it anyway
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down | fastinference/tabular/pd.py | plot_model | floleuerer/fastinference | python | def plot_model(self, field, strict_recalc=False, sample=1.0):
'\n Plot dependency graph from the model.\n It also take into account times, so plot becomes much more resilient, cause not every value treats as equal\n (more occurences means more power)\n field must be list of exactly one feature\n strict_recalc=True ignores precalculated `part_dep_df` and calculate it anyway\n sample is a coef to len(df). Lower if kernel use to shut down on that\n '
cached = self.get_pd(feature=self._list_to_key(field))
if ((strict_recalc == False) and isNotNone(cached)):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f'{self._get_dep_var()}'
rearr = []
for (var, fee, times) in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int((len(rearr) * sample)))
return (ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess')) |
def get_pd(self, feature, min_tm=1):
'\n Gets particular feature subtable from the whole one (min times is optional parameter)\n '
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f'(feature == "{feature}") and (times > {min_tm})')
return self._general2partial(df=df) | 8,928,306,377,913,288,000 | Gets a particular feature's subtable from the whole one (min times is an optional parameter) | fastinference/tabular/pd.py | get_pd | floleuerer/fastinference | python | def get_pd(self, feature, min_tm=1):
'\n \n '
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f'(feature == "{feature}") and (times > {min_tm})')
return self._general2partial(df=df) |
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
'\n Transforms whole features table to get_part_dep_one output table format\n '
def get_xth_el(str_list: str, indexes: list):
lst = (str_list if is_listy(str_list) else ast.literal_eval(str_list))
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}') | -4,872,721,693,105,727,000 | Transforms the whole features table into the get_part_dep_one output table format | fastinference/tabular/pd.py | get_pd_main_chained_feat | floleuerer/fastinference | python | def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
'\n \n '
def get_xth_el(str_list: str, indexes: list):
lst = (str_list if is_listy(str_list) else ast.literal_eval(str_list))
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}') |
def plot_part_dep(self, fields, limit=20, asc=False):
'\n Plots partial dependency plot for sublist of connected `fields`\n `fields` must be sublist of `fields` given on initalization calculation\n '
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = (heat_max - heat_min)
colors = [(((times - heat_min) / dif), ((times - heat_min) / (4 * dif)), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=(not asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x='value', y=dep_var, sort_columns=True, figsize=(10, 10), color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if self.is_biclassification:
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0, 0), (0, (- 30)), xycoords='axes fraction', textcoords='offset points', va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), (p.get_y() * 1.005)))
ax.annotate(f'{int(t)}', ((p.get_width() * 0.45), (p.get_y() + 0.1)), color='white', weight='bold') | 8,386,247,731,553,893,000 | Plots partial dependency plot for sublist of connected `fields`
`fields` must be a sublist of the `fields` given at the initialization calculation | fastinference/tabular/pd.py | plot_part_dep | floleuerer/fastinference | python | def plot_part_dep(self, fields, limit=20, asc=False):
'\n Plots partial dependency plot for sublist of connected `fields`\n `fields` must be sublist of `fields` given on initalization calculation\n '
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = (heat_max - heat_min)
colors = [(((times - heat_min) / dif), ((times - heat_min) / (4 * dif)), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=(not asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x='value', y=dep_var, sort_columns=True, figsize=(10, 10), color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if self.is_biclassification:
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0, 0), (0, (- 30)), xycoords='axes fraction', textcoords='offset points', va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), (p.get_y() * 1.005)))
ax.annotate(f'{int(t)}', ((p.get_width() * 0.45), (p.get_y() + 0.1)), color='white', weight='bold') |
def _parse_content(response):
'parse the response body as JSON, raise on errors'
if (response.status_code != 200):
raise ApiError(f'unknown error: {response.content.decode()}')
result = json.loads(response.content)
if (not result['ok']):
raise ApiError(f"{result['error']}: {result.get('detail')}")
return result | -553,374,406,510,625,340 | parse the response body as JSON, raise on errors | examples/slack/query.py | _parse_content | ariebovenberg/snug | python | def _parse_content(response):
if (response.status_code != 200):
raise ApiError(f'unknown error: {response.content.decode()}')
result = json.loads(response.content)
if (not result['ok']):
raise ApiError(f"{result['error']}: {result.get('detail')}")
return result |
def paginated_retrieval(methodname, itemtype):
'decorator factory for retrieval queries from query params'
return compose(reusable, basic_interaction, map_yield(partial(_params_as_get, methodname))) | -6,033,415,841,283,409,000 | decorator factory for retrieval queries from query params | examples/slack/query.py | paginated_retrieval | ariebovenberg/snug | python | def paginated_retrieval(methodname, itemtype):
return compose(reusable, basic_interaction, map_yield(partial(_params_as_get, methodname))) |
def json_post(methodname, rtype, key):
'decorator factory for json POST queries'
return compose(reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield) | 4,652,402,797,051,923,000 | decorator factory for json POST queries | examples/slack/query.py | json_post | ariebovenberg/snug | python | def json_post(methodname, rtype, key):
return compose(reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield) |
def retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4):
"\n **Target Assign Layer for the detector RetinaNet.**\n\n This OP finds out positive and negative samples from all anchors\n for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,\n and assigns target labels for classification along with target locations for\n regression to each sample, then takes out the part belonging to positive and\n negative samples from category prediction( :attr:`cls_logits`) and location\n prediction( :attr:`bbox_pred`) which belong to all anchors.\n\n The searching principles for positive and negative samples are as followed:\n\n 1. Anchors are assigned to ground-truth boxes when it has the highest IoU\n overlap with a ground-truth box.\n\n 2. Anchors are assigned to ground-truth boxes when it has an IoU overlap\n higher than :attr:`positive_overlap` with any ground-truth box.\n\n 3. Anchors are assigned to background when its IoU overlap is lower than\n :attr:`negative_overlap` for all ground-truth boxes.\n\n 4. Anchors which do not meet the above conditions do not participate in\n the training process.\n\n Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box\n regression for each anchor, hence the target label for each positive(or negative)\n sample is a :math:`C`-vector and the target locations for each positive sample\n is a 4-vector. As for a positive sample, if the category of its assigned\n ground-truth box is class :math:`i`, the corresponding entry in its length\n :math:`C` label vector is set to 1 and all other entries is set to 0, its box\n regression targets are computed as the offset between itself and its assigned\n ground-truth box. As for a negative sample, all entries in its length :math:`C`\n label vector are set to 0 and box regression targets are omitted because\n negative samples do not participate in the training process of location\n regression.\n\n After the assignment, the part belonging to positive and negative samples is\n taken out from category prediction( :attr:`cls_logits` ), and the part\n belonging to positive samples is taken out from location\n prediction( :attr:`bbox_pred` ).\n\n Args:\n bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents\n the predicted locations of all anchors. :math:`N` is the batch size( the\n number of images in a mini-batch), :math:`M` is the number of all anchors\n of one image, and each anchor has 4 coordinate values. The data type of\n :attr:`bbox_pred` is float32 or float64.\n cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents\n the predicted categories of all anchors. :math:`N` is the batch size,\n :math:`M` is the number of all anchors of one image, and :math:`C` is\n the number of categories (**Notice: excluding background**). The data type\n of :attr:`cls_logits` is float32 or float64.\n anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents\n the locations of all anchors. :math:`M` is the number of all anchors of\n one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,\n :math:`[xmin, ymin]` is the left top coordinate of the anchor box,\n :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.\n The data type of :attr:`anchor_box` is float32 or float64. Please refer\n to the OP :ref:`api_fluid_layers_anchor_generator` \n for the generation of :attr:`anchor_box`.\n anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded \n factors of anchor locations used in loss function. 
:math:`M` is number of\n all anchors of one image, each anchor possesses a 4-vector expanded factor.\n The data type of :attr:`anchor_var` is float32 or float64. Please refer\n to the OP :ref:`api_fluid_layers_anchor_generator`\n for the generation of :attr:`anchor_var`.\n gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents\n locations of all ground-truth boxes. :math:`G` is the total number of\n all ground-truth boxes in a mini-batch, and each ground-truth box has 4\n coordinate values. The data type of :attr:`gt_boxes` is float32 or\n float64.\n gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents\n categories of all ground-truth boxes, and the values are in the range of\n :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes\n in a mini-batch, and each ground-truth box has one category. The data type\n of :attr:`gt_labels` is int32.\n is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which\n indicates whether a ground-truth box is a crowd. If the value is 1, the\n corresponding box is a crowd, it is ignored during training. :math:`G` is\n the total number of all ground-truth boxes in a mini-batch. The data type\n of :attr:`is_crowd` is int32.\n im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size\n information of input images. :math:`N` is the batch size, the size\n information of each image is a 3-vector which are the height and width\n of the network input along with the factor scaling the origin image to\n the network input. The data type of :attr:`im_info` is float32.\n num_classes(int32): The number of categories for classification, the default\n value is 1.\n positive_overlap(float32): Minimum overlap required between an anchor\n and ground-truth box for the anchor to be a positive sample, the default\n value is 0.5.\n negative_overlap(float32): Maximum overlap allowed between an anchor\n and ground-truth box for the anchor to be a negative sample, the default\n value is 0.4. :attr:`negative_overlap` should be less than or equal to\n :attr:`positive_overlap`, if not, the actual value of\n :attr:`positive_overlap` is :attr:`negative_overlap`.\n\n Returns:\n A tuple with 6 Variables:\n \n **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents\n category prediction belonging to positive and negative samples. :math:`F`\n is the number of positive samples in a mini-batch, :math:`B` is the number\n of negative samples, and :math:`C` is the number of categories\n (**Notice: excluding background**). The data type of :attr:`predict_scores`\n is float32 or float64.\n\n **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents\n location prediction belonging to positive samples. :math:`F` is the number\n of positive samples. :math:`F` is the number of positive samples, and each\n sample has 4 coordinate values. The data type of :attr:`predict_location`\n is float32 or float64.\n\n **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents\n target labels for classification belonging to positive and negative\n samples. :math:`F` is the number of positive samples, :math:`B` is the\n number of negative, and each sample has one target category. 
The data type\n of :attr:`target_label` is int32.\n\n **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents\n target locations for box regression belonging to positive samples.\n :math:`F` is the number of positive samples, and each sample has 4\n coordinate values. The data type of :attr:`target_bbox` is float32 or\n float64.\n\n **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`\n represents whether a positive sample is fake positive, if a positive\n sample is false positive, the corresponding entries in\n :attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number\n of total positive samples in a mini-batch, and each sample has 4\n coordinate values. The data type of :attr:`bbox_inside_weight` is float32\n or float64.\n\n **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number\n of positive samples. :math:`N` is the batch size. **Notice: The number\n of positive samples is used as the denominator of later loss function,\n to avoid the condition that the denominator is zero, this OP has added 1\n to the actual number of positive samples of each image.** The data type of\n :attr:`fg_num` is int32.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],\n dtype='float32')\n cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],\n dtype='float32')\n anchor_box = fluid.data(name='anchor_box', shape=[100, 4],\n dtype='float32')\n anchor_var = fluid.data(name='anchor_var', shape=[100, 4],\n dtype='float32')\n gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],\n dtype='float32')\n gt_labels = fluid.data(name='gt_labels', shape=[10, 1],\n dtype='int32')\n is_crowd = fluid.data(name='is_crowd', shape=[1],\n dtype='int32')\n im_info = fluid.data(name='im_info', shape=[1, 3],\n dtype='float32')\n score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\\n fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,\n anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)\n\n "
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'], 'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], 'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], 'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(type='retinanet_target_assign', inputs={'Anchor': anchor_box, 'GtBoxes': gt_boxes, 'GtLabels': gt_labels, 'IsCrowd': is_crowd, 'ImInfo': im_info}, outputs={'LocationIndex': loc_index, 'ScoreIndex': score_index, 'TargetLabel': target_label, 'TargetBBox': target_bbox, 'BBoxInsideWeight': bbox_inside_weight, 'ForegroundNumber': fg_num}, attrs={'positive_overlap': positive_overlap, 'negative_overlap': negative_overlap})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=((- 1), num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=((- 1), 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return (predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num) | 4,884,496,934,939,049,000 | **Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as followed:
1. Anchors are assigned to ground-truth boxes when it has the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries is set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples. :math:`F` is the number of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is fake positive, if a positive
sample is false positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10) | python/paddle/fluid/layers/detection.py | retinanet_target_assign | 92lqllearning/Paddle | python | def retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4):
"\n **Target Assign Layer for the detector RetinaNet.**\n\n This OP finds out positive and negative samples from all anchors\n for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,\n and assigns target labels for classification along with target locations for\n regression to each sample, then takes out the part belonging to positive and\n negative samples from category prediction( :attr:`cls_logits`) and location\n prediction( :attr:`bbox_pred`) which belong to all anchors.\n\n The searching principles for positive and negative samples are as followed:\n\n 1. Anchors are assigned to ground-truth boxes when it has the highest IoU\n overlap with a ground-truth box.\n\n 2. Anchors are assigned to ground-truth boxes when it has an IoU overlap\n higher than :attr:`positive_overlap` with any ground-truth box.\n\n 3. Anchors are assigned to background when its IoU overlap is lower than\n :attr:`negative_overlap` for all ground-truth boxes.\n\n 4. Anchors which do not meet the above conditions do not participate in\n the training process.\n\n Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box\n regression for each anchor, hence the target label for each positive(or negative)\n sample is a :math:`C`-vector and the target locations for each positive sample\n is a 4-vector. As for a positive sample, if the category of its assigned\n ground-truth box is class :math:`i`, the corresponding entry in its length\n :math:`C` label vector is set to 1 and all other entries is set to 0, its box\n regression targets are computed as the offset between itself and its assigned\n ground-truth box. As for a negative sample, all entries in its length :math:`C`\n label vector are set to 0 and box regression targets are omitted because\n negative samples do not participate in the training process of location\n regression.\n\n After the assignment, the part belonging to positive and negative samples is\n taken out from category prediction( :attr:`cls_logits` ), and the part\n belonging to positive samples is taken out from location\n prediction( :attr:`bbox_pred` ).\n\n Args:\n bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents\n the predicted locations of all anchors. :math:`N` is the batch size( the\n number of images in a mini-batch), :math:`M` is the number of all anchors\n of one image, and each anchor has 4 coordinate values. The data type of\n :attr:`bbox_pred` is float32 or float64.\n cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents\n the predicted categories of all anchors. :math:`N` is the batch size,\n :math:`M` is the number of all anchors of one image, and :math:`C` is\n the number of categories (**Notice: excluding background**). The data type\n of :attr:`cls_logits` is float32 or float64.\n anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents\n the locations of all anchors. :math:`M` is the number of all anchors of\n one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,\n :math:`[xmin, ymin]` is the left top coordinate of the anchor box,\n :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.\n The data type of :attr:`anchor_box` is float32 or float64. Please refer\n to the OP :ref:`api_fluid_layers_anchor_generator` \n for the generation of :attr:`anchor_box`.\n anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded \n factors of anchor locations used in loss function. 
:math:`M` is number of\n all anchors of one image, each anchor possesses a 4-vector expanded factor.\n The data type of :attr:`anchor_var` is float32 or float64. Please refer\n to the OP :ref:`api_fluid_layers_anchor_generator`\n for the generation of :attr:`anchor_var`.\n gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents\n locations of all ground-truth boxes. :math:`G` is the total number of\n all ground-truth boxes in a mini-batch, and each ground-truth box has 4\n coordinate values. The data type of :attr:`gt_boxes` is float32 or\n float64.\n gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents\n categories of all ground-truth boxes, and the values are in the range of\n :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes\n in a mini-batch, and each ground-truth box has one category. The data type\n of :attr:`gt_labels` is int32.\n is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which\n indicates whether a ground-truth box is a crowd. If the value is 1, the\n corresponding box is a crowd, it is ignored during training. :math:`G` is\n the total number of all ground-truth boxes in a mini-batch. The data type\n of :attr:`is_crowd` is int32.\n im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size\n information of input images. :math:`N` is the batch size, the size\n information of each image is a 3-vector which are the height and width\n of the network input along with the factor scaling the origin image to\n the network input. The data type of :attr:`im_info` is float32.\n num_classes(int32): The number of categories for classification, the default\n value is 1.\n positive_overlap(float32): Minimum overlap required between an anchor\n and ground-truth box for the anchor to be a positive sample, the default\n value is 0.5.\n negative_overlap(float32): Maximum overlap allowed between an anchor\n and ground-truth box for the anchor to be a negative sample, the default\n value is 0.4. :attr:`negative_overlap` should be less than or equal to\n :attr:`positive_overlap`, if not, the actual value of\n :attr:`positive_overlap` is :attr:`negative_overlap`.\n\n Returns:\n A tuple with 6 Variables:\n \n **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents\n category prediction belonging to positive and negative samples. :math:`F`\n is the number of positive samples in a mini-batch, :math:`B` is the number\n of negative samples, and :math:`C` is the number of categories\n (**Notice: excluding background**). The data type of :attr:`predict_scores`\n is float32 or float64.\n\n **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents\n location prediction belonging to positive samples. :math:`F` is the number\n of positive samples. :math:`F` is the number of positive samples, and each\n sample has 4 coordinate values. The data type of :attr:`predict_location`\n is float32 or float64.\n\n **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents\n target labels for classification belonging to positive and negative\n samples. :math:`F` is the number of positive samples, :math:`B` is the\n number of negative, and each sample has one target category. 
The data type\n of :attr:`target_label` is int32.\n\n **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents\n target locations for box regression belonging to positive samples.\n :math:`F` is the number of positive samples, and each sample has 4\n coordinate values. The data type of :attr:`target_bbox` is float32 or\n float64.\n\n **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`\n represents whether a positive sample is fake positive, if a positive\n sample is false positive, the corresponding entries in\n :attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number\n of total positive samples in a mini-batch, and each sample has 4\n coordinate values. The data type of :attr:`bbox_inside_weight` is float32\n or float64.\n\n **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number\n of positive samples. :math:`N` is the batch size. **Notice: The number\n of positive samples is used as the denominator of later loss function,\n to avoid the condition that the denominator is zero, this OP has added 1\n to the actual number of positive samples of each image.** The data type of\n :attr:`fg_num` is int32.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],\n dtype='float32')\n cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],\n dtype='float32')\n anchor_box = fluid.data(name='anchor_box', shape=[100, 4],\n dtype='float32')\n anchor_var = fluid.data(name='anchor_var', shape=[100, 4],\n dtype='float32')\n gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],\n dtype='float32')\n gt_labels = fluid.data(name='gt_labels', shape=[10, 1],\n dtype='int32')\n is_crowd = fluid.data(name='is_crowd', shape=[1],\n dtype='int32')\n im_info = fluid.data(name='im_info', shape=[1, 3],\n dtype='float32')\n score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\\n fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,\n anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)\n\n "
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], 'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'], 'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], 'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], 'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(type='retinanet_target_assign', inputs={'Anchor': anchor_box, 'GtBoxes': gt_boxes, 'GtLabels': gt_labels, 'IsCrowd': is_crowd, 'ImInfo': im_info}, outputs={'LocationIndex': loc_index, 'ScoreIndex': score_index, 'TargetLabel': target_label, 'TargetBBox': target_bbox, 'BBoxInsideWeight': bbox_inside_weight, 'ForegroundNumber': fg_num}, attrs={'positive_overlap': positive_overlap, 'negative_overlap': negative_overlap})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=((- 1), num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=((- 1), 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return (predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num) |
def rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True):
"\n **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**\n\n This layer can be, for given the Intersection-over-Union (IoU) overlap\n between anchors and ground truth boxes, to assign classification and\n regression targets to each each anchor, these target labels are used for\n train RPN. The classification targets is a binary class label (of being\n an object or not). Following the paper of Faster-RCNN, the positive labels\n are two kinds of anchors: (i) the anchor/anchors with the highest IoU\n overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap\n higher than rpn_positive_overlap(0.7) with any ground-truth box. Note\n that a single ground-truth box may assign positive labels to multiple\n anchors. A non-positive anchor is when its IoU ratio is lower than\n rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are\n neither positive nor negative do not contribute to the training objective.\n The regression targets are the encoded ground-truth boxes associated with\n the positive anchors.\n\n Args:\n bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the\n predicted locations of M bounding bboxes. N is the batch size,\n and each bounding box has four coordinate values and the layout\n is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.\n cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the\n predicted confidence predictions. N is the batch size, 1 is the\n frontground and background sigmoid, M is number of bounding boxes.\n The data type can be float32 or float64.\n anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,\n each box is represented as [xmin, ymin, xmax, ymax],\n [xmin, ymin] is the left top coordinate of the anchor box,\n if the input is image feature map, they are close to the origin\n of the coordinate system. [xmax, ymax] is the right bottom\n coordinate of the anchor box. The data type can be float32 or float64.\n anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded \n variances of anchors. The data type can be float32 or float64.\n gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D\n LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth\n bboxes of mini-batch input. The data type can be float32 or float64.\n is_crowd (Variable): A 1-D LoDTensor which indicates groud-truth is crowd.\n The data type must be int32.\n im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,\n 3 is the height, width and scale.\n rpn_batch_size_per_im(int): Total number of RPN examples per image.\n The data type must be int32.\n rpn_straddle_thresh(float): Remove RPN anchors that go outside the image\n by straddle_thresh pixels. The data type must be float32.\n rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled\n foreground (i.e. class > 0), 0-th class is background. The data type must be float32.\n rpn_positive_overlap(float): Minimum overlap required between an anchor\n and ground-truth box for the (anchor, gt box) pair to be a positive\n example. The data type must be float32.\n rpn_negative_overlap(float): Maximum overlap allowed between an anchor\n and ground-truth box for the (anchor, gt box) pair to be a negative\n examples. The data type must be float32.\n\n Returns:\n tuple:\n A tuple(predicted_scores, predicted_location, target_label,\n target_bbox, bbox_inside_weight) is returned. 
The predicted_scores \n and predicted_location is the predicted result of the RPN.\n The target_label and target_bbox is the ground truth,\n respectively. The predicted_location is a 2D Tensor with shape\n [F, 4], and the shape of target_bbox is same as the shape of\n the predicted_location, F is the number of the foreground\n anchors. The predicted_scores is a 2D Tensor with shape\n [F + B, 1], and the shape of target_label is same as the shape\n of the predicted_scores, B is the number of the background\n anchors, the F and B is depends on the input of this operator.\n Bbox_inside_weight represents whether the predicted loc is fake_fg\n or not and the shape is [F, 4].\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')\n cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')\n anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')\n anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')\n gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')\n is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')\n im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')\n loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(\n bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)\n\n "
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], 'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], 'rpn_target_assign')
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
helper.append_op(type='rpn_target_assign', inputs={'Anchor': anchor_box, 'GtBoxes': gt_boxes, 'IsCrowd': is_crowd, 'ImInfo': im_info}, outputs={'LocationIndex': loc_index, 'ScoreIndex': score_index, 'TargetLabel': target_label, 'TargetBBox': target_bbox, 'BBoxInsideWeight': bbox_inside_weight}, attrs={'rpn_batch_size_per_im': rpn_batch_size_per_im, 'rpn_straddle_thresh': rpn_straddle_thresh, 'rpn_positive_overlap': rpn_positive_overlap, 'rpn_negative_overlap': rpn_negative_overlap, 'rpn_fg_fraction': rpn_fg_fraction, 'use_random': use_random})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=((- 1), 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=((- 1), 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return (predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight) | -5,902,719,678,806,247,000 | **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
This layer can be, for given the Intersection-over-Union (IoU) overlap
between anchors and ground truth boxes, to assign classification and
regression targets to each each anchor, these target labels are used for
train RPN. The classification targets is a binary class label (of being
an object or not). Following the paper of Faster-RCNN, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. A non-positive anchor is when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
frontground and background sigmoid, M is number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates groud-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
anchors, the F and B is depends on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info) | python/paddle/fluid/layers/detection.py | rpn_target_assign | 92lqllearning/Paddle | python | def rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True):
"\n **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**\n\n This layer can be, for given the Intersection-over-Union (IoU) overlap\n between anchors and ground truth boxes, to assign classification and\n regression targets to each each anchor, these target labels are used for\n train RPN. The classification targets is a binary class label (of being\n an object or not). Following the paper of Faster-RCNN, the positive labels\n are two kinds of anchors: (i) the anchor/anchors with the highest IoU\n overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap\n higher than rpn_positive_overlap(0.7) with any ground-truth box. Note\n that a single ground-truth box may assign positive labels to multiple\n anchors. A non-positive anchor is when its IoU ratio is lower than\n rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are\n neither positive nor negative do not contribute to the training objective.\n The regression targets are the encoded ground-truth boxes associated with\n the positive anchors.\n\n Args:\n bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the\n predicted locations of M bounding bboxes. N is the batch size,\n and each bounding box has four coordinate values and the layout\n is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.\n cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the\n predicted confidence predictions. N is the batch size, 1 is the\n frontground and background sigmoid, M is number of bounding boxes.\n The data type can be float32 or float64.\n anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,\n each box is represented as [xmin, ymin, xmax, ymax],\n [xmin, ymin] is the left top coordinate of the anchor box,\n if the input is image feature map, they are close to the origin\n of the coordinate system. [xmax, ymax] is the right bottom\n coordinate of the anchor box. The data type can be float32 or float64.\n anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded \n variances of anchors. The data type can be float32 or float64.\n gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D\n LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth\n bboxes of mini-batch input. The data type can be float32 or float64.\n is_crowd (Variable): A 1-D LoDTensor which indicates groud-truth is crowd.\n The data type must be int32.\n im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,\n 3 is the height, width and scale.\n rpn_batch_size_per_im(int): Total number of RPN examples per image.\n The data type must be int32.\n rpn_straddle_thresh(float): Remove RPN anchors that go outside the image\n by straddle_thresh pixels. The data type must be float32.\n rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled\n foreground (i.e. class > 0), 0-th class is background. The data type must be float32.\n rpn_positive_overlap(float): Minimum overlap required between an anchor\n and ground-truth box for the (anchor, gt box) pair to be a positive\n example. The data type must be float32.\n rpn_negative_overlap(float): Maximum overlap allowed between an anchor\n and ground-truth box for the (anchor, gt box) pair to be a negative\n examples. The data type must be float32.\n\n Returns:\n tuple:\n A tuple(predicted_scores, predicted_location, target_label,\n target_bbox, bbox_inside_weight) is returned. 
The predicted_scores \n and predicted_location is the predicted result of the RPN.\n The target_label and target_bbox is the ground truth,\n respectively. The predicted_location is a 2D Tensor with shape\n [F, 4], and the shape of target_bbox is same as the shape of\n the predicted_location, F is the number of the foreground\n anchors. The predicted_scores is a 2D Tensor with shape\n [F + B, 1], and the shape of target_label is same as the shape\n of the predicted_scores, B is the number of the background\n anchors, the F and B is depends on the input of this operator.\n Bbox_inside_weight represents whether the predicted loc is fake_fg\n or not and the shape is [F, 4].\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')\n cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')\n anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')\n anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')\n gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')\n is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')\n im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')\n loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(\n bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)\n\n "
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], 'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], 'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], 'rpn_target_assign')
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(dtype=anchor_box.dtype)
helper.append_op(type='rpn_target_assign', inputs={'Anchor': anchor_box, 'GtBoxes': gt_boxes, 'IsCrowd': is_crowd, 'ImInfo': im_info}, outputs={'LocationIndex': loc_index, 'ScoreIndex': score_index, 'TargetLabel': target_label, 'TargetBBox': target_bbox, 'BBoxInsideWeight': bbox_inside_weight}, attrs={'rpn_batch_size_per_im': rpn_batch_size_per_im, 'rpn_straddle_thresh': rpn_straddle_thresh, 'rpn_positive_overlap': rpn_positive_overlap, 'rpn_negative_overlap': rpn_negative_overlap, 'rpn_fg_fraction': rpn_fg_fraction, 'use_random': use_random})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=((- 1), 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=((- 1), 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return (predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight) |
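For context, a minimal sketch of how the five returned tensors are typically wired into RPN training losses. The variable names (score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight) and the loss wiring are illustrative assumptions following the usual sigmoid cross-entropy plus smooth-L1 recipe, not Paddle's reference RPN head; only standard fluid APIs are used.
import paddle.fluid as fluid
# Declare inputs; shapes mirror the docstring example and gt_boxes/is_crowd are LoDTensors.
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32', lod_level=1)
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
# Assign binary labels and regression targets to the sampled anchors.
score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight = fluid.layers.rpn_target_assign(
    bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
# Classification loss: sigmoid cross-entropy over the F + B sampled anchors.
score_tgt = fluid.layers.cast(x=score_tgt, dtype='float32')
score_tgt.stop_gradient = True
rpn_cls_loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=score_pred, label=score_tgt)
rpn_cls_loss = fluid.layers.reduce_mean(rpn_cls_loss)
# Regression loss: smooth L1 on foreground anchors; bbox_weight down-weights fake foregrounds.
loc_tgt.stop_gradient = True
rpn_reg_loss = fluid.layers.smooth_l1(x=loc_pred, y=loc_tgt,
                                      inside_weight=bbox_weight,
                                      outside_weight=bbox_weight,
                                      sigma=3.0)
rpn_reg_loss = fluid.layers.reduce_sum(rpn_reg_loss)
In practice the two losses are summed (often after normalizing by the number of sampled anchors) and added to the detection-head losses to train the full Faster-RCNN network.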