body (string, 26-98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
def initialize(self, run_tracker, start_time=None):
'Initialize with the given RunTracker.\n\n TODO: See `RunTracker.start`.\n '
run_id = run_tracker.initialize()
run_dir = os.path.join(self.get_options().reports_dir, run_id)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))
report = Report()
outfile = StringIO()
errfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, errfile=errfile, log_level=Report.INFO, color=False, indent=True, timing=False, cache_stats=False, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO, html_dir=html_dir, template_dir=self.get_options().template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
port = ReportingServerManager().socket
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
run_tracker.start(report, start_time) | 6,642,605,218,831,490,000 | Initialize with the given RunTracker.
TODO: See `RunTracker.start`. | src/python/pants/reporting/reporting.py | initialize | GoingTharn/pants | python | def initialize(self, run_tracker, start_time=None):
'Initialize with the given RunTracker.\n\n TODO: See `RunTracker.start`.\n '
run_id = run_tracker.initialize()
run_dir = os.path.join(self.get_options().reports_dir, run_id)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))
report = Report()
outfile = StringIO()
errfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, errfile=errfile, log_level=Report.INFO, color=False, indent=True, timing=False, cache_stats=False, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO, html_dir=html_dir, template_dir=self.get_options().template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
port = ReportingServerManager().socket
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
run_tracker.start(report, start_time) |
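The `relative_symlink(run_dir, .../latest)` call above implements a common "latest run" pattern. A minimal standard-library sketch of the same idea follows; the paths are placeholders, and `relative_symlink` is assumed to behave like the relative `os.symlink` shown here:

```python
import os

# Each run writes into its own directory; a relative symlink named
# "latest" is re-pointed at the newest one so tools have a stable path.
reports_dir = '/tmp/reports'                      # placeholder path
run_dir = os.path.join(reports_dir, 'run_0001')   # placeholder run id
os.makedirs(run_dir, exist_ok=True)

link = os.path.join(reports_dir, 'latest')
if os.path.lexists(link):
    os.remove(link)                               # drop the stale link
os.symlink(os.path.basename(run_dir), link)       # relative target
print(os.readlink(link))                          # -> run_0001
```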
def update_reporting(self, global_options, is_quiet, run_tracker):
"Updates reporting config once we've parsed cmd-line flags."
removed_reporter = run_tracker.report.remove_reporter('capturing')
buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
buffered_err = self._consume_stringio(removed_reporter.settings.errfile)
log_level = Report.log_level_from_string((global_options.level or 'info'))
color = (global_options.colors and (os.getenv('TERM') != 'dumb'))
timing = global_options.time
cache_stats = global_options.time
if is_quiet:
console_reporter = QuietReporter(run_tracker, QuietReporter.Settings(log_level=log_level, color=color, timing=timing, cache_stats=cache_stats))
else:
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr, color=color, indent=True, timing=timing, cache_stats=cache_stats, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if global_options.logdir:
safe_mkdir(global_options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile, color=False, indent=True, timing=True, cache_stats=True, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
invalidation_report = self._get_invalidation_report()
if invalidation_report:
run_id = run_tracker.run_info.get_info('id')
outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
invalidation_report.set_filename(outfile)
return invalidation_report | 6,257,123,446,702,456,000 | Updates reporting config once we've parsed cmd-line flags. | src/python/pants/reporting/reporting.py | update_reporting | GoingTharn/pants | python | def update_reporting(self, global_options, is_quiet, run_tracker):
removed_reporter = run_tracker.report.remove_reporter('capturing')
buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
buffered_err = self._consume_stringio(removed_reporter.settings.errfile)
log_level = Report.log_level_from_string((global_options.level or 'info'))
color = (global_options.colors and (os.getenv('TERM') != 'dumb'))
timing = global_options.time
cache_stats = global_options.time
if is_quiet:
console_reporter = QuietReporter(run_tracker, QuietReporter.Settings(log_level=log_level, color=color, timing=timing, cache_stats=cache_stats))
else:
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr, color=color, indent=True, timing=timing, cache_stats=cache_stats, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if global_options.logdir:
safe_mkdir(global_options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile, color=False, indent=True, timing=True, cache_stats=True, label_format=self.get_options().console_label_format, tool_output_format=self.get_options().console_tool_output_format)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
invalidation_report = self._get_invalidation_report()
if invalidation_report:
run_id = run_tracker.run_info.get_info('id')
outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
invalidation_report.set_filename(outfile)
return invalidation_report |
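`update_reporting` relies on a buffer-then-replay pattern: output produced before command-line flags are parsed lands in the `StringIO` buffers created by `initialize`, and is then emitted to the real reporters. A minimal sketch of that pattern; `_consume_stringio` is not shown in this excerpt and is assumed to read and close the buffer:

```python
from io import StringIO

# Capture output produced before the final reporters exist...
buf = StringIO()
buf.write('early log line\n')

# ...then drain the buffer and replay it once reporters are configured.
contents = buf.getvalue()   # what _consume_stringio would hand back
buf.close()
print(contents, end='')     # stands in for reporter.emit(...)
```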
@asyncio.coroutine
def test_extract_from_service_available_device(hass):
'Test that extraction from a service call skips unavailable devices.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2', available=False), MockEntity(name='test_3'), MockEntity(name='test_4', available=False)]))
call_1 = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_3'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call_1)))))
call_2 = ha.ServiceCall('test', 'service', data={'entity_id': ['test_domain.test_3', 'test_domain.test_4']})
assert (['test_domain.test_3'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call_2))))) | 8,222,202,552,732,189,000 | Test that extraction from a service call skips unavailable devices. | tests/helpers/test_entity_component.py | test_extract_from_service_available_device | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_extract_from_service_available_device(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2', available=False), MockEntity(name='test_3'), MockEntity(name='test_4', available=False)]))
call_1 = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_3'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call_1)))))
call_2 = ha.ServiceCall('test', 'service', data={'entity_id': ['test_domain.test_3', 'test_domain.test_4']})
assert (['test_domain.test_3'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call_2))))) |
@asyncio.coroutine
def test_platform_not_ready(hass):
'Test that we retry when platform not ready.'
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None])
loader.set_component(hass, 'mod1', MockModule('mod1'))
loader.set_component(hass, 'mod1.test_domain', MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_setup({DOMAIN: {'platform': 'mod1'}}))
assert (len(platform1_setup.mock_calls) == 1)
assert ('test_domain.mod1' not in hass.config.components)
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
async_fire_time_changed(hass, (utcnow + timedelta(seconds=29)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 1)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=30)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 2)
assert ('test_domain.mod1' not in hass.config.components)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=59)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 2)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=60)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 3)
assert ('test_domain.mod1' in hass.config.components) | -842,300,693,004,235,100 | Test that we retry when platform not ready. | tests/helpers/test_entity_component.py | test_platform_not_ready | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_platform_not_ready(hass):
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None])
loader.set_component(hass, 'mod1', MockModule('mod1'))
loader.set_component(hass, 'mod1.test_domain', MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_setup({DOMAIN: {'platform': 'mod1'}}))
assert (len(platform1_setup.mock_calls) == 1)
assert ('test_domain.mod1' not in hass.config.components)
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
async_fire_time_changed(hass, (utcnow + timedelta(seconds=29)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 1)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=30)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 2)
assert ('test_domain.mod1' not in hass.config.components)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=59)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 2)
async_fire_time_changed(hass, (utcnow + timedelta(seconds=60)))
(yield from hass.async_block_till_done())
assert (len(platform1_setup.mock_calls) == 3)
assert ('test_domain.mod1' in hass.config.components) |
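The `side_effect` list drives the retry behavior asserted above: the first two setup attempts raise `PlatformNotReady`, the third succeeds. A standalone sketch of how `Mock(side_effect=[...])` produces that sequence, using `ValueError` as a stand-in exception for illustration:

```python
from unittest.mock import Mock

# The first two calls raise, the third returns None: "ready on retry 2".
setup = Mock(side_effect=[ValueError, ValueError, None])
for attempt in range(3):
    try:
        setup()
        print('attempt', attempt, 'succeeded')
    except ValueError:
        print('attempt', attempt, 'not ready yet')
```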
@asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
'Test the extraction of everything from service.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call))))) | 6,420,445,789,876,699,000 | Test the extraction of everything from service. | tests/helpers/test_entity_component.py | test_extract_from_service_returns_all_if_no_entity_id | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (yield from component.async_extract_from_service(call))))) |
@asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
'Test the extraction of non-existing entities from service.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service', {'entity_id': ['test_domain.test_2', 'test_domain.non_exist']})
assert (['test_domain.test_2'] == [ent.entity_id for ent in (yield from component.async_extract_from_service(call))]) | 4,302,766,359,275,721,000 | Test the extraction of non-existing entities from service. | tests/helpers/test_entity_component.py | test_extract_from_service_filter_out_non_existing_entities | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service', {'entity_id': ['test_domain.test_2', 'test_domain.non_exist']})
assert (['test_domain.test_2'] == [ent.entity_id for ent in (yield from component.async_extract_from_service(call))]) |
@asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
'Test not expanding a group.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = (yield from group.Group.async_create_group(hass, 'test_group', ['light.Ceiling', 'light.Kitchen']))
(yield from component.async_add_entities([test_group]))
call = ha.ServiceCall('test', 'service', {'entity_id': ['group.test_group']})
extracted = (yield from component.async_extract_from_service(call, expand_group=False))
assert (extracted == [test_group]) | 7,651,423,936,688,272,000 | Test not expanding a group. | tests/helpers/test_entity_component.py | test_extract_from_service_no_group_expand | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = (yield from group.Group.async_create_group(hass, 'test_group', ['light.Ceiling', 'light.Kitchen']))
(yield from component.async_add_entities([test_group]))
call = ha.ServiceCall('test', 'service', {'entity_id': ['group.test_group']})
extracted = (yield from component.async_extract_from_service(call, expand_group=False))
assert (extracted == [test_group]) |
@asyncio.coroutine
def test_setup_dependencies_platform(hass):
"Test we setup the dependencies of a platform.\n\n We're explictely testing that we process dependencies even if a component\n with the same name has already been loaded.\n "
loader.set_component(hass, 'test_component', MockModule('test_component', dependencies=['test_component2']))
loader.set_component(hass, 'test_component2', MockModule('test_component2'))
loader.set_component(hass, 'test_component.test_domain', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_setup({DOMAIN: {'platform': 'test_component'}}))
assert ('test_component' in hass.config.components)
assert ('test_component2' in hass.config.components)
assert ('test_domain.test_component' in hass.config.components) | 2,688,575,218,466,561,000 | Test we setup the dependencies of a platform.
We're explicitly testing that we process dependencies even if a component
with the same name has already been loaded. | tests/helpers/test_entity_component.py | test_setup_dependencies_platform | BobbyBleacher/home-assistant | python | @asyncio.coroutine
def test_setup_dependencies_platform(hass):
"Test we setup the dependencies of a platform.\n\n We're explictely testing that we process dependencies even if a component\n with the same name has already been loaded.\n "
loader.set_component(hass, 'test_component', MockModule('test_component', dependencies=['test_component2']))
loader.set_component(hass, 'test_component2', MockModule('test_component2'))
loader.set_component(hass, 'test_component.test_domain', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
(yield from component.async_setup({DOMAIN: {'platform': 'test_component'}}))
assert ('test_component' in hass.config.components)
assert ('test_component2' in hass.config.components)
assert ('test_domain.test_component' in hass.config.components) |
async def test_setup_entry(hass):
'Test setup entry calls async_setup_entry on platform.'
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
assert (len(mock_setup_entry.mock_calls) == 1)
(p_hass, p_entry, p_add_entities) = mock_setup_entry.mock_calls[0][1]
assert (p_hass is hass)
assert (p_entry is entry)
assert (component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5)) | -6,554,110,248,055,908,000 | Test setup entry calls async_setup_entry on platform. | tests/helpers/test_entity_component.py | test_setup_entry | BobbyBleacher/home-assistant | python | async def test_setup_entry(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
assert (len(mock_setup_entry.mock_calls) == 1)
(p_hass, p_entry, p_add_entities) = mock_setup_entry.mock_calls[0][1]
assert (p_hass is hass)
assert (p_entry is entry)
assert (component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5)) |
async def test_setup_entry_platform_not_exist(hass):
'Test setup entry fails if the platform does not exist.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert ((await component.async_setup_entry(entry)) is False) | 6,092,304,173,295,340,000 | Test setup entry fails if the platform does not exist. | tests/helpers/test_entity_component.py | test_setup_entry_platform_not_exist | BobbyBleacher/home-assistant | python | async def test_setup_entry_platform_not_exist(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert ((await component.async_setup_entry(entry)) is False) |
async def test_setup_entry_fails_duplicate(hass):
"Test we don't allow setting up a config entry twice."
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
with pytest.raises(ValueError):
(await component.async_setup_entry(entry)) | 4,654,525,383,403,044,000 | Test we don't allow setting up a config entry twice. | tests/helpers/test_entity_component.py | test_setup_entry_fails_duplicate | BobbyBleacher/home-assistant | python | async def test_setup_entry_fails_duplicate(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
with pytest.raises(ValueError):
(await component.async_setup_entry(entry)) |
async def test_unload_entry_resets_platform(hass):
'Test unloading an entry removes all entities.'
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
assert (len(mock_setup_entry.mock_calls) == 1)
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 1)
assert (await component.async_unload_entry(entry))
assert (len(hass.states.async_entity_ids()) == 0) | -7,203,035,027,081,088,000 | Test unloading an entry removes all entities. | tests/helpers/test_entity_component.py | test_unload_entry_resets_platform | BobbyBleacher/home-assistant | python | async def test_unload_entry_resets_platform(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(hass, 'test_domain.entry_domain', MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert (await component.async_setup_entry(entry))
assert (len(mock_setup_entry.mock_calls) == 1)
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 1)
assert (await component.async_unload_entry(entry))
assert (len(hass.states.async_entity_ids()) == 0) |
async def test_unload_entry_fails_if_never_loaded(hass):
'.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
(await component.async_unload_entry(entry)) | 6,201,961,440,986,574,000 | . | tests/helpers/test_entity_component.py | test_unload_entry_fails_if_never_loaded | BobbyBleacher/home-assistant | python | async def test_unload_entry_fails_if_never_loaded(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
(await component.async_unload_entry(entry)) |
async def test_update_entity(hass):
'Test that we can update an entity with the helper.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
(await component.async_add_entities([entity]))
assert (len(entity.async_update_ha_state.mock_calls) == 1)
(await hass.helpers.entity_component.async_update_entity(entity.entity_id))
assert (len(entity.async_update_ha_state.mock_calls) == 2)
assert (entity.async_update_ha_state.mock_calls[(- 1)][1][0] is True) | -6,916,889,164,922,576,000 | Test that we can update an entity with the helper. | tests/helpers/test_entity_component.py | test_update_entity | BobbyBleacher/home-assistant | python | async def test_update_entity(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
(await component.async_add_entities([entity]))
assert (len(entity.async_update_ha_state.mock_calls) == 1)
(await hass.helpers.entity_component.async_update_entity(entity.entity_id))
assert (len(entity.async_update_ha_state.mock_calls) == 2)
assert (entity.async_update_ha_state.mock_calls[(- 1)][1][0] is True) |
async def test_set_service_race(hass):
'Test race condition on setting service.'
exception = False
def async_loop_exception_handler(_, _2) -> None:
'Handle all exception inside the core loop.'
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
(await async_setup_component(hass, 'group', {}))
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for i in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
(await hass.async_block_till_done())
assert (not exception) | -4,838,710,095,732,210,000 | Test race condition on setting service. | tests/helpers/test_entity_component.py | test_set_service_race | BobbyBleacher/home-assistant | python | async def test_set_service_race(hass):
exception = False
def async_loop_exception_handler(_, _2) -> None:
'Handle all exception inside the core loop.'
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
(await async_setup_component(hass, 'group', {}))
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for i in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
(await hass.async_block_till_done())
assert (not exception) |
async def test_extract_all_omit_entity_id(hass, caplog):
'Test extract all with None and *.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
(await component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (await component.async_extract_from_service(call)))))
assert ('Not passing an entity ID to a service to target all entities is deprecated' in caplog.text) | 6,499,691,931,174,889,000 | Test extract all with None and *. | tests/helpers/test_entity_component.py | test_extract_all_omit_entity_id | BobbyBleacher/home-assistant | python | async def test_extract_all_omit_entity_id(hass, caplog):
component = EntityComponent(_LOGGER, DOMAIN, hass)
(await component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service')
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (await component.async_extract_from_service(call)))))
assert ('Not passing an entity ID to a service to target all entities is deprecated' in caplog.text) |
async def test_extract_all_use_match_all(hass, caplog):
'Test extract all with None and *.'
component = EntityComponent(_LOGGER, DOMAIN, hass)
(await component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (await component.async_extract_from_service(call)))))
assert ('Not passing an entity ID to a service to target all entities is deprecated' not in caplog.text) | -157,309,294,548,301,440 | Test extract all with None and *. | tests/helpers/test_entity_component.py | test_extract_all_use_match_all | BobbyBleacher/home-assistant | python | async def test_extract_all_use_match_all(hass, caplog):
component = EntityComponent(_LOGGER, DOMAIN, hass)
(await component.async_add_entities([MockEntity(name='test_1'), MockEntity(name='test_2')]))
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert (['test_domain.test_1', 'test_domain.test_2'] == sorted((ent.entity_id for ent in (await component.async_extract_from_service(call)))))
assert ('Not passing an entity ID to a service to target all entities is deprecated' not in caplog.text) |
def setUp(self):
'Initialize a test Home Assistant instance.'
self.hass = get_test_home_assistant() | 9,083,279,011,530,655,000 | Initialize a test Home Assistant instance. | tests/helpers/test_entity_component.py | setUp | BobbyBleacher/home-assistant | python | def setUp(self):
self.hass = get_test_home_assistant() |
def tearDown(self):
'Clean up the test Home Assistant instance.'
self.hass.stop() | -5,348,143,838,058,471,000 | Clean up the test Home Assistant instance. | tests/helpers/test_entity_component.py | tearDown | BobbyBleacher/home-assistant | python | def tearDown(self):
self.hass.stop() |
def test_setting_up_group(self):
'Set up the setting of a group.'
setup_component(self.hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, self.hass, group_name='everyone')
assert (len(self.hass.states.entity_ids()) == 0)
component.add_entities([MockEntity()])
self.hass.block_till_done()
assert (len(self.hass.states.entity_ids()) == 2)
assert (self.hass.states.entity_ids('group') == ['group.everyone'])
group = self.hass.states.get('group.everyone')
assert (group.attributes.get('entity_id') == ('test_domain.unnamed_device',))
component.add_entities([MockEntity(name='goodbye')])
self.hass.block_till_done()
assert (len(self.hass.states.entity_ids()) == 3)
group = self.hass.states.get('group.everyone')
assert (group.attributes.get('entity_id') == ('test_domain.goodbye', 'test_domain.unnamed_device')) | -7,333,364,860,958,330,000 | Set up the setting of a group. | tests/helpers/test_entity_component.py | test_setting_up_group | BobbyBleacher/home-assistant | python | def test_setting_up_group(self):
setup_component(self.hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, self.hass, group_name='everyone')
assert (len(self.hass.states.entity_ids()) == 0)
component.add_entities([MockEntity()])
self.hass.block_till_done()
assert (len(self.hass.states.entity_ids()) == 2)
assert (self.hass.states.entity_ids('group') == ['group.everyone'])
group = self.hass.states.get('group.everyone')
assert (group.attributes.get('entity_id') == ('test_domain.unnamed_device',))
component.add_entities([MockEntity(name='goodbye')])
self.hass.block_till_done()
assert (len(self.hass.states.entity_ids()) == 3)
group = self.hass.states.get('group.everyone')
assert (group.attributes.get('entity_id') == ('test_domain.goodbye', 'test_domain.unnamed_device')) |
def test_setup_loads_platforms(self):
'Test the loading of the platforms.'
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(self.hass, MockModule('test_component', setup=component_setup))
mock_integration(self.hass, MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(self.hass, 'test_domain.mod2', MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert (not component_setup.called)
assert (not platform_setup.called)
component.setup({DOMAIN: {'platform': 'mod2'}})
self.hass.block_till_done()
assert component_setup.called
assert platform_setup.called | 1,190,858,218,687,881,700 | Test the loading of the platforms. | tests/helpers/test_entity_component.py | test_setup_loads_platforms | BobbyBleacher/home-assistant | python | def test_setup_loads_platforms(self):
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(self.hass, MockModule('test_component', setup=component_setup))
mock_integration(self.hass, MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(self.hass, 'test_domain.mod2', MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert (not component_setup.called)
assert (not platform_setup.called)
component.setup({DOMAIN: {'platform': 'mod2'}})
self.hass.block_till_done()
assert component_setup.called
assert platform_setup.called |
def test_setup_recovers_when_setup_raises(self):
'Test the setup if exceptions are happening.'
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(self.hass, 'test_domain.mod1', MockPlatform(platform1_setup))
mock_entity_platform(self.hass, 'test_domain.mod2', MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert (not platform1_setup.called)
assert (not platform2_setup.called)
component.setup(OrderedDict([(DOMAIN, {'platform': 'mod1'}), ('{} 2'.format(DOMAIN), {'platform': 'non_exist'}), ('{} 3'.format(DOMAIN), {'platform': 'mod2'})]))
self.hass.block_till_done()
assert platform1_setup.called
assert platform2_setup.called | 8,994,935,806,947,191,000 | Test the setup if exceptions are happening. | tests/helpers/test_entity_component.py | test_setup_recovers_when_setup_raises | BobbyBleacher/home-assistant | python | def test_setup_recovers_when_setup_raises(self):
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(self.hass, 'test_domain.mod1', MockPlatform(platform1_setup))
mock_entity_platform(self.hass, 'test_domain.mod2', MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert (not platform1_setup.called)
assert (not platform2_setup.called)
component.setup(OrderedDict([(DOMAIN, {'platform': 'mod1'}), ('{} 2'.format(DOMAIN), {'platform': 'non_exist'}), ('{} 3'.format(DOMAIN), {'platform': 'mod2'})]))
self.hass.block_till_done()
assert platform1_setup.called
assert platform2_setup.called |
@patch('homeassistant.helpers.entity_component.EntityComponent._async_setup_platform', return_value=mock_coro())
@patch('homeassistant.setup.async_setup_component', return_value=mock_coro(True))
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
'Test setup for discovery.'
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
discovery.load_platform(self.hass, DOMAIN, 'platform_test', {'msg': 'discovery_info'}, {DOMAIN: {}})
self.hass.block_till_done()
assert mock_setup.called
assert (('platform_test', {}, {'msg': 'discovery_info'}) == mock_setup.call_args[0]) | -5,922,677,092,688,795,000 | Test setup for discovery. | tests/helpers/test_entity_component.py | test_setup_does_discovery | BobbyBleacher/home-assistant | python | @patch('homeassistant.helpers.entity_component.EntityComponent._async_setup_platform', return_value=mock_coro())
@patch('homeassistant.setup.async_setup_component', return_value=mock_coro(True))
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
discovery.load_platform(self.hass, DOMAIN, 'platform_test', {'msg': 'discovery_info'}, {DOMAIN: {}})
self.hass.block_till_done()
assert mock_setup.called
assert (('platform_test', {}, {'msg': 'discovery_info'}) == mock_setup.call_args[0]) |
@patch('homeassistant.helpers.entity_platform.async_track_time_interval')
def test_set_scan_interval_via_config(self, mock_track):
'Test the setting of the scan interval via configuration.'
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(self.hass, 'test_domain.platform', MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({DOMAIN: {'platform': 'platform', 'scan_interval': timedelta(seconds=30)}})
self.hass.block_till_done()
assert mock_track.called
assert (timedelta(seconds=30) == mock_track.call_args[0][2]) | 4,659,996,049,484,274,000 | Test the setting of the scan interval via configuration. | tests/helpers/test_entity_component.py | test_set_scan_interval_via_config | BobbyBleacher/home-assistant | python | @patch('homeassistant.helpers.entity_platform.async_track_time_interval')
def test_set_scan_interval_via_config(self, mock_track):
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(self.hass, 'test_domain.platform', MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({DOMAIN: {'platform': 'platform', 'scan_interval': timedelta(seconds=30)}})
self.hass.block_till_done()
assert mock_track.called
assert (timedelta(seconds=30) == mock_track.call_args[0][2]) |
def test_set_entity_namespace_via_config(self):
'Test setting an entity namespace.'
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(name='beer'), MockEntity(name=None)])
platform = MockPlatform(platform_setup)
mock_entity_platform(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({DOMAIN: {'platform': 'platform', 'entity_namespace': 'yummy'}})
self.hass.block_till_done()
assert (sorted(self.hass.states.entity_ids()) == ['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']) | -1,436,440,670,925,141,000 | Test setting an entity namespace. | tests/helpers/test_entity_component.py | test_set_entity_namespace_via_config | BobbyBleacher/home-assistant | python | def test_set_entity_namespace_via_config(self):
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(name='beer'), MockEntity(name=None)])
platform = MockPlatform(platform_setup)
mock_entity_platform(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({DOMAIN: {'platform': 'platform', 'entity_namespace': 'yummy'}})
self.hass.block_till_done()
assert (sorted(self.hass.states.entity_ids()) == ['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']) |
def async_loop_exception_handler(_, _2) -> None:
'Handle all exception inside the core loop.'
nonlocal exception
exception = True | -6,541,915,942,138,100,000 | Handle all exception inside the core loop. | tests/helpers/test_entity_component.py | async_loop_exception_handler | BobbyBleacher/home-assistant | python | def async_loop_exception_handler(_, _2) -> None:
nonlocal exception
exception = True |
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(should_poll=True)]) | -8,607,022,983,448,196,000 | Test the platform setup. | tests/helpers/test_entity_component.py | platform_setup | BobbyBleacher/home-assistant | python | def platform_setup(hass, config, add_entities, discovery_info=None):
add_entities([MockEntity(should_poll=True)]) |
def platform_setup(hass, config, add_entities, discovery_info=None):
'Test the platform setup.'
add_entities([MockEntity(name='beer'), MockEntity(name=None)]) | 7,361,666,536,476,458,000 | Test the platform setup. | tests/helpers/test_entity_component.py | platform_setup | BobbyBleacher/home-assistant | python | def platform_setup(hass, config, add_entities, discovery_info=None):
add_entities([MockEntity(name='beer'), MockEntity(name=None)]) |
def quadprog(Q, q, G, h, A, b):
'\n Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html\n Output: Numpy array of the solution\n '
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x']) | 7,936,910,832,750,283,000 | Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution | fedlab_benchmarks/fedmgda+/standalone.py | quadprog | KarhouTam/FedLab-benchmarks | python | def quadprog(Q, q, G, h, A, b):
'\n Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html\n Output: Numpy array of the solution\n '
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x']) |
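A hedged usage sketch for `quadprog` above. Because of the transposes inside the wrapper, vectors appear to be expected as numpy column vectors of floats; that shape convention is an assumption inferred from the code, and `cvxopt` must be installed. The example minimizes x1^2 + x2^2 subject to x1 + x2 = 1 and x >= 0 (cvxopt solves min 1/2 x'Qx + q'x, hence Q = 2I):

```python
import numpy as np

Q = 2.0 * np.eye(2)         # quadratic term (floats, so cvxopt gets doubles)
q = np.zeros((2, 1))        # linear term, as a column vector
G = -np.eye(2)              # -x <= 0  <=>  x >= 0
h = np.zeros((2, 1))
A = np.array([[1.0, 1.0]])  # equality constraint: x1 + x2 = 1
b = np.array([1.0])

x = quadprog(Q, q, G, h, A, b)
print(x.ravel())            # expected: approximately [0.5, 0.5]
```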
def handle_mark(self, time, mark):
' Handle a single trace item (scoped entry and exit).\n Translates:\n - Automatically generated HIDL traces into NNTRACE layers and phases\n - SPEC:Switch phase during function into dummy items\n - SPEC:Subtracting time when nesting is violated into "subtract"\n markers\n - CPU/Driver layer distinction based on whether the process is the\n driver or an application\n This function is called multiple times for a single application run,\n afterwards the statistics can be calculated.\n '
if (mark[0] == 'B'):
switch = False
subtract = False
if (('ANeuralNetworksEvent_free' in mark) or ('ANeuralNetworksExecution_free' in mark)):
mark = mark.replace('_PT', '_PE')
if ('getSupportedExtensions' in mark):
mark = mark.replace('_PC', '_PI')
elif ('[SW][NN_LA_PR]executeWithCompilation' in mark):
mark = mark.replace('[SW]', '')
if (MARKER_SWITCH in mark):
switch = True
if (MARKER_SUBTRACT in mark):
subtract = True
if switch:
self.handle_mark(time, 'E')
self.mytree.push_dummy(time)
m = self.matcher.search(mark)
if (m is None):
tag = translate_hidl_mark_to_nn_and_tag(mark)
if (tag is None):
raise Exception(("Couldn't parse mark " + mark))
else:
tag = m.group(1)
[_, layer, phase] = tag.split('_')
if ((layer == LAYER_APPLICATION) and (phase in [PHASE_WARMUP, PHASE_BENCHMARK])):
self.app_phase.push(phase)
if (not self.is_driver):
layer = layer.replace(LAYER_DRIVER, LAYER_CPU)
else:
layer = layer.replace(LAYER_CPU, LAYER_DRIVER)
if ((layer == LAYER_APPLICATION) and (phase == PHASE_EXECUTION)):
self.la_pe_counts[self.app_phase.current()] = (self.la_pe_counts.get(self.app_phase.current(), 0) + 1)
self.mytree.push(time, mark, layer, phase, self.app_phase.current(), subtract)
elif (mark[0] == 'E'):
try:
node = self.mytree.pop(time)
if node.is_dummy():
pass
else:
if ((node.layer == LAYER_APPLICATION) and (node.phase in [PHASE_WARMUP, PHASE_BENCHMARK])):
self.app_phase.pop()
function = ((node.app_phase + '::') + get_function_name_from_mark(node.mark))
self.begins_and_ends_ms[function] = (self.begins_and_ends_ms.get(function, []) + [[(float(node.start_time_s) * 1000.0), (float(node.end_time_s) * 1000.0)]])
except IndexError as e:
raise Exception('Unable to process a trace termination mark, please check that the collected traces include full application lifecycles.\n') from e | -6,323,244,300,068,887,000 | Handle a single trace item (scoped entry and exit).
Translates:
- Automatically generated HIDL traces into NNTRACE layers and phases
- SPEC:Switch phase during function into dummy items
- SPEC:Subtracting time when nesting is violated into "subtract"
markers
- CPU/Driver layer distinction based on whether the process is the
driver or an application
This function is called multiple times for a single application run,
afterwards the statistics can be calculated. | tools/systrace_parser/parser/tracker.py | handle_mark | PotatoProject-next/ackages_modules_NeuralNetworks | python | def handle_mark(self, time, mark):
' Handle a single trace item (scoped entry and exit).\n Translates:\n - Automatically generated HIDL traces into NNTRACE layers and phases\n - SPEC:Switch phase during function into dummy items\n - SPEC:Subtracting time when nesting is violated into "subtract"\n markers\n - CPU/Driver layer distinction based on whether the process is the\n driver or an application\n This function is called multiple times for a single application run,\n afterwards the statistics can be calculated.\n '
if (mark[0] == 'B'):
switch = False
subtract = False
if (('ANeuralNetworksEvent_free' in mark) or ('ANeuralNetworksExecution_free' in mark)):
mark = mark.replace('_PT', '_PE')
if ('getSupportedExtensions' in mark):
mark = mark.replace('_PC', '_PI')
elif ('[SW][NN_LA_PR]executeWithCompilation' in mark):
mark = mark.replace('[SW]', '')
if (MARKER_SWITCH in mark):
switch = True
if (MARKER_SUBTRACT in mark):
subtract = True
if switch:
self.handle_mark(time, 'E')
self.mytree.push_dummy(time)
m = self.matcher.search(mark)
if (m is None):
tag = translate_hidl_mark_to_nn_and_tag(mark)
if (tag is None):
raise Exception(("Couldn't parse mark " + mark))
else:
tag = m.group(1)
[_, layer, phase] = tag.split('_')
if ((layer == LAYER_APPLICATION) and (phase in [PHASE_WARMUP, PHASE_BENCHMARK])):
self.app_phase.push(phase)
if (not self.is_driver):
layer = layer.replace(LAYER_DRIVER, LAYER_CPU)
else:
layer = layer.replace(LAYER_CPU, LAYER_DRIVER)
if ((layer == LAYER_APPLICATION) and (phase == PHASE_EXECUTION)):
self.la_pe_counts[self.app_phase.current()] = (self.la_pe_counts.get(self.app_phase.current(), 0) + 1)
self.mytree.push(time, mark, layer, phase, self.app_phase.current(), subtract)
elif (mark[0] == 'E'):
try:
node = self.mytree.pop(time)
if node.is_dummy():
pass
else:
if ((node.layer == LAYER_APPLICATION) and (node.phase in [PHASE_WARMUP, PHASE_BENCHMARK])):
self.app_phase.pop()
function = ((node.app_phase + '::') + get_function_name_from_mark(node.mark))
self.begins_and_ends_ms[function] = (self.begins_and_ends_ms.get(function, []) + [[(float(node.start_time_s) * 1000.0), (float(node.end_time_s) * 1000.0)]])
except IndexError as e:
raise Exception('Unable to process a trace termination mark, please check that the collected traces include full application lifecycles.\n') from e |
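The tag handling in `handle_mark` hinges on a `PREFIX_LAYER_PHASE` naming scheme. A tiny illustration of the split the code performs; the literal tag string is an assumption based on the constant names in this parser (e.g. `LAYER_APPLICATION` abbreviated as `LA`):

```python
# A tag such as "NN_LA_PC" decomposes into layer "LA" (application)
# and phase "PC" (compilation); the leading "NN" prefix is discarded.
tag = 'NN_LA_PC'
_, layer, phase = tag.split('_')
print(layer, phase)  # -> LA PC
```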
def is_complete(self):
" Checks if we've seen all end tracepoints for the begin tracepoints.\n "
return self.mytree.current.is_root() | -2,324,503,509,758,186,000 | Checks if we've seen all end tracepoints for the begin tracepoints. | tools/systrace_parser/parser/tracker.py | is_complete | PotatoProject-next/ackages_modules_NeuralNetworks | python | def is_complete(self):
return self.mytree.current.is_root() |
def test_water_at_freezing(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 0C\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 273.16
ref_vapor_pressure = 611.657
ref_dp_dT = 44.436693
ref_saturated_water_rho = 999.789
ref_saturated_steam_rho = 0.00485426
ref_alpha = (- 11.529101)
ref_saturated_water_enthalpy = 0.611786
ref_saturated_steam_enthalpy = 2500500.0
ref_phi = (- 0.04)
ref_saturated_water_entropy = 0
ref_saturated_steam_entropy = 9154.0
self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
self.assertAlmostEqual(ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual(ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
self.assertAlmostEqual(ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) | 7,141,919,799,247,304,000 | Reproduce verification results from IAPWS-IF97 for water at 0C
http://www.iapws.org/relguide/supsat.pdf | armi/materials/tests/test_water.py | test_water_at_freezing | youngmit/armi | python | def test_water_at_freezing(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 0C\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 273.16
ref_vapor_pressure = 611.657
ref_dp_dT = 44.436693
ref_saturated_water_rho = 999.789
ref_saturated_steam_rho = 0.00485426
ref_alpha = (- 11.529101)
ref_saturated_water_enthalpy = 0.611786
ref_saturated_steam_enthalpy = 2500500.0
ref_phi = (- 0.04)
ref_saturated_water_entropy = 0
ref_saturated_steam_entropy = 9154.0
self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
self.assertAlmostEqual(ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual(ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
self.assertAlmostEqual(ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) |
def test_water_at_boiling(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 100C\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 373.1243
ref_vapor_pressure = 101325.0
ref_dp_dT = 3616.0
ref_saturated_water_rho = 958.365
ref_saturated_steam_rho = 0.597586
ref_alpha = 417650.0
ref_saturated_water_enthalpy = 417050.0
ref_saturated_steam_enthalpy = 2675700.0
ref_phi = 1303.0
ref_saturated_water_entropy = 1307.0
ref_saturated_steam_entropy = 7355.0
self.assertAlmostEqual((ref_vapor_pressure / water.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_vapor_pressure / steam.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / water.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / steam.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual((ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_entropy / water.entropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) | -7,031,717,173,156,379,000 | Reproduce verification results from IAPWS-IF97 for water at 100C
http://www.iapws.org/relguide/supsat.pdf | armi/materials/tests/test_water.py | test_water_at_boiling | youngmit/armi | python | def test_water_at_boiling(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 100C\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 373.1243
ref_vapor_pressure = 101325.0
ref_dp_dT = 3616.0
ref_saturated_water_rho = 958.365
ref_saturated_steam_rho = 0.597586
ref_alpha = 417650.0
ref_saturated_water_enthalpy = 417050.0
ref_saturated_steam_enthalpy = 2675700.0
ref_phi = 1303.0
ref_saturated_water_entropy = 1307.0
ref_saturated_steam_entropy = 7355.0
self.assertAlmostEqual((ref_vapor_pressure / water.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_vapor_pressure / steam.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / water.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / steam.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual((ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_entropy / water.entropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) |
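The reference pressures in these saturation tests come from the IAPWS auxiliary equations for the saturation line (the supsat release cited in the docstrings). A self-contained sketch of the saturation-pressure correlation, which the `SaturatedWater`/`SaturatedSteam` classes presumably implement; the coefficients are the published Wagner-Pruss values:

```python
import math

TC, PC = 647.096, 22.064e6  # critical temperature (K) and pressure (Pa)
A = (-7.85951783, 1.84408259, -11.7866497,
     22.6807411, -15.9618719, 1.80122502)

def vapor_pressure(tk):
    tau = 1.0 - tk / TC
    poly = (A[0] * tau + A[1] * tau**1.5 + A[2] * tau**3
            + A[3] * tau**3.5 + A[4] * tau**4 + A[5] * tau**7.5)
    return PC * math.exp(TC / tk * poly)

print(vapor_pressure(373.1243))  # ~101325 Pa, matching the boiling test
```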
def test_water_at_critcalPoint(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 647.096K\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 647.096
ref_vapor_pressure = 22064000.0
ref_dp_dT = 268000.0
ref_saturated_water_rho = 322
ref_saturated_steam_rho = 322
ref_alpha = 1548000.0
ref_saturated_water_enthalpy = 2086600.0
ref_saturated_steam_enthalpy = 2086600.0
ref_phi = 3578.0
ref_saturated_water_entropy = 4410.0
ref_saturated_steam_entropy = 4410.0
self.assertAlmostEqual((ref_vapor_pressure / water.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_vapor_pressure / steam.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / water.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / steam.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual((ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_entropy / water.entropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) | 1,907,172,182,332,172,500 | Reproduce verification results from IAPWS-IF97 for water at 647.096K
http://www.iapws.org/relguide/supsat.pdf | armi/materials/tests/test_water.py | test_water_at_critcalPoint | youngmit/armi | python | def test_water_at_critcalPoint(self):
'\n Reproduce verification results from IAPWS-IF97 for water at 647.096K\n http://www.iapws.org/relguide/supsat.pdf\n '
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 647.096
ref_vapor_pressure = 22064000.0
ref_dp_dT = 268000.0
ref_saturated_water_rho = 322
ref_saturated_steam_rho = 322
ref_alpha = 1548000.0
ref_saturated_water_enthalpy = 2086600.0
ref_saturated_steam_enthalpy = 2086600.0
ref_phi = 3578.0
ref_saturated_water_entropy = 4410.0
ref_saturated_steam_entropy = 4410.0
self.assertAlmostEqual((ref_vapor_pressure / water.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_vapor_pressure / steam.vaporPressure(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / water.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_dp_dT / steam.vaporPressurePrime(Tk=Tk)), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual((ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk)), 1, 2)
self.assertAlmostEqual((ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_water_entropy / water.entropy(Tk=Tk)), 1, 3)
self.assertAlmostEqual((ref_saturated_steam_entropy / steam.entropy(Tk=Tk)), 1, 3) |
def count_evens(start, end):
'Returns the number of even numbers between start and end.'
counter = start
num_evens = 0
while (counter <= end):
if ((counter % 2) == 0):
num_evens += 1
counter += 1
return num_evens | 4,659,412,109,170,044,000 | Returns the number of even numbers between start and end. | exercise_brokencounts_solution.py | count_evens | annezola/gdi-python | python | def count_evens(start, end):
counter = start
num_evens = 0
while (counter <= end):
if ((counter % 2) == 0):
num_evens += 1
counter += 1
return num_evens |
def count_multiples(start, end, divisor):
'Returns the number of multiples of divisor between start and end.'
counter = start
num_multiples = 0
while (counter <= end):
if ((counter % divisor) == 0):
num_multiples += 1
counter += 1
return num_multiples | 3,766,785,650,277,561,300 | Returns the number of multiples of divisor between start and end. | exercise_brokencounts_solution.py | count_multiples | annezola/gdi-python | python | def count_multiples(start, end, divisor):
counter = start
num_multiples = 0
while (counter <= end):
if ((counter % divisor) == 0):
num_multiples += 1
counter += 1
return num_multiples |
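A quick sanity check for the two counters above; this is an illustrative sketch (not part of the source file) that assumes both functions are in scope. Note that count_evens(start, end) must agree with count_multiples(start, end, 2).

# Illustrative checks for the counters above (assumed to be in scope).
assert count_evens(1, 10) == 5             # 2, 4, 6, 8, 10
assert count_multiples(1, 10, 2) == 5      # evens are exactly the multiples of 2
assert count_multiples(1, 10, 3) == 3      # 3, 6, 9
print('counter checks passed')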
def calculate(pxarray: np.ndarray):
'Calculates one or more values from plot-level RGB data\n Arguments:\n pxarray: Array of RGB data for a single plot\n Return:\n Returns one or more calculated values\n '
channel_size = pxarray[:, :, 1].size
return channel_size | -8,294,379,030,707,513,000 | Calculates one or more values from plot-level RGB data
Arguments:
pxarray: Array of RGB data for a single plot
Return:
Returns one or more calculated values | .github/workflows/algorithm_rgb.py | calculate | AgPipeline/plot-base-rgb | python | def calculate(pxarray: np.ndarray):
'Calculates one or more values from plot-level RGB data\n Arguments:\n pxarray: Array of RGB data for a single plot\n Return:\n Returns one or more calculated values\n '
channel_size = pxarray[:, :, 1].size
return channel_size |
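The calculate function above simply returns the pixel count of the green channel; below is a minimal NumPy-only sketch of the same computation on a dummy RGB array (the 4 x 5 shape is illustrative):

import numpy as np

# A dummy 4 x 5 RGB plot image; channel index 1 (green) holds 4 * 5 = 20 pixels.
pxarray = np.zeros((4, 5, 3), dtype=np.uint8)
channel_size = pxarray[:, :, 1].size
assert channel_size == 20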
def _eval(self, segment, **kwargs):
'Join/From clauses should not contain subqueries. Use CTEs instead.\n\n NB: No fix for this routine because it would be very complex to\n implement reliably.\n '
parent_types = self._config_mapping[self.forbid_subquery_in]
for parent_type in parent_types:
if segment.is_type(parent_type):
table_expression = segment.get_child('table_expression')
if (not table_expression):
return None
table_expression = table_expression.get_child('main_table_expression')
if (not table_expression):
return None
problem_children = ['with_compound_statement', 'set_expression', 'select_statement']
for seg_type in problem_children:
seg = table_expression.get_child(seg_type)
if seg:
return LintResult(anchor=seg, description=f'{parent_type} clauses should not contain subqueries. Use CTEs instead') | -681,525,606,612,198,700 | Join/From clauses should not contain subqueries. Use CTEs instead.
NB: No fix for this routine because it would be very complex to
implement reliably. | src/sqlfluff/core/rules/std/L042.py | _eval | Jophish/sqlfluff | python | def _eval(self, segment, **kwargs):
'Join/From clauses should not contain subqueries. Use CTEs instead.\n\n NB: No fix for this routine because it would be very complex to\n implement reliably.\n '
parent_types = self._config_mapping[self.forbid_subquery_in]
for parent_type in parent_types:
if segment.is_type(parent_type):
table_expression = segment.get_child('table_expression')
if (not table_expression):
return None
table_expression = table_expression.get_child('main_table_expression')
if (not table_expression):
return None
problem_children = ['with_compound_statement', 'set_expression', 'select_statement']
for seg_type in problem_children:
seg = table_expression.get_child(seg_type)
if seg:
return LintResult(anchor=seg, description=f'{parent_type} clauses should not contain subqueries. Use CTEs instead') |
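The rule above (L042) flags subqueries inside FROM/JOIN clauses and recommends CTEs; below is a hedged sketch of the pattern it targets, checked through sqlfluff's simple lint API (the exact violation output depends on the installed sqlfluff version and enabled rules):

import sqlfluff

flagged = 'SELECT a FROM (SELECT a FROM my_table)'                 # subquery in FROM
preferred = 'WITH t AS (SELECT a FROM my_table) SELECT a FROM t'   # CTE rewrite

for sql in (flagged, preferred):
    violations = sqlfluff.lint(sql, dialect='ansi')
    print([v['code'] for v in violations])  # 'L042' should appear only for `flagged`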
def cog_unload(self):
' Cog unload handler. This removes any event hooks that were registered. '
self.bot.lavalink._event_hooks.clear() | 5,768,431,661,943,328,000 | Cog unload handler. This removes any event hooks that were registered. | cogs/music.py | cog_unload | 1Prototype1/HexBot | python | def cog_unload(self):
' '
self.bot.lavalink._event_hooks.clear() |
async def cog_before_invoke(self, ctx):
' Command before-invoke handler. '
guild_check = (ctx.guild is not None)
if guild_check:
(await self.ensure_voice(ctx))
return guild_check | -198,743,436,622,049,540 | Command before-invoke handler. | cogs/music.py | cog_before_invoke | 1Prototype1/HexBot | python | async def cog_before_invoke(self, ctx):
' '
guild_check = (ctx.guild is not None)
if guild_check:
(await self.ensure_voice(ctx))
return guild_check |
async def ensure_voice(self, ctx):
' This check ensures that the bot and command author are in the same voice channel. '
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = (ctx.command.name in ('play',))
if ((not ctx.author.voice) or (not ctx.author.voice.channel)):
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if (not player.is_connected):
if (not should_connect):
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if ((not permissions.connect) or (not permissions.speak)):
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
(await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id)))
elif (int(player.channel_id) != ctx.author.voice.channel.id):
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:') | 7,336,377,337,078,352,000 | This check ensures that the bot and command author are in the same voice channel. | cogs/music.py | ensure_voice | 1Prototype1/HexBot | python | async def ensure_voice(self, ctx):
' '
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = (ctx.command.name in ('play',))
if ((not ctx.author.voice) or (not ctx.author.voice.channel)):
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if (not player.is_connected):
if (not should_connect):
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if ((not permissions.connect) or (not permissions.speak)):
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
(await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id)))
elif (int(player.channel_id) != ctx.author.voice.channel.id):
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:') |
async def connect_to(self, guild_id: int, channel_id: str):
' Connects to the given voice channel ID. A channel_id of `None` means disconnect. '
ws = self.bot._connection._get_websocket(guild_id)
(await ws.voice_state(str(guild_id), channel_id)) | 8,130,363,841,742,988,000 | Connects to the given voice channel ID. A channel_id of `None` means disconnect. | cogs/music.py | connect_to | 1Prototype1/HexBot | python | async def connect_to(self, guild_id: int, channel_id: str):
' '
ws = self.bot._connection._get_websocket(guild_id)
(await ws.voice_state(str(guild_id), channel_id)) |
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=''):
'Get lyrics of current song'
if (not query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if (not player.is_playing):
return (await ctx.send("I'm not currently playing anything :warning:"))
query = player.current.title
try:
async with ctx.typing():
results = (await self.kclient.music.lyrics(query, limit=1))
except ksoftapi.NoResults:
(await ctx.send(f'No lyrics found for `{query}`'))
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(13434624), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name='Lyrics:')
lyrics = lyrics[2048:]
embeds = [embed]
while ((len(lyrics) > 0) and (len(embeds) < 10)):
embed = discord.Embed(color=discord.Color(13434624), description=lyrics[:2048])
lyrics = lyrics[(len(embeds) * 2048):]
embeds.append(embed)
embeds[(- 1)].set_footer(text='Source: KSoft.Si')
for embed in embeds:
(await ctx.send(embed=embed)) | -1,764,598,707,970,382,600 | Get lyrics of current song | cogs/music.py | get_lyrics | 1Prototype1/HexBot | python | @commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=):
if (not query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if (not player.is_playing):
return (await ctx.send("I'm not currently playing anything :warning:"))
query = player.current.title
try:
async with ctx.typing():
results = (await self.kclient.music.lyrics(query, limit=1))
except ksoftapi.NoResults:
(await ctx.send(f'No lyrics found for `{query}`'))
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(13434624), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name='Lyrics:')
lyrics = lyrics[2048:]
embeds = [embed]
while ((len(lyrics) > 0) and (len(embeds) < 10)):
embed = discord.Embed(color=discord.Color(13434624), description=lyrics[:2048])
lyrics = lyrics[(len(embeds) * 2048):]
embeds.append(embed)
embeds[(- 1)].set_footer(text='Source: KSoft.Si')
for embed in embeds:
(await ctx.send(embed=embed)) |
@commands.command(name='equalizer', aliases=['eq'])
async def equalizer(self, ctx, *args):
'Adjust the player equalizer: set `band gain` values or apply a preset'
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if (len(args) == 0):
(await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:'))
elif (len(args) == 1):
presets = {'reset': 'Default', 'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, (- 0.04), (- 0.06), (- 0.08), (- 0.1), (- 0.12), (- 0.14)], 'jazz': [(- 0.13), (- 0.11), (- 0.1), (- 0.1), 0.14, 0.2, (- 0.18), 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0], 'pop': [(- 0.02), (- 0.01), 0.08, 0.1, 0.15, 0.1, 0.03, (- 0.02), (- 0.035), (- 0.05), (- 0.05), (- 0.05), (- 0.05), (- 0.05), (- 0.05)], 'treble': [(- 0.1), (- 0.12), (- 0.12), (- 0.12), (- 0.08), (- 0.04), 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]}
preset = args[0].lower()
if (preset in ['reset', 'default']):
(await player.reset_equalizer())
elif (preset in presets):
gain_list = enumerate(presets[preset])
(await player.set_gains(*gain_list))
elif (preset == '--list'):
em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(16740095), description='\n'.join(presets.keys()))
return (await ctx.send(embed=em))
else:
return (await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets'))
elif (len(args) == 2):
try:
band = int(args[0])
gain = float(args[1])
(await player.set_gain(band, gain))
except ValueError:
return (await ctx.send('Specify valid `band gain` values :control_knobs:'))
else:
return (await ctx.send('Specify `band gain` or `preset` :control_knobs:'))
eq_frequencies = [f'`{gain}`' for gain in player.equalizer]
(await ctx.send((':level_slider: Current Values:\n' + ' '.join(eq_frequencies)))) | 2,920,680,446,155,721,000 | Adjust the player equalizer: set `band gain` values or apply a preset | cogs/music.py | equalizer | 1Prototype1/HexBot | python | @commands.command(name='equalizer', aliases=['eq'])
async def equalizer(self, ctx, *args):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if (len(args) == 0):
(await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:'))
elif (len(args) == 1):
presets = {'reset': 'Default', 'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, (- 0.04), (- 0.06), (- 0.08), (- 0.1), (- 0.12), (- 0.14)], 'jazz': [(- 0.13), (- 0.11), (- 0.1), (- 0.1), 0.14, 0.2, (- 0.18), 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0], 'pop': [(- 0.02), (- 0.01), 0.08, 0.1, 0.15, 0.1, 0.03, (- 0.02), (- 0.035), (- 0.05), (- 0.05), (- 0.05), (- 0.05), (- 0.05), (- 0.05)], 'treble': [(- 0.1), (- 0.12), (- 0.12), (- 0.12), (- 0.08), (- 0.04), 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]}
preset = args[0].lower()
if (preset in ['reset', 'default']):
(await player.reset_equalizer())
elif (preset in presets):
gain_list = enumerate(presets[preset])
(await player.set_gains(*gain_list))
elif (preset == '--list'):
em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(16740095), description='\n'.join(presets.keys()))
return (await ctx.send(embed=em))
else:
return (await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets'))
elif (len(args) == 2):
try:
band = int(args[0])
gain = float(args[1])
(await player.set_gain(band, gain))
except ValueError:
return (await ctx.send('Specify valid `band gain` values :control_knobs:'))
else:
return (await ctx.send('Specify `band gain` or `preset` :control_knobs:'))
eq_frequencies = [f'`{gain}`' for gain in player.equalizer]
(await ctx.send((':level_slider: Current Values:\n' + ' '.join(eq_frequencies)))) |
def __init__(self):
'\n initialize your data structure here.\n '
self.stack = []
self.min = math.inf | -9,064,414,778,785,991,000 | initialize your data structure here. | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py | __init__ | bgoonz/INTERVIEW-PREP-COMPLETE | python | def __init__(self):
'\n \n '
self.stack = []
self.min = math.inf |
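Only __init__ of the MinStack is captured above; the following is one possible completion with the usual LeetCode interface (push/pop/top/getMin), a sketch rather than the repository's actual code. It keeps the running minimum alongside each value so pop can restore self.min in O(1):

import math

class MinStack:
    def __init__(self):
        self.stack = []        # holds (value, min_so_far) pairs
        self.min = math.inf

    def push(self, val: int) -> None:
        self.min = min(self.min, val)
        self.stack.append((val, self.min))

    def pop(self) -> None:
        self.stack.pop()
        # restore the running minimum from the new top (or +inf when empty)
        self.min = self.stack[-1][1] if self.stack else math.inf

    def top(self) -> int:
        return self.stack[-1][0]

    def getMin(self) -> int:
        return self.min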
def get_full_mapping(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg=False, lowercase=True):
' Get full mapping given an alignment file.\n\n Args:\n src_filename:\n trg_filename:\n align_filename:\n mapping_filename:\n reverse_src2trg:\n lowercase:\n\n Returns:\n\n '
print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg))
src2trg_mapping = defaultdict((lambda : defaultdict(int)))
processed_line = 0
with open(src_filename) as fs, open(trg_filename) as ft, open(align_filename) as fa:
for (ls, lt, la) in zip(fs, ft, fa):
if lowercase:
ls = ls.lower()
lt = lt.lower()
processed_line += 1
ls_words = ls.split()
lt_words = lt.split()
la_aligns = la.split()
src_pos_counter = Counter()
trg_pos_counter = Counter()
valid_src_pos = set()
valid_trg_pos = set()
for align in la_aligns:
(src_pos, trg_pos) = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]):
src_pos_counter[src_pos] += 1
if _is_token_alnum(lt_words[trg_pos]):
trg_pos_counter[trg_pos] += 1
for (pos, c) in src_pos_counter.items():
if (c == 1):
valid_src_pos.add(pos)
for (pos, c) in trg_pos_counter.items():
if (c == 1):
valid_trg_pos.add(pos)
for align in la_aligns:
(src_pos, trg_pos) = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if (_is_token_alnum(ls_words[src_pos]) and _is_token_alnum(lt_words[trg_pos]) and (src_pos in valid_src_pos) and (trg_pos in valid_trg_pos)):
if reverse_src2trg:
src2trg_mapping[lt_words[trg_pos]][ls_words[src_pos]] += 1
else:
src2trg_mapping[ls_words[src_pos]][lt_words[trg_pos]] += 1
if ((processed_line % 1000000) == 0):
print('{} done.'.format(processed_line))
with open(mapping_filename, 'w') as fw:
print('dump to {} ...'.format(mapping_filename))
json.dump(src2trg_mapping, fw)
return src2trg_mapping | 691,652,501,439,763,500 | Get full mapping given an alignment file.
Args:
src_filename:
trg_filename:
align_filename:
mapping_filename:
reverse_src2trg:
lowercase:
Returns: | examples/wmt/tools/align/extract_bilingual_vocabulary.py | get_full_mapping | JiangtaoFeng/ParaGen | python | def get_full_mapping(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg=False, lowercase=True):
' Get full mapping given an alignment file.\n\n Args:\n src_filename:\n trg_filename:\n align_filename:\n mapping_filename:\n reverse_src2trg:\n lowercase:\n\n Returns:\n\n '
print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg))
src2trg_mapping = defaultdict((lambda : defaultdict(int)))
processed_line = 0
with open(src_filename) as fs, open(trg_filename) as ft, open(align_filename) as fa:
for (ls, lt, la) in zip(fs, ft, fa):
if lowercase:
ls = ls.lower()
lt = lt.lower()
processed_line += 1
ls_words = ls.split()
lt_words = lt.split()
la_aligns = la.split()
src_pos_counter = Counter()
trg_pos_counter = Counter()
valid_src_pos = set()
valid_trg_pos = set()
for align in la_aligns:
(src_pos, trg_pos) = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]):
src_pos_counter[src_pos] += 1
if _is_token_alnum(lt_words[trg_pos]):
trg_pos_counter[trg_pos] += 1
for (pos, c) in src_pos_counter.items():
if (c == 1):
valid_src_pos.add(pos)
for (pos, c) in trg_pos_counter.items():
if (c == 1):
valid_trg_pos.add(pos)
for align in la_aligns:
(src_pos, trg_pos) = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if (_is_token_alnum(ls_words[src_pos]) and _is_token_alnum(lt_words[trg_pos]) and (src_pos in valid_src_pos) and (trg_pos in valid_trg_pos)):
if reverse_src2trg:
src2trg_mapping[lt_words[trg_pos]][ls_words[src_pos]] += 1
else:
src2trg_mapping[ls_words[src_pos]][lt_words[trg_pos]] += 1
if ((processed_line % 1000000) == 0):
print('{} done.'.format(processed_line))
with open(mapping_filename, 'w') as fw:
print('dump to {} ...'.format(mapping_filename))
json.dump(src2trg_mapping, fw)
return src2trg_mapping |
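get_full_mapping above reads Pharaoh-style alignments ('srcIdx-trgIdx' pairs, one line per sentence pair); the sketch below replays its one-to-one filtering on a single in-memory sentence pair (files and the alphanumeric-token filter are omitted, and the example words are illustrative):

from collections import Counter

src = 'the black cat'.split()
trg = 'le chat noir'.split()
align = '0-0 1-2 2-1'.split()   # Pharaoh format: source index - target index

src_count = Counter(int(a.split('-')[0]) for a in align)
trg_count = Counter(int(a.split('-')[1]) for a in align)

pairs = []
for a in align:
    s, t = (int(x) for x in a.split('-'))
    # keep only one-to-one links, mirroring the valid_src_pos/valid_trg_pos sets
    if src_count[s] == 1 and trg_count[t] == 1:
        pairs.append((src[s], trg[t]))
print(pairs)   # [('the', 'le'), ('black', 'noir'), ('cat', 'chat')]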
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
" Clean dictionary based on frequency and gap of frequency.\n For example,\n {'s1': ['t1': 999, 't2': 199, 't3':1],\n 's2': ['m1': 2000, 'm2': 100]}\n =>\n {'s1': ['t1': 999, 't2': 199],\n 's2': ['m1': 2000]}\n\n Args:\n full_mapping:\n clean_dict_filename:\n threshold:\n ignore_gap:\n\n Returns:\n\n "
print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(clean_dict_filename, threshold, ignore_gap))
full_mapping = sorted(full_mapping.items(), key=(lambda x: sum(x[1].values())), reverse=True)
with open(clean_dict_filename, 'w') as fw:
for (idx, src2trg) in enumerate(full_mapping):
src = src2trg[0]
trg = sorted(src2trg[1].items(), key=(lambda x: x[1]), reverse=True)
total_count = sum((c[1] for c in trg))
clean_trg = dict()
p = trg[0][1]
for (w, c) in trg:
if ((c / total_count) < threshold):
break
if (((p / c) > ignore_gap) and ((c / total_count) < (THRESHOLD * 5))):
break
p = c
clean_trg.update({w: round((c / total_count), 3)})
fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False))) | -6,374,763,968,999,119,000 | Clean dictionary based on frequency and gap of frequency.
For example,
{'s1': ['t1': 999, 't2': 199, 't3':1],
's2': ['m1': 2000, 'm2': 100]}
=>
{'s1': ['t1': 999, 't2': 199],
's2': ['m1': 2000]}
Args:
full_mapping:
clean_dict_filename:
threshold:
ignore_gap:
Returns: | examples/wmt/tools/align/extract_bilingual_vocabulary.py | refine_dict | JiangtaoFeng/ParaGen | python | def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
" Clean dictionary based on frequency and gap of frequency.\n For example,\n {'s1': ['t1': 999, 't2': 199, 't3':1],\n 's2': ['m1': 2000, 'm2': 100]}\n =>\n {'s1': ['t1': 999, 't2': 199],\n 's2': ['m1': 2000]}\n\n Args:\n full_mapping:\n clean_dict_filename:\n threshold:\n ignore_gap:\n\n Returns:\n\n "
print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(clean_dict_filename, threshold, ignore_gap))
full_mapping = sorted(full_mapping.items(), key=(lambda x: sum(x[1].values())), reverse=True)
with open(clean_dict_filename, 'w') as fw:
for (idx, src2trg) in enumerate(full_mapping):
src = src2trg[0]
trg = sorted(src2trg[1].items(), key=(lambda x: x[1]), reverse=True)
total_count = sum((c[1] for c in trg))
clean_trg = dict()
p = trg[0][1]
for (w, c) in trg:
if ((c / total_count) < threshold):
break
if (((p / c) > ignore_gap) and ((c / total_count) < (THRESHOLD * 5))):
break
p = c
clean_trg.update({w: round((c / total_count), 3)})
fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False))) |
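refine_dict above keeps translations whose relative frequency clears a threshold and stops at large frequency gaps; here is a pure-Python sketch of that rule on the docstring's toy example (the threshold/gap values are illustrative and the THRESHOLD * 5 escape hatch is left out):

def clean_translations(trg_counts, threshold=0.05, ignore_gap=6.0):
    trg = sorted(trg_counts.items(), key=lambda x: x[1], reverse=True)
    total = sum(c for _, c in trg)
    kept, prev = {}, trg[0][1]
    for w, c in trg:
        if c / total < threshold:      # too rare relative to all candidates
            break
        if prev / c > ignore_gap:      # sharp drop from the previous candidate
            break
        prev = c
        kept[w] = round(c / total, 3)
    return kept

print(clean_translations({'t1': 999, 't2': 199, 't3': 1}))  # keeps t1 and t2
print(clean_translations({'m1': 2000, 'm2': 100}))          # keeps only m1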
def test_TreeTest1(self):
'Test Tree module.'
f = data_stream('nexus/test_Nexus_input.nex')
n = Nexus(f)
t3 = n.trees[2]
n.trees[2]
t3.root_with_outgroup(['t1', 't5'])
self.assertEqual(t3.is_monophyletic(['t1', 't5']), 13)
t3.split(parent_id=t3.search_taxon('t9'))
f.close() | 8,496,966,509,704,470,000 | Test Tree module. | tests/test_nexus.py | test_TreeTest1 | WebLogo/weblogo | python | def test_TreeTest1(self):
f = data_stream('nexus/test_Nexus_input.nex')
n = Nexus(f)
t3 = n.trees[2]
n.trees[2]
t3.root_with_outgroup(['t1', 't5'])
self.assertEqual(t3.is_monophyletic(['t1', 't5']), 13)
t3.split(parent_id=t3.search_taxon('t9'))
f.close() |
def _vec(x):
'Stacks column of matrix to form a single column.'
return array_ops.reshape(array_ops.matrix_transpose(x), array_ops.concat([array_ops.shape(x)[:(- 2)], [(- 1)]], axis=0)) | -5,485,323,311,372,672,000 | Stacks column of matrix to form a single column. | tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | _vec | ADiegoCAlonso/tensorflow | python | def _vec(x):
return array_ops.reshape(array_ops.matrix_transpose(x), array_ops.concat([array_ops.shape(x)[:(- 2)], [(- 1)]], axis=0)) |
def _unvec_by(y, num_col):
'Unstack vector to form a matrix, with a specified amount of columns.'
return array_ops.matrix_transpose(array_ops.reshape(y, array_ops.concat([array_ops.shape(y)[:(- 1)], [num_col, (- 1)]], axis=0))) | 1,865,925,301,402,786,300 | Unstack vector to form a matrix, with a specified amount of columns. | tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | _unvec_by | ADiegoCAlonso/tensorflow | python | def _unvec_by(y, num_col):
return array_ops.matrix_transpose(array_ops.reshape(y, array_ops.concat([array_ops.shape(y)[:(- 1)], [num_col, (- 1)]], axis=0))) |
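_vec stacks the columns of a matrix into a single vector and _unvec_by inverts it given the column count; the NumPy sketch below checks the round-trip on a 2 x 3 example (NumPy stands in for the TensorFlow ops purely to show the shape logic):

import numpy as np

x = np.arange(6).reshape(2, 3)                   # [[0, 1, 2], [3, 4, 5]]

vec = np.swapaxes(x, -2, -1).reshape(-1)         # columns stacked: [0 3 1 4 2 5]
unvec = np.swapaxes(vec.reshape(3, 2), -2, -1)   # _unvec_by(vec, num_col=3)

assert (unvec == x).all()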
def _rotate_last_dim(x, rotate_right=False):
'Rotate the last dimension either left or right.'
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat([[(ndims - 1)], math_ops.range(0, (ndims - 1))], axis=0)
else:
transpose_perm = array_ops.concat([math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm) | 8,692,827,826,145,462,000 | Rotate the last dimension either left or right. | tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | _rotate_last_dim | ADiegoCAlonso/tensorflow | python | def _rotate_last_dim(x, rotate_right=False):
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat([[(ndims - 1)], math_ops.range(0, (ndims - 1))], axis=0)
else:
transpose_perm = array_ops.concat([math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm) |
def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None):
'Initialize a `LinearOperatorKronecker`.\n\n `LinearOperatorKronecker` is initialized with a list of operators\n `[op_1,...,op_J]`.\n\n Args:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape, representing the Kronecker\n factors.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix\\\n #Extension_for_non_symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. Default is the individual\n operators names joined with `_x_`.\n\n Raises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.\n '
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if (not operators):
raise ValueError(('Expected a list of >=1 operators. Found: %s' % operators))
self._operators = operators
dtype = operators[0].dtype
for operator in operators:
if (operator.dtype != dtype):
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(('Expected all operators to have the same dtype. Found %s' % ' '.join(name_type)))
if all((operator.is_non_singular for operator in operators)):
if (is_non_singular is False):
raise ValueError('The Kronecker product of non-singular operators is always non-singular.')
is_non_singular = True
if all((operator.is_self_adjoint for operator in operators)):
if (is_self_adjoint is False):
raise ValueError('The Kronecker product of self-adjoint operators is always self-adjoint.')
is_self_adjoint = True
if all((operator.is_positive_definite for operator in operators)):
if (is_positive_definite is False):
raise ValueError('The Kronecker product of positive-definite operators is always positive-definite.')
is_positive_definite = True
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if (name is None):
name = operators[0].name
for operator in operators[1:]:
name += ('_x_' + operator.name)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(dtype=dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) | -419,869,077,990,686,340 | Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty. | tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | __init__ | ADiegoCAlonso/tensorflow | python | def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None):
'Initialize a `LinearOperatorKronecker`.\n\n `LinearOperatorKronecker` is initialized with a list of operators\n `[op_1,...,op_J]`.\n\n Args:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape, representing the Kronecker\n factors.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix\\\n #Extension_for_non_symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. Default is the individual\n operators names joined with `_x_`.\n\n Raises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.\n '
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if (not operators):
raise ValueError(('Expected a list of >=1 operators. Found: %s' % operators))
self._operators = operators
dtype = operators[0].dtype
for operator in operators:
if (operator.dtype != dtype):
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(('Expected all operators to have the same dtype. Found %s' % ' '.join(name_type)))
if all((operator.is_non_singular for operator in operators)):
if (is_non_singular is False):
raise ValueError('The Kronecker product of non-singular operators is always non-singular.')
is_non_singular = True
if all((operator.is_self_adjoint for operator in operators)):
if (is_self_adjoint is False):
raise ValueError('The Kronecker product of self-adjoint operators is always self-adjoint.')
is_self_adjoint = True
if all((operator.is_positive_definite for operator in operators)):
if (is_positive_definite is False):
raise ValueError('The Kronecker product of positive-definite operators is always positive-definite.')
is_positive_definite = True
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if (name is None):
name = operators[0].name
for operator in operators[1:]:
name += ('_x_' + operator.name)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(dtype=dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) |
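The constructor above propagates non-singularity because det(A kron B) = det(A)**m * det(B)**n for an n x n A and an m x m B, so a Kronecker product of non-singular factors is non-singular; a minimal NumPy check of that identity (a property check, not the operator's implementation):

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((2, 2))   # n x n, n = 2
b = rng.standard_normal((3, 3))   # m x m, m = 3

lhs = np.linalg.det(np.kron(a, b))
rhs = np.linalg.det(a) ** 3 * np.linalg.det(b) ** 2   # det(A)**m * det(B)**n

assert np.isclose(lhs, rhs)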
@app.route(('/api/' + version), methods=['GET'])
def test():
'\n GET method to test the API.\n '
message = {'response': [{'text': 'Hello world!'}]}
return jsonify(message) | 5,445,447,162,671,225,000 | GET method to test the API. | app.py | test | RodolfoFerro/iris-api | python | @app.route(('/api/' + version), methods=['GET'])
def test():
'\n \n '
message = {'response': [{'text': 'Hello world!'}]}
return jsonify(message) |
@app.route((('/api/' + version) + '/predict'), methods=['POST'])
def predict():
'\n POST method to predict with our classification model.\n '
req_data = request.get_json()
sl = req_data['sepal_length']
sw = req_data['sepal_width']
pl = req_data['petal_length']
pw = req_data['petal_width']
input_data = np.array([[sl, sw, pl, pw]])
prediction = classifier.predict(input_data)
print(prediction)
message = {'response': [{'input': {'sepal_length': sl, 'sepal_width': sw, 'petal_length': pl, 'petal_width': pw}}, {'prediction': int(prediction[0])}, {'species': species[str(prediction[0])]}]}
return jsonify(message) | -4,948,526,796,356,483,000 | POST method to predict with our classification model. | app.py | predict | RodolfoFerro/iris-api | python | @app.route((('/api/' + version) + '/predict'), methods=['POST'])
def predict():
'\n \n '
req_data = request.get_json()
sl = req_data['sepal_length']
sw = req_data['sepal_width']
pl = req_data['petal_length']
pw = req_data['petal_width']
input_data = np.array([[sl, sw, pl, pw]])
prediction = classifier.predict(input_data)
print(prediction)
message = {'response': [{'input': {'sepal_length': sl, 'sepal_width': sw, 'petal_length': pl, 'petal_width': pw}}, {'prediction': int(prediction[0])}, {'species': species[str(prediction[0])]}]}
return jsonify(message) |
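A client-side sketch of calling the two routes above with requests; the host, port, and the value of the version path segment are assumptions that depend on how the app is deployed:

import requests

BASE = 'http://localhost:5000/api/v1'   # assumed host, port and version string

print(requests.get(BASE).json())        # {'response': [{'text': 'Hello world!'}]}

payload = {
    'sepal_length': 5.1,
    'sepal_width': 3.5,
    'petal_length': 1.4,
    'petal_width': 0.2,
}
print(requests.post(BASE + '/predict', json=payload).json())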
def format_yaml(yaml, **kwargs):
'Formats a yaml template.\n\n Example usage:\n format_yaml(\'{"abc": ${x.y}}\', x={\'y\': 123})\n output should be \'{"abc": 123}\'\n '
template = _YamlTemplate(yaml)
try:
return template.substitute(flatten((kwargs or {}), reducer='dot'))
except KeyError as e:
raise RuntimeError('Unknown placeholder: {}'.format(e.args[0])) from e | -7,073,362,382,966,232,000 | Formats a yaml template.
Example usage:
format_yaml('{"abc": ${x.y}}', x={'y': 123})
output should be '{"abc": 123}' | web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py | format_yaml | duanbing/fedlearner | python | def format_yaml(yaml, **kwargs):
'Formats a yaml template.\n\n Example usage:\n format_yaml(\'{"abc": ${x.y}}\', x={\'y\': 123})\n output should be \'{"abc": 123}\'\n '
template = _YamlTemplate(yaml)
try:
return template.substitute(flatten((kwargs or {}), reducer='dot'))
except KeyError as e:
raise RuntimeError('Unknown placeholder: {}'.format(e.args[0])) from e |
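_YamlTemplate and the flatten helper are defined elsewhere in that repo; the self-contained sketch below reproduces the observable behaviour with a string.Template subclass whose braced names may contain dots, plus a hand-rolled dot-flatten (both are assumptions about the hidden helpers, not their actual code):

from string import Template

class DottedTemplate(Template):
    # assumption about _YamlTemplate: braced names may contain dots, e.g. ${x.y}
    braceidpattern = r'[_a-zA-Z][_a-zA-Z0-9.]*'

def flatten_dot(d, prefix=''):
    # stand-in for flatten(..., reducer='dot') from the flatten-dict package
    out = {}
    for k, v in d.items():
        key = f'{prefix}.{k}' if prefix else k
        if isinstance(v, dict):
            out.update(flatten_dot(v, key))
        else:
            out[key] = v
    return out

tpl = DottedTemplate('{"abc": ${x.y}}')
print(tpl.substitute(flatten_dot({'x': {'y': 123}})))   # {"abc": 123}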
def generate_yaml_template(base_yaml, slots_proto):
"\n Args:\n base_yaml: A string representation of one type job's base yaml.\n slots_proto: A proto map object representation of modification\n template's operable smallest units.\n Returns:\n string: A yaml_template\n "
slots = {}
for key in slots_proto:
if (slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT):
slots[key] = slots_proto[key].default
else:
slots[key] = f'${{{slots_proto[key].reference}}}'
return format_yaml(base_yaml, **slots) | 7,733,384,208,342,072,000 | Args:
base_yaml: A string representation of one type job's base yaml.
slots_proto: A proto map object representation of modification
template's operable smallest units.
Returns:
string: A yaml_template | web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py | generate_yaml_template | duanbing/fedlearner | python | def generate_yaml_template(base_yaml, slots_proto):
"\n Args:\n base_yaml: A string representation of one type job's base yaml.\n slots_proto: A proto map object representation of modification\n template's operable smallest units.\n Returns:\n string: A yaml_template\n "
slots = {}
for key in slots_proto:
if (slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT):
slots[key] = slots_proto[key].default
else:
slots[key] = f'${{{slots_proto[key].reference}}}'
return format_yaml(base_yaml, **slots) |
def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
' Helper to construct tensor from size, device and common params. '
create_op = tensor_init_params.create_op
dtype = tensor_init_params.tensor_properties.dtype
layout = tensor_init_params.tensor_properties.layout
requires_grad = tensor_init_params.tensor_properties.requires_grad
memory_format = tensor_init_params.tensor_properties.memory_format
pin_memory = tensor_init_params.tensor_properties.pin_memory
if (create_op == CreateOp.ONES):
return torch.ones(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (create_op == CreateOp.EMPTY):
return torch.empty(*size, dtype=dtype, layout=layout, device=local_device, requires_grad=requires_grad, memory_format=memory_format, pin_memory=pin_memory)
elif (tensor_init_params.create_op == CreateOp.ZEROS):
return torch.zeros(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (tensor_init_params.create_op == CreateOp.RAND):
return torch.rand(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (tensor_init_params.create_op == CreateOp.FULL):
return torch.full(size=size, fill_value=tensor_init_params.fill_value, layout=layout, dtype=dtype, requires_grad=requires_grad, device=local_device)
else:
raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}') | -4,788,187,083,809,758,000 | Helper to construct tensor from size, device and common params. | torch/distributed/_sharded_tensor/api.py | _create_tensor_from_params | dannis999/tensorflow | python | def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
' '
create_op = tensor_init_params.create_op
dtype = tensor_init_params.tensor_properties.dtype
layout = tensor_init_params.tensor_properties.layout
requires_grad = tensor_init_params.tensor_properties.requires_grad
memory_format = tensor_init_params.tensor_properties.memory_format
pin_memory = tensor_init_params.tensor_properties.pin_memory
if (create_op == CreateOp.ONES):
return torch.ones(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (create_op == CreateOp.EMPTY):
return torch.empty(*size, dtype=dtype, layout=layout, device=local_device, requires_grad=requires_grad, memory_format=memory_format, pin_memory=pin_memory)
elif (tensor_init_params.create_op == CreateOp.ZEROS):
return torch.zeros(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (tensor_init_params.create_op == CreateOp.RAND):
return torch.rand(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad)
elif (tensor_init_params.create_op == CreateOp.FULL):
return torch.full(size=size, fill_value=tensor_init_params.fill_value, layout=layout, dtype=dtype, requires_grad=requires_grad, device=local_device)
else:
raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}') |
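The branch ladder above is a factory dispatch over CreateOp; the sketch below shows the same idea as a lookup table in plain PyTorch (the string op names are illustrative, and full is special-cased because torch.full takes a size tuple plus a fill value):

import torch

factories = {
    'ones': torch.ones,
    'zeros': torch.zeros,
    'empty': torch.empty,
    'rand': torch.rand,
}

def create(op, *size, fill_value=None, **props):
    if op == 'full':   # torch.full(size, fill_value, ...) has a different shape
        return torch.full(size, fill_value, **props)
    return factories[op](*size, **props)

t = create('zeros', 2, 3, dtype=torch.float32, device='cpu')
print(t.shape)   # torch.Size([2, 3])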
def gather(self, dst: int=0, out: Optional[torch.Tensor]=None) -> None:
'\n Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the\n sharded tensor.\n\n The API needs to be called on all ranks in SPMD fashion. All ranks should have\n the same ``dst``. ``out`` should be a tensor of the same size as the overall\n size of the sharded tensor on ``dst`` and ``None`` on all other ranks.\n\n Args:\n dst(int): The rank where full tensor is constructed.\n Default: 0\n out (:class `torch.Tensor`, optional): The output full tensor.\n Must be provided ONLY on ``dst`` rank.\n Default: ``None``\n '
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
gathered_shards = ([None] * world_size)
dist.all_gather_object(obj=local_shards, object_list=gathered_shards, group=self._process_group)
if (rank == dst):
dims = len(full_size)
for shards in gathered_shards:
if (shards is None):
raise RuntimeError('Gathered shards cannot be None on dst rank {dst}')
for shard in shards:
metadata = shard.metadata
tensor = shard.tensor
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(dim, metadata.shard_offsets[dim], metadata.shard_sizes[dim])
out_narrow_view.copy_(tensor) | 6,785,489,561,761,985,000 | Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
sharded tensor.
The API needs to be called on all ranks in SPMD fashion. All ranks should have
the same ``dst``. ``out`` should be a tensor of the same size as the overall
size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
Args:
dst(int): The rank where full tensor is constructed.
Default: 0
out (:class `torch.Tensor`, optional): The output full tensor.
Must be provided ONLY on ``dst`` rank.
Default: ``None`` | torch/distributed/_sharded_tensor/api.py | gather | dannis999/tensorflow | python | def gather(self, dst: int=0, out: Optional[torch.Tensor]=None) -> None:
'\n Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the\n sharded tensor.\n\n The API needs to be called on all ranks in SPMD fashion. All ranks should have\n the same ``dst``. ``out`` should be a tensor of the same size as the overall\n size of the sharded tensor on ``dst`` and ``None`` on all other ranks.\n\n Args:\n dst(int): The rank where full tensor is constructed.\n Default: 0\n out (:class `torch.Tensor`, optional): The output full tensor.\n Must be provided ONLY on ``dst`` rank.\n Default: ``None``\n '
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
gathered_shards = ([None] * world_size)
dist.all_gather_object(obj=local_shards, object_list=gathered_shards, group=self._process_group)
if (rank == dst):
dims = len(full_size)
for shards in gathered_shards:
if (shards is None):
raise RuntimeError('Gathered shards cannot be None on dst rank {dst}')
for shard in shards:
metadata = shard.metadata
tensor = shard.tensor
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(dim, metadata.shard_offsets[dim], metadata.shard_sizes[dim])
out_narrow_view.copy_(tensor) |
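A usage-shape sketch for gather above rather than a runnable script: it assumes an already-initialized process group and an existing ShardedTensor st on every rank, and only the destination rank allocates out:

import torch
import torch.distributed as dist

# `st` is an assumed, pre-built ShardedTensor shared across all ranks.
rank = dist.get_rank()
out = torch.empty(st.size()) if rank == 0 else None   # only dst supplies `out`
st.gather(dst=0, out=out)                             # collective: call on all ranks
if rank == 0:
    print(out.shape)                                  # the full, un-sharded size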
@classmethod
def _init_from_local_shards_and_global_metadata(cls, local_shards: List[Shard], sharded_tensor_metadata: ShardedTensorMetadata, process_group=None, init_rrefs=False) -> 'ShardedTensor':
'\n Initialize a ShardedTensor with local shards and a global\n ShardedTensorMetadata built on each rank.\n\n Warning: This API is experimental and subject to change. It does\n not do cross rank validations, and fully relies on the user\n for the correctness of sharded_tensor_metadata on each rank\n '
process_group = (process_group if (process_group is not None) else distributed_c10d._get_default_group())
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if (len(shards_metadata) == 0):
raise ValueError('shards_metadata must not be empty!')
if (tensor_properties.layout != torch.strided):
raise ValueError('Only torch.strided layout is currently supported')
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = sharded_tensor_metadata
local_shard_metadatas = []
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = ('tensor property' if is_property else 'local ShardMetadata')
if (expected != actual):
raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with {tensor_property_or_metadata} on rank {rank}: {tensor_property_or_metadata} {prop_name}={expected}, local shard tensor {prop_name}={actual}.")
for shard_metadata in shards_metadata:
(rank, local_device) = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)
if (current_rank == rank):
local_shard_metadatas.append(shard_metadata)
if (len(local_shards) != len(local_shard_metadatas)):
raise RuntimeError(f'Number of local shards ({len(local_shards)}) does not match number of local shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) on rank ({current_rank}) ')
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
(rank, local_device) = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)
assert (shard_meta in local_shard_metadatas), 'local shard metadata not in sharded_tensor_metadata!'
_raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, 'layout', current_rank, True)
if (not local_shard_tensor.is_contiguous()):
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
_raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), 'size', current_rank)
_raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), 'pin_memory', current_rank, True)
_raise_if_mismatch(local_device, local_shard_tensor.device, 'device', current_rank)
_raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, 'dtype', current_rank, True)
_raise_if_mismatch(tensor_properties.requires_grad, local_shard_tensor.requires_grad, 'requires_grad', current_rank, True)
validate_non_overlapping_shards_metadata(shards_metadata)
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
sharded_tensor._local_shards = local_shards
sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)
sharded_tensor._post_init()
return sharded_tensor | 8,566,875,488,481,365,000 | Initialize a ShardedTensor with local shards and a global
ShardedTensorMetadata built on each rank.
Warning: This API is experimental and subject to change. It does
not do cross rank validations, and fully relies on the user
for the correctness of sharded_tensor_metadata on each rank | torch/distributed/_sharded_tensor/api.py | _init_from_local_shards_and_global_metadata | dannis999/tensorflow | python | @classmethod
def _init_from_local_shards_and_global_metadata(cls, local_shards: List[Shard], sharded_tensor_metadata: ShardedTensorMetadata, process_group=None, init_rrefs=False) -> 'ShardedTensor':
'\n Initialize a ShardedTensor with local shards and a global\n ShardedTensorMetadata built on each rank.\n\n Warning: This API is experimental and subject to change. It does\n not do cross rank validations, and fully relies on the user\n for the correctness of sharded_tensor_metadata on each rank\n '
process_group = (process_group if (process_group is not None) else distributed_c10d._get_default_group())
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if (len(shards_metadata) == 0):
raise ValueError('shards_metadata must not be empty!')
if (tensor_properties.layout != torch.strided):
raise ValueError('Only torch.strided layout is currently supported')
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = sharded_tensor_metadata
local_shard_metadatas = []
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = ('tensor property' if is_property else 'local ShardMetadata')
if (expected != actual):
raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with {tensor_property_or_metadata} on rank {rank}: {tensor_property_or_metadata} {prop_name}={expected}, local shard tensor {prop_name}={actual}.")
for shard_metadata in shards_metadata:
(rank, local_device) = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)
if (current_rank == rank):
local_shard_metadatas.append(shard_metadata)
if (len(local_shards) != len(local_shard_metadatas)):
raise RuntimeError(f'Number of local shards ({len(local_shards)}) does not match number of local shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) on rank ({current_rank}) ')
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
(rank, local_device) = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)
assert (shard_meta in local_shard_metadatas), 'local shard metadata not in sharded_tensor_metadata!'
_raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, 'layout', current_rank, True)
if (not local_shard_tensor.is_contiguous()):
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
_raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), 'size', current_rank)
_raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), 'pin_memory', current_rank, True)
_raise_if_mismatch(local_device, local_shard_tensor.device, 'device', current_rank)
_raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, 'dtype', current_rank, True)
_raise_if_mismatch(tensor_properties.requires_grad, local_shard_tensor.requires_grad, 'requires_grad', current_rank, True)
validate_non_overlapping_shards_metadata(shards_metadata)
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
sharded_tensor._local_shards = local_shards
sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)
sharded_tensor._post_init()
return sharded_tensor |
def sharding_spec(self) -> ShardingSpec:
'\n Returns the ShardingSpec for the tensor.\n '
return self._sharding_spec | -8,737,724,293,681,844,000 | Returns the ShardingSpec for the tensor. | torch/distributed/_sharded_tensor/api.py | sharding_spec | dannis999/tensorflow | python | def sharding_spec(self) -> ShardingSpec:
'\n \n '
return self._sharding_spec |
def metadata(self) -> ShardedTensorMetadata:
'\n Returns a :class:`ShardedTensorMetadata` object corresponding to the\n metadata for the entire tensor.\n '
return self._metadata | 8,535,982,073,666,668,000 | Returns a :class:`ShardedTensorMetadata` object corresponding to the
metadata for the entire tensor. | torch/distributed/_sharded_tensor/api.py | metadata | dannis999/tensorflow | python | def metadata(self) -> ShardedTensorMetadata:
'\n Returns a :class:`ShardedTensorMetadata` object corresponding to the\n metadata for the entire tensor.\n '
return self._metadata |
def local_shards(self) -> List[Shard]:
"\n Returns a list of :class:`Shard' corresponding to the\n local shards for this rank. Returns an empty list if the current rank\n does not host any shards for this Tensor.\n "
return self._local_shards | -6,682,747,474,173,311,000 | Returns a list of :class:`Shard' corresponding to the
local shards for this rank. Returns an empty list if the current rank
does not host any shards for this Tensor. | torch/distributed/_sharded_tensor/api.py | local_shards | dannis999/tensorflow | python | def local_shards(self) -> List[Shard]:
"\n Returns a list of :class:`Shard' corresponding to the\n local shards for this rank. Returns an empty list if the current rank\n does not host any shards for this Tensor.\n "
return self._local_shards |
def size(self, dim: int=None) -> Union[(torch.Size, int)]:
'\n Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.\n The dimension can be specified.\n\n Args:\n dim (int, optional): the dimension over which the size represents.\n If specified, it returns the size of the given dimension.\n If not, it returns a subclass of tuple.\n Default: ``None``\n\n Returns:\n A :Union:`[torch.Size, int]` represents the size of the tensor.\n '
size = self._metadata.size
if (dim is None):
return size
if ((dim < 0) or (dim >= len(size))):
raise ValueError(f'Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})')
return size[dim] | -6,670,305,957,667,188,000 | Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.
The dimension can be specified.
Args:
dim (int, optional): the dimension over which the size represents.
If specified, it returns the size of the given dimension.
If not, it returns a subclass of tuple.
Default: ``None``
Returns:
A :Union:`[torch.Size, int]` represents the size of the tensor. | torch/distributed/_sharded_tensor/api.py | size | dannis999/tensorflow | python | def size(self, dim: int=None) -> Union[(torch.Size, int)]:
'\n Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.\n The dimension can be specified.\n\n Args:\n dim (int, optional): the dimension over which the size represents.\n If specified, it returns the size of the given dimension.\n If not, it returns a subclass of tuple.\n Default: ``None``\n\n Returns:\n A :Union:`[torch.Size, int]` represents the size of the tensor.\n '
size = self._metadata.size
if (dim is None):
return size
if ((dim < 0) or (dim >= len(size))):
raise ValueError(f'Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})')
return size[dim] |
def is_pinned(self) -> bool:
'\n Returns True if the sharded tensor (each local shard) resides in pinned memory.\n '
return self._metadata.tensor_properties.pin_memory | -8,720,569,316,649,941,000 | Returns True if the sharded tensor (each local shard) resides in pinned memory. | torch/distributed/_sharded_tensor/api.py | is_pinned | dannis999/tensorflow | python | def is_pinned(self) -> bool:
'\n \n '
return self._metadata.tensor_properties.pin_memory |
def is_contiguous(self) -> bool:
'\n Returns True if the sharded tensor (each local shard) is contiguous in memory\n in the order specified by memory format.\n '
return (self._metadata.tensor_properties.memory_format == torch.contiguous_format) | -169,953,434,054,276,770 | Returns True if the sharded tensor (each local shard) is contiguous in memory
in the order specified by memory format. | torch/distributed/_sharded_tensor/api.py | is_contiguous | dannis999/tensorflow | python | def is_contiguous(self) -> bool:
'\n Returns True if the sharded tensor (each local shard) is contiguous in memory\n in the order specified by memory format.\n '
return (self._metadata.tensor_properties.memory_format == torch.contiguous_format) |
def remote_shards(self) -> Dict[(int, List[rpc.RRef[Shard]])]:
'\n Returns a Dict[int, RRef] with keys being the RPC rank and values\n being RRefs to shards on that rank. Need to initialize the\n RPC framework for this functionality.\n\n Raises an exception if ShardedTensor was created with ``init_rrefs=False``\n '
if (not self._init_rrefs):
raise RuntimeError('ShardedTensor created with init_rrefs=False, no RRefs to remote shards available')
return self._remote_shards | -8,189,682,645,657,949,000 | Returns a Dict[int, RRef] with keys being the RPC rank and values
being RRefs to shards on that rank. Need to initialize the
RPC framework for this functionality.
Raises an exception if ShardedTensor was created with ``init_rrefs=False`` | torch/distributed/_sharded_tensor/api.py | remote_shards | dannis999/tensorflow | python | def remote_shards(self) -> Dict[(int, List[rpc.RRef[Shard]])]:
'\n Returns a Dict[int, RRef] with keys being the RPC rank and values\n being RRefs to shards on that rank. Need to initialize the\n RPC framework for this functionality.\n\n Raises an exception if ShardedTensor was created with ``init_rrefs=False``\n '
if (not self._init_rrefs):
raise RuntimeError('ShardedTensor created with init_rrefs=False, no RRefs to remote shards available')
return self._remote_shards |
def add_stats(self, a, b):
'\n Add two stats dicts that are returned by the process function.\n This is used for multiple files\n :param a: stats dict\n :param b: stats dict\n :return: stats dict\n '
stats = {}
stats['skipped_because_min_length'] = (a['skipped_because_min_length'] + b['skipped_because_min_length'])
stats['skipped_alpha_count'] = (a['skipped_alpha_count'] + b['skipped_alpha_count'])
stats['skipped_because_max_numeric'] = (a['skipped_because_max_numeric'] + b['skipped_because_max_numeric'])
stats['skipped_because_max_non_ascii'] = (a['skipped_because_max_non_ascii'] + b['skipped_because_max_non_ascii'])
stats['skipped_because_forbidden_chars'] = (a['skipped_because_forbidden_chars'] + b['skipped_because_forbidden_chars'])
stats['total_original_length'] = (a['total_original_length'] + b['total_original_length'])
stats['total_clean_length'] = (a['total_clean_length'] + b['total_clean_length'])
return stats | 5,214,798,530,183,328,000 | Add two stats dicts that are returned by the process function.
This is used for multiple files
:param a: stats dict
:param b: stats dict
:return: stats dict | corpus/text_cleaner.py | add_stats | senisioi/Romanian-Transformers | python | def add_stats(self, a, b):
'\n Add two stats dicts that are returned by the process function.\n This is used for multiple files\n :param a: stats dict\n :param b: stats dict\n :return: stats dict\n '
stats = {}
stats['skipped_because_min_length'] = (a['skipped_because_min_length'] + b['skipped_because_min_length'])
stats['skipped_alpha_count'] = (a['skipped_alpha_count'] + b['skipped_alpha_count'])
stats['skipped_because_max_numeric'] = (a['skipped_because_max_numeric'] + b['skipped_because_max_numeric'])
stats['skipped_because_max_non_ascii'] = (a['skipped_because_max_non_ascii'] + b['skipped_because_max_non_ascii'])
stats['skipped_because_forbidden_chars'] = (a['skipped_because_forbidden_chars'] + b['skipped_because_forbidden_chars'])
stats['total_original_length'] = (a['total_original_length'] + b['total_original_length'])
stats['total_clean_length'] = (a['total_clean_length'] + b['total_clean_length'])
return stats |
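The field-by-field merge above can be written generically; a minimal sketch using a dict comprehension over the shared keys (equivalent only because both dicts carry exactly the same keys, as the hand-written version assumes):

def add_stats_generic(a, b):
    # assumes a and b share the same keys, like the explicit version above
    return {k: a[k] + b[k] for k in a}

a = {'total_original_length': 10, 'total_clean_length': 8}
b = {'total_original_length': 5, 'total_clean_length': 4}
print(add_stats_generic(a, b))   # {'total_original_length': 15, 'total_clean_length': 12}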
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
'Runs the atlas-based cardiac segmentation\n\n Args:\n img (sitk.Image): The target image to segment.\n guide_structure (sitk.Image, optional): Optional binary mask used to crop the\n target image. Defaults to None.\n settings (dict, optional): Dictionary containing settings for the algorithm.\n Defaults to CARDIAC_SETTINGS_DEFAULTS.\n\n Returns:\n dict: Dictionary containing output of segmentation\n '
results = {}
results_prob = {}
return_as_cropped = settings['return_as_cropped']
"\n Initialisation - Read in atlases\n - image files\n - structure files\n\n Atlas structure:\n 'ID': 'Original': 'CT Image' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'RIR' : 'CT Image' : sitk.Image\n 'Transform' : transform parameter map\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'DIR' : 'CT Image' : sitk.Image\n 'Transform' : displacement field transform\n 'Weight Map' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n\n\n "
logger.info('')
atlas_path = settings['atlas_settings']['atlas_path']
atlas_id_list = settings['atlas_settings']['atlas_id_list']
atlas_structure_list = settings['atlas_settings']['atlas_structure_list']
atlas_image_format = settings['atlas_settings']['atlas_image_format']
atlas_label_format = settings['atlas_settings']['atlas_label_format']
crop_atlas_to_structures = settings['atlas_settings']['crop_atlas_to_structures']
crop_atlas_expansion_mm = settings['atlas_settings']['crop_atlas_expansion_mm']
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]['Original'] = {}
image = sitk.ReadImage(f'{atlas_path}/{atlas_image_format.format(atlas_id)}')
structures = {struct: sitk.ReadImage(f'{atlas_path}/{atlas_label_format.format(atlas_id, struct)}') for struct in atlas_structure_list}
if crop_atlas_to_structures:
logger.info(f'Automatically cropping atlas: {atlas_id}')
original_volume = np.product(image.GetSize())
(crop_box_size, crop_box_index) = label_to_roi(structures.values(), expansion_mm=crop_atlas_expansion_mm)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
final_volume = np.product(image.GetSize())
logger.info(f' > Volume reduced by factor {(original_volume / final_volume):.2f}')
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(structures[struct], size=crop_box_size, index=crop_box_index)
atlas_set[atlas_id]['Original']['CT Image'] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]['Original'][struct] = structures[struct]
'\n Step 1 - Automatic cropping\n If we have a guide structure:\n - use structure to crop target image\n\n Otherwise:\n - using a quick registration to register each atlas\n - expansion of the bounding box to ensure entire volume of interest is enclosed\n - target image is cropped\n '
expansion_mm = settings['auto_crop_target_image_settings']['expansion_mm']
if guide_structure:
(crop_box_size, crop_box_index) = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {'reg_method': 'similarity', 'shrink_factors': [8], 'smooth_sigmas': [0], 'sampling_rate': 0.75, 'default_value': (- 1000), 'number_of_iterations': 25, 'final_interp': sitk.sitkLinear, 'metric': 'mean_squares', 'optimiser': 'gradient_descent_line_search'}
registered_crop_images = []
logger.info('Running initial Translation transform to crop image volume')
for atlas_id in atlas_id_list[:min([8, len(atlas_id_list)])]:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['RIR'] = {}
atlas_image = atlas_set[atlas_id]['Original']['CT Image']
(reg_image, _) = linear_registration(img, atlas_image, **quick_reg_settings)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = ((sum(registered_crop_images) / len(registered_crop_images)) > (- 1000))
(crop_box_size, crop_box_index) = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info('Calculated crop box:')
logger.info(f' > {crop_box_index}')
logger.info(f' > {crop_box_size}')
logger.info(f' > Vol reduction = {(np.product(img.GetSize()) / np.product(crop_box_size)):.2f}')
'\n Step 2 - Rigid registration of target images\n - Individual atlas images are registered to the target\n - The transformation is used to propagate the labels onto the target\n '
linear_registration_settings = settings['linear_registration_settings']
logger.info(f"Running {linear_registration_settings['reg_method']} tranform to align atlas images")
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['RIR'] = {}
if guide_structure:
guide_structure_name = settings['atlas_settings']['guide_structure_name']
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(atlas_set[atlas_id]['Original'][guide_structure_name], expansion=2)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]['Original']['CT Image']
(_, initial_tfm) = linear_registration(target_reg_image, atlas_reg_image, **linear_registration_settings)
atlas_set[atlas_id]['RIR']['Transform'] = initial_tfm
if guide_structure:
atlas_set[atlas_id]['RIR']['Reg Mask'] = apply_transform(input_image=atlas_reg_image, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkLinear)
expanded_atlas_guide_structure = extend_mask(atlas_set[atlas_id]['Original'][guide_structure_name], direction=('ax', 'sup'), extension_mm=settings['atlas_settings']['superior_extension'], interior_mm_shape=(settings['atlas_settings']['superior_extension'] / 2))
atlas_set[atlas_id]['RIR'][(guide_structure_name + 'EXPANDED')] = apply_transform(input_image=expanded_atlas_guide_structure, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['RIR']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id]['Original']['CT Image'], reference_image=img_crop, transform=initial_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]['Original'][struct]
atlas_set[atlas_id]['RIR'][struct] = apply_transform(input_image=input_struct, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['Original'] = None
'\n Step 3 - Deformable image registration\n - Using Fast Symmetric Diffeomorphic Demons\n '
if guide_structure:
structure_guided_registration_settings = settings['structure_guided_registration_settings']
logger.info('Running structure-guided deformable registration on atlas labels')
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['DIR_STRUCT'] = {}
(deform_image, struct_guided_tfm, _) = fast_symmetric_forces_demons_registration(target_reg_structure, atlas_set[atlas_id]['RIR']['Reg Mask'], **structure_guided_registration_settings)
atlas_set[atlas_id]['DIR_STRUCT']['Reg Mask'] = deform_image
atlas_set[atlas_id]['DIR_STRUCT']['Transform'] = struct_guided_tfm
atlas_set[atlas_id]['DIR_STRUCT']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id]['RIR']['CT Image'], transform=struct_guided_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
atlas_set[atlas_id]['DIR_STRUCT'][(guide_structure_name + 'EXPANDED')] = apply_transform(input_image=atlas_set[atlas_id]['RIR'][(guide_structure_name + 'EXPANDED')], reference_image=img_crop, transform=struct_guided_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]['RIR'][struct]
atlas_set[atlas_id]['DIR_STRUCT'][struct] = apply_transform(input_image=input_struct, transform=struct_guided_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['RIR'] = None
deformable_registration_settings = settings['deformable_registration_settings']
logger.info('Running DIR to refine atlas image registration')
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['DIR'] = {}
if guide_structure:
label = 'DIR_STRUCT'
else:
label = 'RIR'
atlas_reg_image = atlas_set[atlas_id][label]['CT Image']
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]['DIR_STRUCT'][(guide_structure_name + 'EXPANDED')]
expanded_target_mask = extend_mask(guide_structure, direction=('ax', 'sup'), extension_mm=settings['atlas_settings']['superior_extension'], interior_mm_shape=(settings['atlas_settings']['superior_extension'] / 2))
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=(- 1000))
atlas_reg_image = sitk.Mask(atlas_reg_image, (atlas_reg_image > (- 400)), outsideValue=(- 1000))
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=(- 1000))
target_reg_image = sitk.Mask(target_reg_image, (atlas_reg_image > (- 400)), outsideValue=(- 1000))
(deform_image, dir_tfm, _) = fast_symmetric_forces_demons_registration(target_reg_image, atlas_reg_image, **deformable_registration_settings)
atlas_set[atlas_id]['DIR']['Transform'] = dir_tfm
atlas_set[atlas_id]['DIR']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id][label]['CT Image'], transform=dir_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]['DIR'][struct] = apply_transform(input_image=input_struct, transform=dir_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id][label] = None
'\n Step 4 - Iterative atlas removal\n - This is an automatic process that will attempt to remove inconsistent atlases from the entire set\n\n '
iar_settings = settings['iar_settings']
if iar_settings['reference_structure']:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]['DIR']['CT Image']
weight_map = compute_weight_map(img_crop, atlas_image, vote_type='global')
atlas_set[atlas_id]['DIR']['Weight Map'] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info('IAR: No reference structure, skipping iterative atlas removal.')
'\n Step 4 - Vessel Splining\n\n '
vessel_spline_settings = settings['vessel_spline_settings']
if (len(vessel_spline_settings['vessel_name_list']) > 0):
segmented_vessel_dict = vessel_spline_generation(img_crop, atlas_set, **vessel_spline_settings)
else:
logger.info('No vessel splining required, continue.')
'\n Step 5 - Label Fusion\n '
vote_type = settings['label_fusion_settings']['vote_type']
vote_params = settings['label_fusion_settings']['vote_params']
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]['DIR']['CT Image']
weight_map = compute_weight_map(img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params)
atlas_set[atlas_id]['DIR']['Weight Map'] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
'\n Step 6 - Paste the cropped structure into the original image space\n '
logger.info('Generating binary segmentations.')
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings['label_fusion_settings']['optimal_threshold'].keys()
vote_structures = [i for i in vote_structures if (i in atlas_structure_list)]
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings['label_fusion_settings']['optimal_threshold'][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings['return_proba_as_contours']:
atlas_contours = [(atlas_set[atlas_id]['DIR'][structure_name] >= 2) for atlas_id in atlas_id_list]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
if ((not settings['return_atlas_guide_structure']) and (guide_structure is not None)):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings['return_proba_as_contours']:
atlas_contours = [(atlas_set[atlas_id]['DIR'][structure_name] >= 2) for atlas_id in atlas_id_list]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
paste_img_binary = sitk.Paste(template_img_binary, binary_struct, binary_struct.GetSize(), (0, 0, 0), crop_box_index)
results[structure_name] = paste_img_binary
paste_prob_img = sitk.Paste(template_img_prob, probability_img, probability_img.GetSize(), (0, 0, 0), crop_box_index)
results_prob[structure_name] = paste_prob_img
if ((not settings['return_atlas_guide_structure']) and (guide_structure is not None)):
new_guide_structure = sitk.Paste(template_img_binary, guide_structure, guide_structure.GetSize(), (0, 0, 0), crop_box_index)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings['vessel_name_list']:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [atlas_set[atlas_id]['DIR'][structure_name] for atlas_id in list(atlas_set.keys())]
else:
paste_img_binary = sitk.Paste(template_img_binary, binary_struct, binary_struct.GetSize(), (0, 0, 0), crop_box_index)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(template_img_binary, atlas_set[atlas_id]['DIR'][structure_name], atlas_set[atlas_id]['DIR'][structure_name].GetSize(), (0, 0, 0), crop_box_index)
vessel_list.append(paste_img_binary)
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
'\n Step 7 - Geometric definitions of cardiac valves and conduction system nodes\n '
geometric_segmentation_settings = settings['geometric_segmentation_settings']
if geometric_segmentation_settings['run_geometric_algorithms']:
logger.info('Computing geometric definitions for valves and conduction system.')
geom_atlas_names = geometric_segmentation_settings['atlas_structure_names']
geom_valve_defs = geometric_segmentation_settings['valve_definitions']
geom_conduction_defs = geometric_segmentation_settings['conduction_system_definitions']
mv_name = ('MITRALVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[mv_name] = generate_valve_using_cylinder(label_atrium=results[geom_atlas_names['atlas_left_atrium']], label_ventricle=results[geom_atlas_names['atlas_left_ventricle']], radius_mm=geom_valve_defs['mitral_valve_radius_mm'], height_mm=geom_valve_defs['mitral_valve_thickness_mm'])
tv_name = ('TRICUSPIDVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[tv_name] = generate_valve_using_cylinder(label_atrium=results[geom_atlas_names['atlas_right_atrium']], label_ventricle=results[geom_atlas_names['atlas_right_ventricle']], radius_mm=geom_valve_defs['tricuspid_valve_radius_mm'], height_mm=geom_valve_defs['tricuspid_valve_thickness_mm'])
av_name = ('AORTICVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[av_name] = generate_valve_from_great_vessel(label_great_vessel=results[geom_atlas_names['atlas_ascending_aorta']], label_ventricle=results[geom_atlas_names['atlas_left_ventricle']], valve_thickness_mm=geom_valve_defs['aortic_valve_thickness_mm'])
pv_name = ('PULMONICVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[pv_name] = generate_valve_from_great_vessel(label_great_vessel=results[geom_atlas_names['atlas_pulmonary_artery']], label_ventricle=results[geom_atlas_names['atlas_right_ventricle']], valve_thickness_mm=geom_valve_defs['pulmonic_valve_thickness_mm'])
san_name = ('SAN' + geometric_segmentation_settings['geometric_name_suffix'])
results[san_name] = geometric_sinoatrialnode(label_svc=results[geom_atlas_names['atlas_superior_vena_cava']], label_ra=results[geom_atlas_names['atlas_right_atrium']], label_wholeheart=results[geom_atlas_names['atlas_whole_heart']], radius_mm=geom_conduction_defs['sinoatrial_node_radius_mm'])
avn_name = ('AVN' + geometric_segmentation_settings['geometric_name_suffix'])
results[avn_name] = geometric_atrioventricularnode(label_la=results[geom_atlas_names['atlas_left_atrium']], label_lv=results[geom_atlas_names['atlas_left_ventricle']], label_ra=results[geom_atlas_names['atlas_right_atrium']], label_rv=results[geom_atlas_names['atlas_right_ventricle']], radius_mm=geom_conduction_defs['atrioventricular_node_radius_mm'])
'\n Step 8 - Post-processing\n '
postprocessing_settings = settings['postprocessing_settings']
if postprocessing_settings['run_postprocessing']:
logger.info('Running post-processing.')
binaryfillhole_img = [int((postprocessing_settings['binaryfillhole_mm'] / sp)) for sp in img.GetSpacing()]
for structure_name in postprocessing_settings['structures_for_binaryfillhole']:
if (structure_name not in results.keys()):
continue
contour_s = results[structure_name]
contour_s = (sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1)
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
input_overlap = {s: results[s] for s in postprocessing_settings['structures_for_overlap_correction']}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings['structures_for_overlap_correction']:
results[s] = output_overlap[s]
if return_as_cropped:
results['CROP_IMAGE'] = img_crop
logger.info('Done!')
return (results, results_prob) | 1,093,644,460,725,558,900 | Runs the atlas-based cardiac segmentation
Args:
img (sitk.Image): The image to segment.
guide_structure (sitk.Image, optional): Structure used to crop the target image and guide registration. Defaults to None.
settings (dict, optional): Dictionary containing settings for the algorithm.
Defaults to CARDIAC_SETTINGS_DEFAULTS.
Returns:
tuple: Dictionaries containing the binary segmentations and the probability maps. | platipy/imaging/projects/cardiac/run.py | run_cardiac_segmentation | RadiotherapyAI/platipy | python | def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
'Runs the atlas-based cardiac segmentation\n\n Args:\n img (sitk.Image): The image to segment.\n guide_structure (sitk.Image, optional): Structure used to crop the target image and guide registration. Defaults to None.\n settings (dict, optional): Dictionary containing settings for the algorithm.\n Defaults to CARDIAC_SETTINGS_DEFAULTS.\n\n Returns:\n tuple: Dictionaries containing the binary segmentations and the probability maps.\n '
results = {}
results_prob = {}
return_as_cropped = settings['return_as_cropped']
"\n Initialisation - Read in atlases\n - image files\n - structure files\n\n Atlas structure:\n 'ID': 'Original': 'CT Image' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'RIR' : 'CT Image' : sitk.Image\n 'Transform' : transform parameter map\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'DIR' : 'CT Image' : sitk.Image\n 'Transform' : displacement field transform\n 'Weight Map' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n\n\n "
logger.info('')
atlas_path = settings['atlas_settings']['atlas_path']
atlas_id_list = settings['atlas_settings']['atlas_id_list']
atlas_structure_list = settings['atlas_settings']['atlas_structure_list']
atlas_image_format = settings['atlas_settings']['atlas_image_format']
atlas_label_format = settings['atlas_settings']['atlas_label_format']
crop_atlas_to_structures = settings['atlas_settings']['crop_atlas_to_structures']
crop_atlas_expansion_mm = settings['atlas_settings']['crop_atlas_expansion_mm']
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]['Original'] = {}
image = sitk.ReadImage(f'{atlas_path}/{atlas_image_format.format(atlas_id)}')
structures = {struct: sitk.ReadImage(f'{atlas_path}/{atlas_label_format.format(atlas_id, struct)}') for struct in atlas_structure_list}
if crop_atlas_to_structures:
logger.info(f'Automatically cropping atlas: {atlas_id}')
original_volume = np.product(image.GetSize())
(crop_box_size, crop_box_index) = label_to_roi(structures.values(), expansion_mm=crop_atlas_expansion_mm)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
final_volume = np.product(image.GetSize())
logger.info(f' > Volume reduced by factor {(original_volume / final_volume):.2f}')
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(structures[struct], size=crop_box_size, index=crop_box_index)
atlas_set[atlas_id]['Original']['CT Image'] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]['Original'][struct] = structures[struct]
'\n Step 1 - Automatic cropping\n If we have a guide structure:\n - use structure to crop target image\n\n Otherwise:\n - using a quick registration to register each atlas\n - expansion of the bounding box to ensure entire volume of interest is enclosed\n - target image is cropped\n '
expansion_mm = settings['auto_crop_target_image_settings']['expansion_mm']
if guide_structure:
(crop_box_size, crop_box_index) = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {'reg_method': 'similarity', 'shrink_factors': [8], 'smooth_sigmas': [0], 'sampling_rate': 0.75, 'default_value': (- 1000), 'number_of_iterations': 25, 'final_interp': sitk.sitkLinear, 'metric': 'mean_squares', 'optimiser': 'gradient_descent_line_search'}
registered_crop_images = []
logger.info('Running initial Translation transform to crop image volume')
for atlas_id in atlas_id_list[:min([8, len(atlas_id_list)])]:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['RIR'] = {}
atlas_image = atlas_set[atlas_id]['Original']['CT Image']
(reg_image, _) = linear_registration(img, atlas_image, **quick_reg_settings)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = ((sum(registered_crop_images) / len(registered_crop_images)) > (- 1000))
(crop_box_size, crop_box_index) = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info('Calculated crop box:')
logger.info(f' > {crop_box_index}')
logger.info(f' > {crop_box_size}')
logger.info(f' > Vol reduction = {(np.product(img.GetSize()) / np.product(crop_box_size)):.2f}')
'\n Step 2 - Rigid registration of target images\n - Individual atlas images are registered to the target\n - The transformation is used to propagate the labels onto the target\n '
linear_registration_settings = settings['linear_registration_settings']
logger.info(f"Running {linear_registration_settings['reg_method']} tranform to align atlas images")
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['RIR'] = {}
if guide_structure:
guide_structure_name = settings['atlas_settings']['guide_structure_name']
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(atlas_set[atlas_id]['Original'][guide_structure_name], expansion=2)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]['Original']['CT Image']
(_, initial_tfm) = linear_registration(target_reg_image, atlas_reg_image, **linear_registration_settings)
atlas_set[atlas_id]['RIR']['Transform'] = initial_tfm
if guide_structure:
atlas_set[atlas_id]['RIR']['Reg Mask'] = apply_transform(input_image=atlas_reg_image, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkLinear)
expanded_atlas_guide_structure = extend_mask(atlas_set[atlas_id]['Original'][guide_structure_name], direction=('ax', 'sup'), extension_mm=settings['atlas_settings']['superior_extension'], interior_mm_shape=(settings['atlas_settings']['superior_extension'] / 2))
atlas_set[atlas_id]['RIR'][(guide_structure_name + 'EXPANDED')] = apply_transform(input_image=expanded_atlas_guide_structure, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['RIR']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id]['Original']['CT Image'], reference_image=img_crop, transform=initial_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]['Original'][struct]
atlas_set[atlas_id]['RIR'][struct] = apply_transform(input_image=input_struct, reference_image=img_crop, transform=initial_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['Original'] = None
'\n Step 3 - Deformable image registration\n - Using Fast Symmetric Diffeomorphic Demons\n '
if guide_structure:
structure_guided_registration_settings = settings['structure_guided_registration_settings']
logger.info('Running structure-guided deformable registration on atlas labels')
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['DIR_STRUCT'] = {}
(deform_image, struct_guided_tfm, _) = fast_symmetric_forces_demons_registration(target_reg_structure, atlas_set[atlas_id]['RIR']['Reg Mask'], **structure_guided_registration_settings)
atlas_set[atlas_id]['DIR_STRUCT']['Reg Mask'] = deform_image
atlas_set[atlas_id]['DIR_STRUCT']['Transform'] = struct_guided_tfm
atlas_set[atlas_id]['DIR_STRUCT']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id]['RIR']['CT Image'], transform=struct_guided_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
atlas_set[atlas_id]['DIR_STRUCT'][(guide_structure_name + 'EXPANDED')] = apply_transform(input_image=atlas_set[atlas_id]['RIR'][(guide_structure_name + 'EXPANDED')], reference_image=img_crop, transform=struct_guided_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]['RIR'][struct]
atlas_set[atlas_id]['DIR_STRUCT'][struct] = apply_transform(input_image=input_struct, transform=struct_guided_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id]['RIR'] = None
deformable_registration_settings = settings['deformable_registration_settings']
logger.info('Running DIR to refine atlas image registration')
for atlas_id in atlas_id_list:
logger.info(f' > atlas {atlas_id}')
atlas_set[atlas_id]['DIR'] = {}
if guide_structure:
label = 'DIR_STRUCT'
else:
label = 'RIR'
atlas_reg_image = atlas_set[atlas_id][label]['CT Image']
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]['DIR_STRUCT'][(guide_structure_name + 'EXPANDED')]
expanded_target_mask = extend_mask(guide_structure, direction=('ax', 'sup'), extension_mm=settings['atlas_settings']['superior_extension'], interior_mm_shape=(settings['atlas_settings']['superior_extension'] / 2))
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=(- 1000))
atlas_reg_image = sitk.Mask(atlas_reg_image, (atlas_reg_image > (- 400)), outsideValue=(- 1000))
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=(- 1000))
target_reg_image = sitk.Mask(target_reg_image, (atlas_reg_image > (- 400)), outsideValue=(- 1000))
(deform_image, dir_tfm, _) = fast_symmetric_forces_demons_registration(target_reg_image, atlas_reg_image, **deformable_registration_settings)
atlas_set[atlas_id]['DIR']['Transform'] = dir_tfm
atlas_set[atlas_id]['DIR']['CT Image'] = apply_transform(input_image=atlas_set[atlas_id][label]['CT Image'], transform=dir_tfm, default_value=(- 1000), interpolator=sitk.sitkLinear)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]['DIR'][struct] = apply_transform(input_image=input_struct, transform=dir_tfm, default_value=0, interpolator=sitk.sitkNearestNeighbor)
atlas_set[atlas_id][label] = None
'\n Step 4 - Iterative atlas removal\n - This is an automatic process that will attempt to remove inconsistent atlases from the entire set\n\n '
iar_settings = settings['iar_settings']
if iar_settings['reference_structure']:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]['DIR']['CT Image']
weight_map = compute_weight_map(img_crop, atlas_image, vote_type='global')
atlas_set[atlas_id]['DIR']['Weight Map'] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info('IAR: No reference structure, skipping iterative atlas removal.')
'\n Step 4 - Vessel Splining\n\n '
vessel_spline_settings = settings['vessel_spline_settings']
if (len(vessel_spline_settings['vessel_name_list']) > 0):
segmented_vessel_dict = vessel_spline_generation(img_crop, atlas_set, **vessel_spline_settings)
else:
logger.info('No vessel splining required, continue.')
'\n Step 5 - Label Fusion\n '
vote_type = settings['label_fusion_settings']['vote_type']
vote_params = settings['label_fusion_settings']['vote_params']
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]['DIR']['CT Image']
weight_map = compute_weight_map(img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params)
atlas_set[atlas_id]['DIR']['Weight Map'] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
'\n Step 6 - Paste the cropped structure into the original image space\n '
logger.info('Generating binary segmentations.')
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings['label_fusion_settings']['optimal_threshold'].keys()
vote_structures = [i for i in vote_structures if (i in atlas_structure_list)]
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings['label_fusion_settings']['optimal_threshold'][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings['return_proba_as_contours']:
atlas_contours = [(atlas_set[atlas_id]['DIR'][structure_name] >= 2) for atlas_id in atlas_id_list]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
if ((not settings['return_atlas_guide_structure']) and (guide_structure is not None)):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings['return_proba_as_contours']:
atlas_contours = [(atlas_set[atlas_id]['DIR'][structure_name] >= 2) for atlas_id in atlas_id_list]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
paste_img_binary = sitk.Paste(template_img_binary, binary_struct, binary_struct.GetSize(), (0, 0, 0), crop_box_index)
results[structure_name] = paste_img_binary
paste_prob_img = sitk.Paste(template_img_prob, probability_img, probability_img.GetSize(), (0, 0, 0), crop_box_index)
results_prob[structure_name] = paste_prob_img
if ((not settings['return_atlas_guide_structure']) and (guide_structure is not None)):
new_guide_structure = sitk.Paste(template_img_binary, guide_structure, guide_structure.GetSize(), (0, 0, 0), crop_box_index)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings['vessel_name_list']:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [atlas_set[atlas_id]['DIR'][structure_name] for atlas_id in list(atlas_set.keys())]
else:
paste_img_binary = sitk.Paste(template_img_binary, binary_struct, binary_struct.GetSize(), (0, 0, 0), crop_box_index)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(template_img_binary, atlas_set[atlas_id]['DIR'][structure_name], atlas_set[atlas_id]['DIR'][structure_name].GetSize(), (0, 0, 0), crop_box_index)
vessel_list.append(paste_img_binary)
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
'\n Step 7 - Geometric definitions of cardiac valves and conduction system nodes\n '
geometric_segmentation_settings = settings['geometric_segmentation_settings']
if geometric_segmentation_settings['run_geometric_algorithms']:
logger.info('Computing geometric definitions for valves and conduction system.')
geom_atlas_names = geometric_segmentation_settings['atlas_structure_names']
geom_valve_defs = geometric_segmentation_settings['valve_definitions']
geom_conduction_defs = geometric_segmentation_settings['conduction_system_definitions']
mv_name = ('MITRALVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[mv_name] = generate_valve_using_cylinder(label_atrium=results[geom_atlas_names['atlas_left_atrium']], label_ventricle=results[geom_atlas_names['atlas_left_ventricle']], radius_mm=geom_valve_defs['mitral_valve_radius_mm'], height_mm=geom_valve_defs['mitral_valve_thickness_mm'])
tv_name = ('TRICUSPIDVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[tv_name] = generate_valve_using_cylinder(label_atrium=results[geom_atlas_names['atlas_right_atrium']], label_ventricle=results[geom_atlas_names['atlas_right_ventricle']], radius_mm=geom_valve_defs['tricuspid_valve_radius_mm'], height_mm=geom_valve_defs['tricuspid_valve_thickness_mm'])
av_name = ('AORTICVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[av_name] = generate_valve_from_great_vessel(label_great_vessel=results[geom_atlas_names['atlas_ascending_aorta']], label_ventricle=results[geom_atlas_names['atlas_left_ventricle']], valve_thickness_mm=geom_valve_defs['aortic_valve_thickness_mm'])
pv_name = ('PULMONICVALVE' + geometric_segmentation_settings['geometric_name_suffix'])
results[pv_name] = generate_valve_from_great_vessel(label_great_vessel=results[geom_atlas_names['atlas_pulmonary_artery']], label_ventricle=results[geom_atlas_names['atlas_right_ventricle']], valve_thickness_mm=geom_valve_defs['pulmonic_valve_thickness_mm'])
san_name = ('SAN' + geometric_segmentation_settings['geometric_name_suffix'])
results[san_name] = geometric_sinoatrialnode(label_svc=results[geom_atlas_names['atlas_superior_vena_cava']], label_ra=results[geom_atlas_names['atlas_right_atrium']], label_wholeheart=results[geom_atlas_names['atlas_whole_heart']], radius_mm=geom_conduction_defs['sinoatrial_node_radius_mm'])
avn_name = ('AVN' + geometric_segmentation_settings['geometric_name_suffix'])
results[avn_name] = geometric_atrioventricularnode(label_la=results[geom_atlas_names['atlas_left_atrium']], label_lv=results[geom_atlas_names['atlas_left_ventricle']], label_ra=results[geom_atlas_names['atlas_right_atrium']], label_rv=results[geom_atlas_names['atlas_right_ventricle']], radius_mm=geom_conduction_defs['atrioventricular_node_radius_mm'])
'\n Step 8 - Post-processing\n '
postprocessing_settings = settings['postprocessing_settings']
if postprocessing_settings['run_postprocessing']:
logger.info('Running post-processing.')
binaryfillhole_img = [int((postprocessing_settings['binaryfillhole_mm'] / sp)) for sp in img.GetSpacing()]
for structure_name in postprocessing_settings['structures_for_binaryfillhole']:
if (structure_name not in results.keys()):
continue
contour_s = results[structure_name]
contour_s = (sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1)
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
input_overlap = {s: results[s] for s in postprocessing_settings['structures_for_overlap_correction']}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings['structures_for_overlap_correction']:
results[s] = output_overlap[s]
if return_as_cropped:
results['CROP_IMAGE'] = img_crop
logger.info('Done!')
return (results, results_prob) |
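An invocation sketch under stated assumptions: the input path and atlas directory are hypothetical, and any real run needs atlas_path/atlas_id_list overrides that match your data:

import copy
import SimpleITK as sitk

img = sitk.ReadImage('patient_ct.nii.gz')  # hypothetical input image
settings = copy.deepcopy(CARDIAC_SETTINGS_DEFAULTS)
settings['atlas_settings']['atlas_path'] = '/data/cardiac_atlas'  # assumed atlas layout
results, results_prob = run_cardiac_segmentation(img, settings=settings)
for name, mask in results.items():
    sitk.WriteImage(mask, f'{name}.nii.gz')  # one binary mask per structure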
def test_programs(self):
'\n Checks the evaluation of programs\n '
p1 = BasicPrimitive('MAP')
p2 = BasicPrimitive('MAP', type_=PolymorphicType(name='test'))
self.assertTrue((repr(p1) == repr(p2)))
self.assertTrue(p1.typeless_eq(p2))
self.assertFalse(p1.__eq__(p2))
self.assertFalse((id(p1) == id(p2)))
t0 = PolymorphicType('t0')
t1 = PolymorphicType('t1')
semantics = {'+1': (lambda x: (x + 1)), 'MAP': (lambda f: (lambda l: list(map(f, l))))}
primitive_types = {'+1': Arrow(INT, INT), 'MAP': Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1)))}
toy_DSL = DSL(semantics, primitive_types)
p0 = Function(BasicPrimitive('+1'), [Variable(0)])
env = (2, None)
self.assertTrue((p0.eval(toy_DSL, env, 0) == 3))
p1 = Function(BasicPrimitive('MAP'), [BasicPrimitive('+1'), Variable(0)])
env = ([2, 4], None)
self.assertTrue((p1.eval(toy_DSL, env, 0) == [3, 5])) | -6,552,360,153,264,010,000 | Checks the evaluation of programs | unit_tests_programs.py | test_programs | agissaud/DeepSynth | python | def test_programs(self):
'\n \n '
p1 = BasicPrimitive('MAP')
p2 = BasicPrimitive('MAP', type_=PolymorphicType(name='test'))
self.assertTrue((repr(p1) == repr(p2)))
self.assertTrue(p1.typeless_eq(p2))
self.assertFalse(p1.__eq__(p2))
self.assertFalse((id(p1) == id(p2)))
t0 = PolymorphicType('t0')
t1 = PolymorphicType('t1')
semantics = {'+1': (lambda x: (x + 1)), 'MAP': (lambda f: (lambda l: list(map(f, l))))}
primitive_types = {'+1': Arrow(INT, INT), 'MAP': Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1)))}
toy_DSL = DSL(semantics, primitive_types)
p0 = Function(BasicPrimitive('+1'), [Variable(0)])
env = (2, None)
self.assertTrue((p0.eval(toy_DSL, env, 0) == 3))
p1 = Function(BasicPrimitive('MAP'), [BasicPrimitive('+1'), Variable(0)])
env = ([2, 4], None)
self.assertTrue((p1.eval(toy_DSL, env, 0) == [3, 5])) |
def test_evaluation_from_compressed(self):
'\n Check that evaluation_from_compressed evaluates programs correctly\n '
N = 20000
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT), List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
gen_a_star = a_star(deepcoder_PCFG)
environment = ([2, 3, 1], None)
r = type_request.returns()
for i in range(N):
program_compressed = next(gen_a_star)
program = reconstruct_from_compressed(program_compressed, r)
program_as_list = []
eval_from_compressed = evaluation_from_compressed(program_compressed, deepcoder, environment, r)
eval_from_program = program.eval_naive(deepcoder, environment)
self.assertEqual(eval_from_compressed, eval_from_program) | -7,977,442,394,043,425,000 | Check that evaluation_from_compressed evaluates programs correctly | unit_tests_programs.py | test_evaluation_from_compressed | agissaud/DeepSynth | python | def test_evaluation_from_compressed(self):
'\n \n '
N = 20000
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT), List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
gen_a_star = a_star(deepcoder_PCFG)
environment = ([2, 3, 1], None)
r = type_request.returns()
for i in range(N):
program_compressed = next(gen_a_star)
program = reconstruct_from_compressed(program_compressed, r)
program_as_list = []
eval_from_compressed = evaluation_from_compressed(program_compressed, deepcoder, environment, r)
eval_from_program = program.eval_naive(deepcoder, environment)
self.assertEqual(eval_from_compressed, eval_from_program) |
def prerelease_local_scheme(version):
'\n Return local scheme version unless building on master in CircleCI.\n\n This function returns the local scheme version number\n (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a\n pre-release in which case it ignores the hash and produces a\n PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).\n '
from setuptools_scm.version import get_local_node_and_date
if (os.getenv('CIRCLE_BRANCH') in ('master',)):
return ''
else:
return get_local_node_and_date(version) | -4,038,724,985,312,240,000 | Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>). | setup.py | prerelease_local_scheme | abcsFrederick/HistomicsUI | python | def prerelease_local_scheme(version):
'\n Return local scheme version unless building on master in CircleCI.\n\n This function returns the local scheme version number\n (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a\n pre-release in which case it ignores the hash and produces a\n PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).\n '
from setuptools_scm.version import get_local_node_and_date
if (os.getenv('CIRCLE_BRANCH') in ('master',)):
return ''
else:
return get_local_node_and_date(version) |
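A sketch of how this local scheme is typically wired into setup() through setuptools_scm; the package name is an assumption taken from the repository:

from setuptools import setup

setup(
    name='histomicsui',  # assumed package name
    use_scm_version={'local_scheme': prerelease_local_scheme},
    setup_requires=['setuptools-scm'],
)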
def on_demand_feature_view(*args, features: Optional[List[Feature]]=None, sources: Optional[Dict[(str, Union[(FeatureView, RequestSource)])]]=None, inputs: Optional[Dict[(str, Union[(FeatureView, RequestSource)])]]=None, schema: Optional[List[Field]]=None, description: str='', tags: Optional[Dict[(str, str)]]=None, owner: str=''):
'\n Creates an OnDemandFeatureView object with the given user function as udf.\n\n Args:\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n inputs (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n '
positional_attributes = ['features', 'inputs']
_schema = (schema or [])
if ((len(_schema) == 0) and (features is not None)):
_schema = [Field.from_feature(feature) for feature in features]
if (features is not None):
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
_sources = (sources or inputs)
if (inputs and sources):
raise ValueError('At most one of `sources` or `inputs` can be specified.')
elif inputs:
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if args:
warnings.warn('On demand feature view parameters should be specified as keyword arguments instead of positional arguments. Feast 0.23 and onwards will not support positional arguments in on demand feature view definitions.', DeprecationWarning)
if (len(args) > len(positional_attributes)):
raise ValueError(f"Only {', '.join(positional_attributes)} are allowed as positional args when defining feature views, for backwards compatibility.")
if (len(args) >= 1):
_schema = args[0]
if ((len(_schema) > 0) and isinstance(_schema[0], Feature)):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
if (len(args) >= 2):
_sources = args[1]
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if (not _sources):
raise ValueError('The `sources` parameter must be specified.')
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(name=user_function.__name__, sources=_sources, schema=_schema, udf=user_function, description=description, tags=tags, owner=owner)
functools.update_wrapper(wrapper=on_demand_feature_view_obj, wrapped=user_function)
return on_demand_feature_view_obj
return decorator | 2,463,973,979,159,200,300 | Creates an OnDemandFeatureView object with the given user function as udf.
Args:
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer. | sdk/python/feast/on_demand_feature_view.py | on_demand_feature_view | aurobindoc/feast | python | def on_demand_feature_view(*args, features: Optional[List[Feature]]=None, sources: Optional[Dict[(str, Union[(FeatureView, RequestSource)])]]=None, inputs: Optional[Dict[(str, Union[(FeatureView, RequestSource)])]]=None, schema: Optional[List[Field]]=None, description: str=, tags: Optional[Dict[(str, str)]]=None, owner: str=):
'\n Creates an OnDemandFeatureView object with the given user function as udf.\n\n Args:\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n inputs (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n '
positional_attributes = ['features', 'inputs']
_schema = (schema or [])
if ((len(_schema) == 0) and (features is not None)):
_schema = [Field.from_feature(feature) for feature in features]
if (features is not None):
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
_sources = (sources or inputs)
if (inputs and sources):
raise ValueError('At most one of `sources` or `inputs` can be specified.')
elif inputs:
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if args:
warnings.warn('On demand feature view parameters should be specified as keyword arguments instead of positional arguments. Feast 0.23 and onwards will not support positional arguments in on demand feature view definitions.', DeprecationWarning)
if (len(args) > len(positional_attributes)):
raise ValueError(f"Only {', '.join(positional_attributes)} are allowed as positional args when defining feature views, for backwards compatibility.")
if (len(args) >= 1):
_schema = args[0]
if ((len(_schema) > 0) and isinstance(_schema[0], Feature)):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
if (len(args) >= 2):
_sources = args[1]
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if (not _sources):
raise ValueError('The `sources` parameter must be specified.')
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(name=user_function.__name__, sources=_sources, schema=_schema, udf=user_function, description=description, tags=tags, owner=owner)
functools.update_wrapper(wrapper=on_demand_feature_view_obj, wrapped=user_function)
return on_demand_feature_view_obj
return decorator |
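A decorator usage sketch mirroring the documented sources/schema parameters; driver_stats_fv and input_request are assumed to be an existing FeatureView and RequestSource defined elsewhere:

import pandas as pd
from feast import Field
from feast.types import Float64

@on_demand_feature_view(
    sources={'driver_stats': driver_stats_fv, 'vals_to_add': input_request},  # assumed sources
    schema=[Field(name='conv_rate_plus_val', dtype=Float64)],
)
def transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame:
    # The udf receives one dataframe holding all input features and request data.
    df = pd.DataFrame()
    df['conv_rate_plus_val'] = inputs['conv_rate'] + inputs['val_to_add']
    return df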
@log_exceptions
def __init__(self, *args, name: Optional[str]=None, features: Optional[List[Feature]]=None, sources: Optional[Dict[(str, Union[(FeatureView, FeatureViewProjection, RequestSource)])]]=None, udf: Optional[MethodType]=None, inputs: Optional[Dict[(str, Union[(FeatureView, FeatureViewProjection, RequestSource)])]]=None, schema: Optional[List[Field]]=None, description: str='', tags: Optional[Dict[(str, str)]]=None, owner: str=''):
'\n Creates an OnDemandFeatureView object.\n\n Args:\n name: The unique name of the on demand feature view.\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n udf (optional): The user defined transformation function, which must take pandas\n dataframes as inputs.\n inputs (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n '
positional_attributes = ['name', 'features', 'inputs', 'udf']
_name = name
_schema = (schema or [])
if ((len(_schema) == 0) and (features is not None)):
_schema = [Field.from_feature(feature) for feature in features]
if (features is not None):
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
_sources = (sources or inputs)
if (inputs and sources):
raise ValueError('At most one of `sources` or `inputs` can be specified.')
elif inputs:
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
_udf = udf
if args:
warnings.warn('On demand feature view parameters should be specified as keyword arguments instead of positional arguments. Feast 0.23 and onwards will not support positional arguments in on demand feature view definitions.', DeprecationWarning)
if (len(args) > len(positional_attributes)):
raise ValueError(f"Only {', '.join(positional_attributes)} are allowed as positional args when defining feature views, for backwards compatibility.")
if (len(args) >= 1):
_name = args[0]
if (len(args) >= 2):
_schema = args[1]
if ((len(_schema) > 0) and isinstance(_schema[0], Feature)):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
if (len(args) >= 3):
_sources = args[2]
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if (len(args) >= 4):
_udf = args[3]
if (not _name):
raise ValueError('The name of the on demand feature view must be specified.')
if (not _sources):
raise ValueError('The `sources` parameter must be specified.')
super().__init__(name=_name, features=_schema, description=description, tags=tags, owner=owner)
assert (_sources is not None)
self.source_feature_view_projections: Dict[(str, FeatureViewProjection)] = {}
self.source_request_sources: Dict[(str, RequestSource)] = {}
for (source_name, odfv_source) in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[source_name] = odfv_source.projection
if (_udf is None):
raise ValueError('The `udf` parameter must be specified.')
assert _udf
self.udf = _udf | -7,160,352,753,140,764,000 | Creates an OnDemandFeatureView object.
Args:
name: The unique name of the on demand feature view.
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
udf (optional): The user defined transformation function, which must take pandas
dataframes as inputs.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer. | sdk/python/feast/on_demand_feature_view.py | __init__ | aurobindoc/feast | python | @log_exceptions
def __init__(self, *args, name: Optional[str]=None, features: Optional[List[Feature]]=None, sources: Optional[Dict[(str, Union[(FeatureView, FeatureViewProjection, RequestSource)])]]=None, udf: Optional[MethodType]=None, inputs: Optional[Dict[(str, Union[(FeatureView, FeatureViewProjection, RequestSource)])]]=None, schema: Optional[List[Field]]=None, description: str='', tags: Optional[Dict[(str, str)]]=None, owner: str=''):
'\n Creates an OnDemandFeatureView object.\n\n Args:\n name: The unique name of the on demand feature view.\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n udf (optional): The user defined transformation function, which must take pandas\n dataframes as inputs.\n inputs (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n '
positional_attributes = ['name', 'features', 'inputs', 'udf']
_name = name
_schema = (schema or [])
if ((len(_schema) == 0) and (features is not None)):
_schema = [Field.from_feature(feature) for feature in features]
if (features is not None):
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
_sources = (sources or inputs)
if (inputs and sources):
raise ValueError('At most one of `sources` or `inputs` can be specified.')
elif inputs:
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
_udf = udf
if args:
warnings.warn('On demand feature view parameters should be specified as keyword arguments instead of positional arguments. Feast 0.23 and onwards will not support positional arguments in on demand feature view definitions.', DeprecationWarning)
if (len(args) > len(positional_attributes)):
raise ValueError(f"Only {', '.join(positional_attributes)} are allowed as positional args when defining feature views, for backwards compatibility.")
if (len(args) >= 1):
_name = args[0]
if (len(args) >= 2):
_schema = args[1]
if ((len(_schema) > 0) and isinstance(_schema[0], Feature)):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn('The `features` parameter is being deprecated in favor of the `schema` parameter. Please switch from using `features` to `schema`. This will also require switching feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not support the `features` parameter.', DeprecationWarning)
if (len(args) >= 3):
_sources = args[2]
warnings.warn('The `inputs` parameter is being deprecated. Please use `sources` instead. Feast 0.21 and onwards will not support the `inputs` parameter.', DeprecationWarning)
if (len(args) >= 4):
_udf = args[3]
if (not _name):
raise ValueError('The name of the on demand feature view must be specified.')
if (not _sources):
raise ValueError('The `sources` parameter must be specified.')
super().__init__(name=_name, features=_schema, description=description, tags=tags, owner=owner)
assert (_sources is not None)
self.source_feature_view_projections: Dict[(str, FeatureViewProjection)] = {}
self.source_request_sources: Dict[(str, RequestSource)] = {}
for (source_name, odfv_source) in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[source_name] = odfv_source.projection
if (_udf is None):
raise ValueError('The `udf` parameter must be specified.')
assert _udf
self.udf = _udf |
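To make the keyword-only construction path above concrete, here is a minimal hypothetical sketch. It assumes a Feast release matching this signature (sources passed as a dict); the source name, field names, and udf are invented for illustration.

import pandas as pd
from feast import Field, RequestSource
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.types import Int64

# Hypothetical request-time source; the name and schema are illustrative only.
vals_to_add = RequestSource(name='vals_to_add', schema=[Field(name='val_to_add', dtype=Int64)])

def add_one(inputs: pd.DataFrame) -> pd.DataFrame:
    # The udf must accept and return pandas DataFrames.
    out = pd.DataFrame()
    out['output'] = inputs['val_to_add'] + 1
    return out

# Keyword arguments sidestep the positional-argument deprecation path handled above.
odfv = OnDemandFeatureView(
    name='add_one_view',
    sources={'vals_to_add': vals_to_add},
    schema=[Field(name='output', dtype=Int64)],
    udf=add_one,
)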
def to_proto(self) -> OnDemandFeatureViewProto:
'\n Converts an on demand feature view object to its protobuf representation.\n\n Returns:\n A OnDemandFeatureViewProto protobuf.\n '
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for (source_name, fv_projection) in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(feature_view_projection=fv_projection.to_proto())
for (source_name, request_sources) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(request_data_source=request_sources.to_proto())
spec = OnDemandFeatureViewSpec(name=self.name, features=[feature.to_proto() for feature in self.features], sources=sources, user_defined_function=UserDefinedFunctionProto(name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True)), description=self.description, tags=self.tags, owner=self.owner)
return OnDemandFeatureViewProto(spec=spec, meta=meta) | 3,485,548,422,337,629,000 | Converts an on demand feature view object to its protobuf representation.
Returns:
A OnDemandFeatureViewProto protobuf. | sdk/python/feast/on_demand_feature_view.py | to_proto | aurobindoc/feast | python | def to_proto(self) -> OnDemandFeatureViewProto:
'\n Converts an on demand feature view object to its protobuf representation.\n\n Returns:\n A OnDemandFeatureViewProto protobuf.\n '
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for (source_name, fv_projection) in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(feature_view_projection=fv_projection.to_proto())
for (source_name, request_sources) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(request_data_source=request_sources.to_proto())
spec = OnDemandFeatureViewSpec(name=self.name, features=[feature.to_proto() for feature in self.features], sources=sources, user_defined_function=UserDefinedFunctionProto(name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True)), description=self.description, tags=self.tags, owner=self.owner)
return OnDemandFeatureViewProto(spec=spec, meta=meta) |
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
'\n Creates an on demand feature view from a protobuf representation.\n\n Args:\n on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.\n\n Returns:\n A OnDemandFeatureView object based on the on-demand feature view protobuf.\n '
sources = {}
for (source_name, on_demand_source) in on_demand_feature_view_proto.spec.sources.items():
if (on_demand_source.WhichOneof('source') == 'feature_view'):
sources[source_name] = FeatureView.from_proto(on_demand_source.feature_view).projection
elif (on_demand_source.WhichOneof('source') == 'feature_view_projection'):
sources[source_name] = FeatureViewProjection.from_proto(on_demand_source.feature_view_projection)
else:
sources[source_name] = RequestSource.from_proto(on_demand_source.request_data_source)
on_demand_feature_view_obj = cls(name=on_demand_feature_view_proto.spec.name, schema=[Field(name=feature.name, dtype=from_value_type(ValueType(feature.value_type))) for feature in on_demand_feature_view_proto.spec.features], sources=sources, udf=dill.loads(on_demand_feature_view_proto.spec.user_defined_function.body), description=on_demand_feature_view_proto.spec.description, tags=dict(on_demand_feature_view_proto.spec.tags), owner=on_demand_feature_view_proto.spec.owner)
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(on_demand_feature_view_obj)
if on_demand_feature_view_proto.meta.HasField('created_timestamp'):
on_demand_feature_view_obj.created_timestamp = on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
if on_demand_feature_view_proto.meta.HasField('last_updated_timestamp'):
on_demand_feature_view_obj.last_updated_timestamp = on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
return on_demand_feature_view_obj | 6,164,696,982,730,159,000 | Creates an on demand feature view from a protobuf representation.
Args:
on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
Returns:
A OnDemandFeatureView object based on the on-demand feature view protobuf. | sdk/python/feast/on_demand_feature_view.py | from_proto | aurobindoc/feast | python | @classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
'\n Creates an on demand feature view from a protobuf representation.\n\n Args:\n on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.\n\n Returns:\n A OnDemandFeatureView object based on the on-demand feature view protobuf.\n '
sources = {}
for (source_name, on_demand_source) in on_demand_feature_view_proto.spec.sources.items():
if (on_demand_source.WhichOneof('source') == 'feature_view'):
sources[source_name] = FeatureView.from_proto(on_demand_source.feature_view).projection
elif (on_demand_source.WhichOneof('source') == 'feature_view_projection'):
sources[source_name] = FeatureViewProjection.from_proto(on_demand_source.feature_view_projection)
else:
sources[source_name] = RequestSource.from_proto(on_demand_source.request_data_source)
on_demand_feature_view_obj = cls(name=on_demand_feature_view_proto.spec.name, schema=[Field(name=feature.name, dtype=from_value_type(ValueType(feature.value_type))) for feature in on_demand_feature_view_proto.spec.features], sources=sources, udf=dill.loads(on_demand_feature_view_proto.spec.user_defined_function.body), description=on_demand_feature_view_proto.spec.description, tags=dict(on_demand_feature_view_proto.spec.tags), owner=on_demand_feature_view_proto.spec.owner)
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(on_demand_feature_view_obj)
if on_demand_feature_view_proto.meta.HasField('created_timestamp'):
on_demand_feature_view_obj.created_timestamp = on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
if on_demand_feature_view_proto.meta.HasField('last_updated_timestamp'):
on_demand_feature_view_obj.last_updated_timestamp = on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
return on_demand_feature_view_obj |
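to_proto and from_proto above form a serialization round trip: the udf travels as dill-pickled bytes and is rehydrated on load. A short sketch, assuming the odfv object from the previous example:

# Serialize the view, then rebuild it from the protobuf message.
proto = odfv.to_proto()
restored = OnDemandFeatureView.from_proto(proto)

# The restored udf is a live function again (unpickled via dill.loads).
assert restored.name == odfv.name
assert restored.udf.__name__ == odfv.udf.__name__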
def infer_features(self):
'\n Infers the set of features associated to this feature view from the input source.\n\n Raises:\n RegistryInferenceFailure: The set of features could not be inferred.\n '
df = pd.DataFrame()
for feature_view_projection in self.source_feature_view_projections.values():
for feature in feature_view_projection.features:
dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
df[f'{feature_view_projection.name}__{feature.name}'] = pd.Series(dtype=dtype)
df[f'{feature.name}'] = pd.Series(dtype=dtype)
for request_data in self.source_request_sources.values():
for field in request_data.schema:
dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
df[f'{field.name}'] = pd.Series(dtype=dtype)
output_df: pd.DataFrame = self.udf.__call__(df)
inferred_features = []
for (f, dt) in zip(output_df.columns, output_df.dtypes):
inferred_features.append(Field(name=f, dtype=from_value_type(python_type_to_feast_value_type(f, type_name=str(dt)))))
if self.features:
missing_features = []
for specified_features in self.features:
if (specified_features not in inferred_features):
missing_features.append(specified_features)
if missing_features:
raise SpecifiedFeaturesNotPresentError([f.name for f in missing_features], self.name)
else:
self.features = inferred_features
if (not self.features):
raise RegistryInferenceFailure('OnDemandFeatureView', f"Could not infer Features for the feature view '{self.name}'.") | 251,879,823,335,674,460 | Infers the set of features associated to this feature view from the input source.
Raises:
RegistryInferenceFailure: The set of features could not be inferred. | sdk/python/feast/on_demand_feature_view.py | infer_features | aurobindoc/feast | python | def infer_features(self):
'\n Infers the set of features associated to this feature view from the input source.\n\n Raises:\n RegistryInferenceFailure: The set of features could not be inferred.\n '
df = pd.DataFrame()
for feature_view_projection in self.source_feature_view_projections.values():
for feature in feature_view_projection.features:
dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
df[f'{feature_view_projection.name}__{feature.name}'] = pd.Series(dtype=dtype)
df[f'{feature.name}'] = pd.Series(dtype=dtype)
for request_data in self.source_request_sources.values():
for field in request_data.schema:
dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
df[f'{field.name}'] = pd.Series(dtype=dtype)
output_df: pd.DataFrame = self.udf.__call__(df)
inferred_features = []
for (f, dt) in zip(output_df.columns, output_df.dtypes):
inferred_features.append(Field(name=f, dtype=from_value_type(python_type_to_feast_value_type(f, type_name=str(dt)))))
if self.features:
missing_features = []
for specified_features in self.features:
if (specified_features not in inferred_features):
missing_features.append(specified_features)
if missing_features:
raise SpecifiedFeaturesNotPresentError([f.name for f in missing_features], self.name)
else:
self.features = inferred_features
if (not self.features):
raise RegistryInferenceFailure('OnDemandFeatureView', f"Could not infer Features for the feature view '{self.name}'.") |
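The inference trick above (run the udf on an empty but correctly typed DataFrame, then read the schema off the result) can be reproduced standalone with plain pandas:

import pandas as pd

probe = pd.DataFrame()
probe['val_to_add'] = pd.Series(dtype='int64')  # empty, typed input column

def add_one(inputs: pd.DataFrame) -> pd.DataFrame:
    out = pd.DataFrame()
    out['output'] = inputs['val_to_add'] + 1
    return out

# Mirrors infer_features: zip the output columns with their dtypes.
out = add_one(probe)
print([(name, str(dt)) for name, dt in zip(out.columns, out.dtypes)])  # [('output', 'int64')]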
def initUI(self):
' Initialize the window contents '
self.sub_objs = QtWidgets.QListWidget()
for obj in self.__obj.sub_objects:
a = QtWidgets.QListWidgetItem()
a.sub_obj = obj
a.setText(obj.name)
self.sub_objs.addItem(a)
self.form = QtWidgets.QFormLayout()
self.form.addRow(self.sub_objs)
self.setLayout(self.form)
self.sub_objs.itemDoubleClicked.connect(self.isDoubleClicked) | 5,570,166,808,828,233,000 | Инициализируем содержимое окна | src/gui/SubVision.py | initUI | bochkovoi/AHP | python | def initUI(self):
' '
self.sub_objs = QtWidgets.QListWidget()
for obj in self.__obj.sub_objects:
a = QtWidgets.QListWidgetItem()
a.sub_obj = obj
a.setText(obj.name)
self.sub_objs.addItem(a)
self.form = QtWidgets.QFormLayout()
self.form.addRow(self.sub_objs)
self.setLayout(self.form)
self.sub_objs.itemDoubleClicked.connect(self.isDoubleClicked) |
@staticmethod
def extract_intent_and_entities(user_input):
'Parse the user input using regexes to extract intent & entities.'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
m = re.search((('^[' + prefixes) + ']?([^{]+)([{].+)?'), user_input)
if (m is not None):
event_name = m.group(1).strip()
entities = RegexInterpreter._parse_parameters(m.group(2), m.start(2), m.end(2), user_input)
return (event_name, entities)
else:
logger.warning("Failed to parse intent end entities from '{}'. ".format(user_input))
return (None, []) | 6,407,435,312,694,949,000 | Parse the user input using regexes to extract intent & entities. | rasa_core/interpreter.py | extract_intent_and_entities | RocketChat/rasa_core | python | @staticmethod
def extract_intent_and_entities(user_input):
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
m = re.search((('^[' + prefixes) + ']?([^{]+)([{].+)?'), user_input)
if (m is not None):
event_name = m.group(1).strip()
entities = RegexInterpreter._parse_parameters(m.group(2), m.start(2), m.end(2), user_input)
return (event_name, entities)
else:
logger.warning("Failed to parse intent end entities from '{}'. ".format(user_input))
return (None, []) |
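The regex above accepts messages such as /greet{"name": "Rasa"}: an optional prefix character, an intent name, then an optional JSON payload. A simplified standalone restatement, using '/' in place of RegexInterpreter.allowed_prefixes() and raw json.loads in place of _parse_parameters:

import json
import re

def parse_structured(user_input):
    m = re.search(r'^[/]?([^{]+)([{].+)?', user_input)
    if m is None:
        return None, []
    intent = m.group(1).strip()
    # Simplification: parse the payload as plain JSON instead of entity dicts.
    payload = json.loads(m.group(2)) if m.group(2) else {}
    return intent, payload

print(parse_structured('/greet{"name": "Rasa"}'))  # ('greet', {'name': 'Rasa'})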
@staticmethod
def deprecated_extraction(user_input):
'DEPRECATED parse of user input message.'
value_assign_rx = '\\s*(.+)\\s*=\\s*(.+)\\s*'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
structured_message_rx = (('^[' + prefixes) + ']?([^\\[]+)(\\[(.+)\\])?')
m = re.search(structured_message_rx, user_input)
if (m is not None):
intent = m.group(1).lower()
offset = m.start(3)
entities_str = m.group(3)
entities = []
if (entities_str is not None):
for entity_str in entities_str.split(','):
for match in re.finditer(value_assign_rx, entity_str):
start = (match.start(2) + offset)
end = (match.end(0) + offset)
entity = {'entity': match.group(1), 'start': start, 'end': end, 'value': match.group(2)}
entities.append(entity)
return (intent, entities)
else:
return (None, []) | 1,870,368,407,113,227,800 | DEPRECATED parse of user input message. | rasa_core/interpreter.py | deprecated_extraction | RocketChat/rasa_core | python | @staticmethod
def deprecated_extraction(user_input):
value_assign_rx = '\\s*(.+)\\s*=\\s*(.+)\\s*'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
structured_message_rx = (('^[' + prefixes) + ']?([^\\[]+)(\\[(.+)\\])?')
m = re.search(structured_message_rx, user_input)
if (m is not None):
intent = m.group(1).lower()
offset = m.start(3)
entities_str = m.group(3)
entities = []
if (entities_str is not None):
for entity_str in entities_str.split(','):
for match in re.finditer(value_assign_rx, entity_str):
start = (match.start(2) + offset)
end = (match.end(0) + offset)
entity = {'entity': match.group(1), 'start': start, 'end': end, 'value': match.group(2)}
entities.append(entity)
return (intent, entities)
else:
return (None, []) |
@staticmethod
def is_using_deprecated_format(text):
'Indicates if the text string is using the deprecated intent format.\n\n In the deprecated format entities were annotated using `[name=Rasa]`\n which has been replaced with `{"name": "Rasa"}`.'
return ((text.find('[') != (- 1)) and ((text.find('{') == (- 1)) or (text.find('[') < text.find('{')))) | -736,614,347,310,115,300 | Indicates if the text string is using the deprecated intent format.
In the deprecated format entities were annotated using `[name=Rasa]`
which has been replaced with `{"name": "Rasa"}`. | rasa_core/interpreter.py | is_using_deprecated_format | RocketChat/rasa_core | python | @staticmethod
def is_using_deprecated_format(text):
'Indicates if the text string is using the deprecated intent format.\n\n In the deprecated format entities were annotated using `[name=Rasa]`\n which has been replaced with `{"name": "Rasa"}`.'
return ((text.find('[') != (- 1)) and ((text.find('{') == (- 1)) or (text.find('[') < text.find('{')))) |
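A standalone restatement of the bracket-position heuristic above, exercised on both annotation styles:

def is_deprecated(text):
    # Deprecated style marks entities as [name=Rasa]; the current style uses JSON.
    return text.find('[') != -1 and (text.find('{') == -1 or text.find('[') < text.find('{'))

assert is_deprecated('greet[name=Rasa]')
assert not is_deprecated('greet{"name": "Rasa"}')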
def parse(self, text):
'Parse a text message.'
if self.is_using_deprecated_format(text):
(intent, entities) = self.deprecated_extraction(text)
else:
(intent, entities) = self.extract_intent_and_entities(text)
return {'text': text, 'intent': {'name': intent, 'confidence': 1.0}, 'intent_ranking': [{'name': intent, 'confidence': 1.0}], 'entities': entities} | 4,211,144,143,960,487,000 | Parse a text message. | rasa_core/interpreter.py | parse | RocketChat/rasa_core | python | def parse(self, text):
if self.is_using_deprecated_format(text):
(intent, entities) = self.deprecated_extraction(text)
else:
(intent, entities) = self.extract_intent_and_entities(text)
return {'text': text, 'intent': {'name': intent, 'confidence': 1.0}, 'intent_ranking': [{'name': intent, 'confidence': 1.0}], 'entities': entities} |
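The dict returned by parse above mirrors a Rasa NLU response, with confidence hard-coded to 1.0 for regex parses. An illustrative shape for /greet{"name": "Rasa"} (the entity offsets shown are hypothetical; _parse_parameters computes the real ones):

expected = {
    'text': '/greet{"name": "Rasa"}',
    'intent': {'name': 'greet', 'confidence': 1.0},
    'intent_ranking': [{'name': 'greet', 'confidence': 1.0}],
    'entities': [{'entity': 'name', 'start': 6, 'end': 23, 'value': 'Rasa'}],  # offsets illustrative
}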
def parse(self, text):
'Parse a text message.\n\n Return a default value if the parsing of the text failed.'
default_return = {'intent': {'name': '', 'confidence': 0.0}, 'entities': [], 'text': ''}
result = self._rasa_http_parse(text)
return (result if (result is not None) else default_return) | 4,051,425,145,987,794,000 | Parse a text message.
Return a default value if the parsing of the text failed. | rasa_core/interpreter.py | parse | RocketChat/rasa_core | python | def parse(self, text):
'Parse a text message.\n\n Return a default value if the parsing of the text failed.'
default_return = {'intent': {'name': '', 'confidence': 0.0}, 'entities': [], 'text': ''}
result = self._rasa_http_parse(text)
return (result if (result is not None) else default_return) |
def _rasa_http_parse(self, text):
'Send a text message to a running rasa NLU http server.\n\n Return `None` on failure.'
if (not self.server):
logger.error("Failed to parse text '{}' using rasa NLU over http. No rasa NLU server specified!".format(text))
return None
params = {'token': self.token, 'model': self.model_name, 'project': self.project_name, 'q': text}
url = '{}/parse'.format(self.server)
try:
result = requests.get(url, params=params)
if (result.status_code == 200):
return result.json()
else:
logger.error("Failed to parse text '{}' using rasa NLU over http. Error: {}".format(text, result.text))
return None
except Exception as e:
logger.error("Failed to parse text '{}' using rasa NLU over http. Error: {}".format(text, e))
return None | 3,105,361,765,552,769,500 | Send a text message to a running rasa NLU http server.
Return `None` on failure. | rasa_core/interpreter.py | _rasa_http_parse | RocketChat/rasa_core | python | def _rasa_http_parse(self, text):
'Send a text message to a running rasa NLU http server.\n\n Return `None` on failure.'
if (not self.server):
logger.error("Failed to parse text '{}' using rasa NLU over http. No rasa NLU server specified!".format(text))
return None
params = {'token': self.token, 'model': self.model_name, 'project': self.project_name, 'q': text}
url = '{}/parse'.format(self.server)
try:
result = requests.get(url, params=params)
if (result.status_code == 200):
return result.json()
else:
logger.error("Failed to parse text '{}' using rasa NLU over http. Error: {}".format(text, result.text))
return None
except Exception as e:
logger.error("Failed to parse text '{}' using rasa NLU over http. Error: {}".format(text, e))
return None |
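The HTTP path above is a plain GET against the NLU server's /parse endpoint. An equivalent standalone request, assuming a hypothetical server at localhost:5000 serving a model named 'current':

import requests

params = {'token': None, 'model': 'current', 'project': 'default', 'q': 'hello there'}
result = requests.get('http://localhost:5000/parse', params=params)
if result.status_code == 200:
    print(result.json())  # same dict shape the interpreters above return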
def parse(self, text):
'Parse a text message.\n\n Return a default value if the parsing of the text failed.'
if (self.lazy_init and (self.interpreter is None)):
self._load_interpreter()
return self.interpreter.parse(text) | 7,794,856,214,773,793,000 | Parse a text message.
Return a default value if the parsing of the text failed. | rasa_core/interpreter.py | parse | RocketChat/rasa_core | python | def parse(self, text):
'Parse a text message.\n\n Return a default value if the parsing of the text failed.'
if (self.lazy_init and (self.interpreter is None)):
self._load_interpreter()
return self.interpreter.parse(text) |
def register_dummy_task(task_name: str, dataset_fn: Callable[([str, str], tf.data.Dataset)], output_feature_names: Sequence[str]=('inputs', 'targets')) -> None:
'Register a dummy task for GetDatasetTest.'
dataset_providers.TaskRegistry.add(task_name, source=dataset_providers.FunctionDataSource(dataset_fn=dataset_fn, splits=['train', 'validation']), preprocessors=[dataset_providers.CacheDatasetPlaceholder(), preprocessors.append_eos_after_trim], output_features={feat: dataset_providers.Feature(test_utils.sentencepiece_vocab()) for feat in output_feature_names}, metric_fns=[]) | 6,762,093,965,352,016,000 | Register a dummy task for GetDatasetTest. | seqio/dataset_providers_test.py | register_dummy_task | 00mjk/seqio | python | def register_dummy_task(task_name: str, dataset_fn: Callable[([str, str], tf.data.Dataset)], output_feature_names: Sequence[str]=('inputs', 'targets')) -> None:
dataset_providers.TaskRegistry.add(task_name, source=dataset_providers.FunctionDataSource(dataset_fn=dataset_fn, splits=['train', 'validation']), preprocessors=[dataset_providers.CacheDatasetPlaceholder(), preprocessors.append_eos_after_trim], output_features={feat: dataset_providers.Feature(test_utils.sentencepiece_vocab()) for feat in output_feature_names}, metric_fns=[]) |
def sequential_intereave(datasets: Sequence[tf.data.Dataset], rates: Sequence[float], sample_seed: Optional[int]) -> tf.data.Dataset:
'Sample function that simply concatenates two datasets.'
del rates, sample_seed
return datasets[0].concatenate(datasets[1]) | 5,979,708,542,510,301,000 | Sample function that simply concatenates two datasets. | seqio/dataset_providers_test.py | sequential_intereave | 00mjk/seqio | python | def sequential_intereave(datasets: Sequence[tf.data.Dataset], rates: Sequence[float], sample_seed: Optional[int]) -> tf.data.Dataset:
del rates, sample_seed
return datasets[0].concatenate(datasets[1]) |
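Because rates and sample_seed are discarded, the sampler above simply plays the first dataset to completion and then the second. A quick check with two toy datasets, calling the function defined above:

import tensorflow as tf

ds_a = tf.data.Dataset.from_tensor_slices([1, 2])
ds_b = tf.data.Dataset.from_tensor_slices([3, 4])

# rates and sample_seed are ignored by sequential_intereave.
combined = sequential_intereave([ds_a, ds_b], rates=[0.5, 0.5], sample_seed=None)
print(list(combined.as_numpy_iterator()))  # [1, 2, 3, 4]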
def next_token_metrics_epoch_end(self, outputs, stage):
'\n Logic for validation & testing epoch end:\n 1) Calculate accuracy@1, accuracy@5, MRR@5\n 2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint\n 3) Log everything to wandb\n '
loss = torch.stack([x['loss'] for x in outputs]).mean()
metrics = {f'{stage}_loss_epoch': loss}
if (stage == 'val'):
self.log('val_loss_epoch', metrics['val_loss_epoch'], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count) | 2,297,948,692,306,093,000 | Logic for validation & testing epoch end:
1) Calculate accuracy@1, accuracy@5, MRR@5
2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint
3) Log everything to wandb | src/model/encoder_decoder_module.py | next_token_metrics_epoch_end | saridormi/commit_message_generation | python | def next_token_metrics_epoch_end(self, outputs, stage):
'\n Logic for validation & testing epoch end:\n 1) Calculate accuracy@1, accuracy@5, MRR@5\n 2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint\n 3) Log everything to wandb\n '
loss = torch.stack([x['loss'] for x in outputs]).mean()
metrics = {f'{stage}_loss_epoch': loss}
if (stage == 'val'):
self.log('val_loss_epoch', metrics['val_loss_epoch'], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count) |
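The epoch-end reduction above is a mean over the per-batch losses collected in outputs. A standalone sketch of that step:

import torch

# One dict per validation batch, as PyTorch Lightning passes them in.
outputs = [{'loss': torch.tensor(0.9)}, {'loss': torch.tensor(0.7)}]
epoch_loss = torch.stack([x['loss'] for x in outputs]).mean()
print(epoch_loss.item())  # ~0.8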
def custom_name_func(testcase_func, param_num, param):
"\n A custom test name function that will ensure that the tests are run such that they're batched with all tests for a\n given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical\n order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes\n so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10\n tests results in tests running in an order similar to:\n\n test_*.test_scenario_0_*\n\n test_*.test_scenario_10_*\n\n test_*.test_scenario_11_*\n\n ...\n\n test_*.test_scenario_19_*\n\n test_*.test_scenario_1_*\n\n test_*.test_scenario_20_*\n "
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args)
return str(('%s_%s' % (testcase_func.__name__, parameterized.to_safe_name('_'.join((str(x) for x in param.args)))))) | 896,388,110,667,100,500 | A custom test name function that will ensure that the tests are run such that they're batched with all tests for a
given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
tests results in tests running in an order similar to:
test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_* | tests/testUtils.py | custom_name_func | NPCC-Joe/Radiomics-pyradiomics | python | def custom_name_func(testcase_func, param_num, param):
"\n A custom test name function that will ensure that the tests are run such that they're batched with all tests for a\n given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical\n order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes\n so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10\n tests results in tests running in an order similar to:\n\n test_*.test_scenario_0_*\n\n test_*.test_scenario_10_*\n\n test_*.test_scenario_11_*\n\n ...\n\n test_*.test_scenario_19_*\n\n test_*.test_scenario_1_*\n\n test_*.test_scenario_20_*\n "
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args)
return str(('%s_%s' % (testcase_func.__name__, parameterized.to_safe_name('_'.join((str(x) for x in param.args)))))) |
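The alternate option the docstring mentions, right-justifying param_num with zeroes so numeric and alphabetic orders agree, would look roughly like this; the stand-in test function and param object are invented for illustration:

def padded_name_func(testcase_func, param_num, param):
    # 1 -> '001', 20 -> '020': zero-padding keeps alphabetical order numeric.
    return '%s_%s_%s' % (testcase_func.__name__, '{0:0>3}'.format(param_num),
                         '_'.join(str(x) for x in param.args))

def test_scenario():  # stand-in for the real parameterized test function
    pass

class _Param:  # stand-in for the parameterized param object
    args = ('brain1', 'firstorder')

print(padded_name_func(test_scenario, 1, _Param))  # test_scenario_001_brain1_firstorder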
def readBaselineFiles(self):
"\n Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.\n These files should therefore be named as follows: 'baseline_<className>.csv'.\n "
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if (os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_'))]
assert (len(baselineFiles) > 0)
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests | 3,192,976,214,395,778,000 | Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
These files should therefore be named as follows: 'baseline_<className>.csv'. | tests/testUtils.py | readBaselineFiles | NPCC-Joe/Radiomics-pyradiomics | python | def readBaselineFiles(self):
"\n Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.\n These files should therefore be named as follows: 'baseline_<className>.csv'.\n "
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if (os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_'))]
assert (len(baselineFiles) > 0)
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests |
def getTests(self):
'\n Return all the tests for which there is baseline information.\n '
return self._tests | 3,367,122,534,872,929,300 | Return all the tests for which there are baseline information. | tests/testUtils.py | getTests | NPCC-Joe/Radiomics-pyradiomics | python | def getTests(self):
'\n \n '
return self._tests |
def getFeatureNames(self, className, test):
'\n Gets all features for which a baseline value is available for the current class and test case. Returns a list\n containing the feature names (without image type and feature class specifiers, i.e. just the feature name).\n '
if (className not in self._baseline):
return None
return self._baseline[className].getTestFeatures(test) | 944,828,873,189,261,000 | Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names (without image type and feature class specifiers, i.e. just the feature name). | tests/testUtils.py | getFeatureNames | NPCC-Joe/Radiomics-pyradiomics | python | def getFeatureNames(self, className, test):
'\n Gets all features for which a baseline value is available for the current class and test case. Returns a list\n containing the feature names (without image type and feature class specifiers, i.e. just the feature name).\n '
if (className not in self._baseline):
return None
return self._baseline[className].getTestFeatures(test) |
def setFeatureClassAndTestCase(self, className, test):
'\n Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case\n is not recognized. These have to be set here together, as the settings with which the test case has to be loaded\n are defined per feature class in the baseline (extracted from provenance information).\n\n Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test\n settings.\n\n If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature\n class or test case is changed, function returns True.\n '
global TEST_CASES
if ((self._featureClassName == className) and (self._test == test)):
return False
self._test = test
self._testedSet.add(self._test)
if (self._featureClassName != className):
self._logger.debug('Setting feature class name to %s', className)
assert (className in self._baseline.keys())
self._featureClassName = className
if (self._current_config != self._baseline[className].getTestConfig(test)):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None
if (self._testCase != self._current_config['TestCase']):
self._testCase = self._current_config['TestCase']
self._logger.info('Reading the image and mask for test case %s', self._testCase)
assert (self._current_config['TestCase'] in TEST_CASES)
(imageName, maskName) = getTestCase(self._testCase)
assert (imageName is not None)
assert (maskName is not None)
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName)
if ('ImageHash' in self._current_config):
assert (sitk.Hash(self._image) == self._current_config['ImageHash'])
if ('MaskHash' in self._current_config):
assert (sitk.Hash(self._mask) == self._current_config['MaskHash'])
settings = self._current_config.get('Settings', {})
interpolator = settings.get('interpolator', sitk.sitkBSpline)
resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
if ((interpolator is not None) and (resampledPixelSpacing is not None)):
(self._image, self._mask) = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5))
(self._bb, correctedMask) = imageoperations.checkMask(self._image, self._mask, **settings)
if (correctedMask is not None):
self._mask = correctedMask
self._imageType = None
return True | -1,088,853,398,139,280,100 | Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case
is not recognized. These have to be set here together, as the settings with which the test case has to be loaded
are defined per feature class in the baseline (extracted from provenance information).
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
settings.
If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
class or test case is changed, function returns True. | tests/testUtils.py | setFeatureClassAndTestCase | NPCC-Joe/Radiomics-pyradiomics | python | def setFeatureClassAndTestCase(self, className, test):
'\n Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case\n is not recognized. These have to be set here together, as the settings with which the test case has to be loaded\n are defined per feature class in the baseline (extracted from provenance information).\n\n Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test\n settings.\n\n If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature\n class or test case is changed, function returns True.\n '
global TEST_CASES
if ((self._featureClassName == className) and (self._test == test)):
return False
self._test = test
self._testedSet.add(self._test)
if (self._featureClassName != className):
self._logger.debug('Setting feature class name to %s', className)
assert (className in self._baseline.keys())
self._featureClassName = className
if (self._current_config != self._baseline[className].getTestConfig(test)):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None
if (self._testCase != self._current_config['TestCase']):
self._testCase = self._current_config['TestCase']
self._logger.info('Reading the image and mask for test case %s', self._testCase)
assert (self._current_config['TestCase'] in TEST_CASES)
(imageName, maskName) = getTestCase(self._testCase)
assert (imageName is not None)
assert (maskName is not None)
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName)
if ('ImageHash' in self._current_config):
assert (sitk.Hash(self._image) == self._current_config['ImageHash'])
if ('MaskHash' in self._current_config):
assert (sitk.Hash(self._mask) == self._current_config['MaskHash'])
settings = self._current_config.get('Settings', {})
interpolator = settings.get('interpolator', sitk.sitkBSpline)
resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
if ((interpolator is not None) and (resampledPixelSpacing is not None)):
(self._image, self._mask) = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5))
(self._bb, correctedMask) = imageoperations.checkMask(self._image, self._mask, **settings)
if (correctedMask is not None):
self._mask = correctedMask
self._imageType = None
return True |
def checkResult(self, featureName, value):
'\n Use utility methods to get and test the results against the expected baseline value for this key.\n '
longName = '_'.join(featureName)
if (value is None):
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert (baselineValue is not None)
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if (baselineValue == 0.0):
if ((value - baselineValue) == 0.0):
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs((1.0 - (value / baselineValue)))
self._diffs[self._test][longName] = percentDiff
if (percentDiff >= 0.03):
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, (percentDiff * 100))
assert (percentDiff < 0.03) | 2,661,342,036,451,224,000 | Use utility methods to get and test the results against the expected baseline value for this key. | tests/testUtils.py | checkResult | NPCC-Joe/Radiomics-pyradiomics | python | def checkResult(self, featureName, value):
'\n \n '
longName = '_'.join(featureName)
if (value is None):
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert (baselineValue is not None)
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if (baselineValue == 0.0):
if ((value - baselineValue) == 0.0):
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs((1.0 - (value / baselineValue)))
self._diffs[self._test][longName] = percentDiff
if (percentDiff >= 0.03):
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, (percentDiff * 100))
assert (percentDiff < 0.03) |
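The 3% tolerance check above reduces to a relative-difference computation with a guard for a zero baseline. Standalone:

def percent_diff(value, baseline):
    # Zero baseline: identical values pass, anything else counts as 100% off.
    if baseline == 0.0:
        return 0.0 if value == baseline else 1.0
    return abs(1.0 - value / baseline)

assert percent_diff(1.02, 1.0) < 0.03   # within tolerance
assert percent_diff(1.05, 1.0) >= 0.03  # would fail the baseline check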
def writeCSV(self, data, fileName):
"\n Write out data in a csv file.\n Assumes a data structure with:\n\n {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}\n "
if (len(self._testedSet) > 0):
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = (['testCase'] + header)
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = (row + [thisCase.get(h, 'N/A')])
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName) | -6,234,742,826,333,706,000 | Write out data in a csv file.
Assumes a data structure with:
{'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} | tests/testUtils.py | writeCSV | NPCC-Joe/Radiomics-pyradiomics | python | def writeCSV(self, data, fileName):
"\n Write out data in a csv file.\n Assumes a data structure with:\n\n {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}\n "
if (len(self._testedSet) > 0):
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = (['testCase'] + header)
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = (row + [thisCase.get(h, 'N/A')])
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName) |
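The nested-dict layout writeCSV expects, and the rows it emits (missing features fall back to 'N/A'), as a small standalone sketch using the csv module directly:

import csv
import io

data = {'id1': {'f1': 1, 'f2': 2}, 'id2': {'f1': 3}}
tested = sorted(data)
header = ['testCase'] + sorted(data[tested[0]])

buf = io.StringIO()
writer = csv.writer(buf, lineterminator='\n')
writer.writerow(header)
for case in tested:
    row = data[case]
    writer.writerow([case] + [row.get(h, 'N/A') for h in header[1:]])
print(buf.getvalue())
# testCase,f1,f2
# id1,1,2
# id2,3,N/A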
def getTestFeatures(self, test):
'\n Gets all features for which a baseline value is available for the current class and test case. Returns a list\n containing the feature names.\n '
if (test not in self.baseline):
return None
return list(self.baseline[test].keys()) | 6,512,197,557,371,750,000 | Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names. | tests/testUtils.py | getTestFeatures | NPCC-Joe/Radiomics-pyradiomics | python | def getTestFeatures(self, test):
'\n Gets all features for which a baseline value is available for the current class and test case. Returns a list\n containing the feature names.\n '
if (test not in self.baseline):
return None
return list(self.baseline[test].keys()) |
def __init__(self, **kwargs):
'\n Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param display_name:\n The value to assign to the display_name property of this UpdateHttpRedirectDetails.\n :type display_name: str\n\n :param target:\n The value to assign to the target property of this UpdateHttpRedirectDetails.\n :type target: HttpRedirectTarget\n\n :param response_code:\n The value to assign to the response_code property of this UpdateHttpRedirectDetails.\n :type response_code: int\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n '
self.swagger_types = {'display_name': 'str', 'target': 'HttpRedirectTarget', 'response_code': 'int', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))'}
self.attribute_map = {'display_name': 'displayName', 'target': 'target', 'response_code': 'responseCode', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags'}
self._display_name = None
self._target = None
self._response_code = None
self._freeform_tags = None
self._defined_tags = None | 6,103,624,177,813,616,000 | Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateHttpRedirectDetails.
:type display_name: str
:param target:
The value to assign to the target property of this UpdateHttpRedirectDetails.
:type target: HttpRedirectTarget
:param response_code:
The value to assign to the response_code property of this UpdateHttpRedirectDetails.
:type response_code: int
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.
:type defined_tags: dict(str, dict(str, object)) | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | __init__ | revnav/sandbox | python | def __init__(self, **kwargs):
'\n Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param display_name:\n The value to assign to the display_name property of this UpdateHttpRedirectDetails.\n :type display_name: str\n\n :param target:\n The value to assign to the target property of this UpdateHttpRedirectDetails.\n :type target: HttpRedirectTarget\n\n :param response_code:\n The value to assign to the response_code property of this UpdateHttpRedirectDetails.\n :type response_code: int\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n '
self.swagger_types = {'display_name': 'str', 'target': 'HttpRedirectTarget', 'response_code': 'int', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))'}
self.attribute_map = {'display_name': 'displayName', 'target': 'target', 'response_code': 'responseCode', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags'}
self._display_name = None
self._target = None
self._response_code = None
self._freeform_tags = None
self._defined_tags = None |
@property
def display_name(self):
'\n Gets the display_name of this UpdateHttpRedirectDetails.\n The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.\n\n\n :return: The display_name of this UpdateHttpRedirectDetails.\n :rtype: str\n '
return self._display_name | -4,049,829,361,402,219,000 | Gets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:return: The display_name of this UpdateHttpRedirectDetails.
:rtype: str | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | display_name | revnav/sandbox | python | @property
def display_name(self):
'\n Gets the display_name of this UpdateHttpRedirectDetails.\n The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.\n\n\n :return: The display_name of this UpdateHttpRedirectDetails.\n :rtype: str\n '
return self._display_name |
@display_name.setter
def display_name(self, display_name):
'\n Sets the display_name of this UpdateHttpRedirectDetails.\n The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.\n\n\n :param display_name: The display_name of this UpdateHttpRedirectDetails.\n :type: str\n '
self._display_name = display_name | 937,187,494,521,535,200 | Sets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:param display_name: The display_name of this UpdateHttpRedirectDetails.
:type: str | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | display_name | revnav/sandbox | python | @display_name.setter
def display_name(self, display_name):
'\n Sets the display_name of this UpdateHttpRedirectDetails.\n The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.\n\n\n :param display_name: The display_name of this UpdateHttpRedirectDetails.\n :type: str\n '
self._display_name = display_name |
@property
def target(self):
'\n Gets the target of this UpdateHttpRedirectDetails.\n The redirect target object including all the redirect data.\n\n\n :return: The target of this UpdateHttpRedirectDetails.\n :rtype: HttpRedirectTarget\n '
return self._target | 5,426,157,337,298,315,000 | Gets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:return: The target of this UpdateHttpRedirectDetails.
:rtype: HttpRedirectTarget | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | target | revnav/sandbox | python | @property
def target(self):
'\n Gets the target of this UpdateHttpRedirectDetails.\n The redirect target object including all the redirect data.\n\n\n :return: The target of this UpdateHttpRedirectDetails.\n :rtype: HttpRedirectTarget\n '
return self._target |
@target.setter
def target(self, target):
'\n Sets the target of this UpdateHttpRedirectDetails.\n The redirect target object including all the redirect data.\n\n\n :param target: The target of this UpdateHttpRedirectDetails.\n :type: HttpRedirectTarget\n '
self._target = target | -3,774,355,794,326,944,000 | Sets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:param target: The target of this UpdateHttpRedirectDetails.
:type: HttpRedirectTarget | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | target | revnav/sandbox | python | @target.setter
def target(self, target):
'\n Sets the target of this UpdateHttpRedirectDetails.\n The redirect target object including all the redirect data.\n\n\n :param target: The target of this UpdateHttpRedirectDetails.\n :type: HttpRedirectTarget\n '
self._target = target |
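The generated model above follows the usual OCI SDK pattern: construct with keyword arguments, then read and write fields through properties. A minimal sketch, assuming the oci package is installed (the redirect name and code are invented):

from oci.waas.models import UpdateHttpRedirectDetails

details = UpdateHttpRedirectDetails(display_name='docs-redirect', response_code=301)
details.display_name = 'docs-redirect-v2'  # property setter shown above
print(details.display_name, details.response_code)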