content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def test_fixed_weight_optimiser(initial_weights, expected_weights):
"""
Tests initialisation and 'pass through' capability of
FixedWeightPortfolioOptimiser.
"""
dt = pd.Timestamp('2019-01-01 00:00:00', tz=pytz.UTC)
data_handler = DataHandlerMock()
fwo = FixedWeightPortfolioOptimiser(data_handler=data_handler)
assert fwo(dt, initial_weights) == expected_weights | 4,300 |
def _UpdateCountsForNewFlake(start_date):
"""Updates counts for new, re-occurred or rare flakes.
Args:
start_date(datetime): Earliest time to check.
"""
more = True
cursor = None
while more:
ndb.get_context().clear_cache()
flakes, cursor, more = Flake.query().filter(
Flake.last_occurred_time > start_date).filter(
Flake.flake_score_last_week == 0).fetch_page(
100, start_cursor=cursor)
for flake in flakes:
_UpdateFlakeCountsAndScore(flake, start_date)
ndb.put_multi(flakes) | 4,301 |
def save_quotas() -> None:
"""Create or update existing project quotas."""
nova = nova_client.Client(
version='2.1',
session=settings.OPENSTACK_SESSION,
endpoint_type='public',
)
cinder = cinder_client.Client(
version='3.40',
session=settings.OPENSTACK_SESSION
)
neutron = neutron_client.Client(
session=settings.OPENSTACK_SESSION
)
for project in Projects.objects.all():
nova_quotas = nova.quotas.get(tenant_id=project.id).to_dict()
cinder_quotas = cinder.quotas.get(tenant_id=project.id).to_dict()
neutron_quotas = neutron.show_quota_details(tenant_id=project.id)
try:
quotas_obj = ProjectQuotas.objects.get(project=project)
except ProjectQuotas.DoesNotExist:
quotas_obj = ProjectQuotas()
quotas_obj.project = project
quotas_obj.cores = nova_quotas['cores']
quotas_obj.ram = nova_quotas['ram']
quotas_obj.instances = nova_quotas['instances']
quotas_obj.volumes = cinder_quotas['volumes']
quotas_obj.gigabytes = cinder_quotas['gigabytes']
quotas_obj.backups = cinder_quotas['backups']
quotas_obj.backup_gigabytes = cinder_quotas['backup_gigabytes']
quotas_obj.snapshots = cinder_quotas['snapshots']
quotas_obj.floatingip_limit = neutron_quotas['quota']['floatingip']['limit']
quotas_obj.floatingip_reserved = neutron_quotas['quota']['floatingip']['reserved']
quotas_obj.floatingip_used = neutron_quotas['quota']['floatingip']['used']
quotas_obj.loadbalancer_limit = neutron_quotas['quota']['loadbalancer']['limit']
quotas_obj.loadbalancer_reserved = neutron_quotas['quota']['loadbalancer']['reserved']
quotas_obj.loadbalancer_used = neutron_quotas['quota']['loadbalancer']['used']
quotas_obj.save() | 4,302 |
def main():
"""
    The application startup function: load the stylesheet and show the main window.
"""
app = QtWidgets.QApplication(sys.argv)
# set stylesheet
file = QFile("UI/dark.qss")
file.open(QFile.ReadOnly | QFile.Text)
stream = QTextStream(file)
app.setStyleSheet(stream.readAll())
main_window = QtWidgets.QMainWindow()
ImageProcessor(main_window)
main_window.show()
sys.exit(app.exec_()) | 4,303 |
def balance_generic(array: np.ndarray, classes: np.ndarray, balancing_max: int, output: int, random_state: int = 42) -> np.ndarray:
    """Balance the given array by capping the number of samples of the expected output class.
    array: np.ndarray, array to balance
    classes: np.ndarray, output classes
    balancing_max: int, maximum number of samples to keep for the output class
    output: int, expected output class.
    random_state: int, seed used when shuffling the removal mask.
    """
output_class_mask = np.array(classes == output)
retain_mask = np.bitwise_not(output_class_mask)
n = np.sum(output_class_mask)
if n > balancing_max:
datapoints_to_remove = n - balancing_max
mask = np.ones(shape=n)
mask[:datapoints_to_remove] = 0
np.random.seed(random_state)
np.random.shuffle(mask)
output_class_mask[np.where(output_class_mask)] = mask
array = array[np.logical_or(
output_class_mask, retain_mask).reshape(-1)]
return array | 4,304 |
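# Illustrative usage sketch for balance_generic above (not part of the original
# snippet): the toy data and shapes are assumptions, and numpy is assumed to be
# imported as np.
X = np.arange(12).reshape(6, 2)
y = np.array([0, 1, 1, 1, 1, 0])
# Keep at most 2 samples of class 1; other classes are left untouched.
X_balanced = balance_generic(X, y, balancing_max=2, output=1)
print(X_balanced.shape)  # (4, 2): 2 remaining class-1 rows plus the 2 class-0 rows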
def jsexternal(args, result, **kwds):
"""Decorator to define stubbed-out external javascript functions.
This decorator can be applied to a python function to register it as
the stubbed-out implementation of an external javascript function.
The llinterpreter will run the python code, the compiled interpreter
will link to the javascript function of the same name.
"""
def do_register(func):
kwds.setdefault('_callable', func)
kwds.setdefault('random_effects_on_gcobjs', False)
kwds.setdefault('compilation_info', compilation_info)
return rffi.llexternal(func.__name__, args, result, **kwds)
return do_register | 4,305 |
def get_icon(filename):
""" """
icon = get_image_path(filename)
if icon:
return QIcon(icon)
else:
return QIcon() | 4,306 |
def permission_confirm(perm_key_pair: list) -> Union[bool, str, None]:
"""Converts string versions of bool inputs to raw bool values."""
if perm_key_pair[1].strip() == 'true': pi = True
elif perm_key_pair[1].strip() == 'false': pi = False
elif perm_key_pair[1].strip() == 'none': pi = None
else: pi = 'None'
return pi | 4,307 |
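# Hypothetical examples of how permission_confirm above maps string inputs
# (the permission names are made up for illustration):
print(permission_confirm(['send_messages', ' true ']))  # True
print(permission_confirm(['embed_links', 'false']))     # False
print(permission_confirm(['administrator', 'none']))    # None
print(permission_confirm(['kick_members', 'maybe']))    # 'None' (string fallback)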
def _parse_policy_controller(configmanagement, msg):
"""Load PolicyController with the parsed config-management.yaml.
Args:
configmanagement: dict, The data loaded from the config-management.yaml
given by user.
msg: The Hub messages package.
Returns:
policy_controller: The Policy Controller configuration for
MembershipConfigs, filled in the data parsed from
configmanagement.spec.policyController
Raises: Error, if Policy Controller `enabled` is missing or not a boolean
"""
if ('spec' not in configmanagement or
'policyController' not in configmanagement['spec']):
return None
spec_policy_controller = configmanagement['spec']['policyController']
# Required field
if configmanagement['spec'][
'policyController'] is None or 'enabled' not in spec_policy_controller:
raise exceptions.Error(
'Missing required field .spec.policyController.enabled')
enabled = spec_policy_controller['enabled']
if not isinstance(enabled, bool):
raise exceptions.Error(
'policyController.enabled should be `true` or `false`')
policy_controller = msg.ConfigManagementPolicyController()
# When the policyController is set to be enabled, policy_controller will
# be filled with the valid fields set in spec_policy_controller, which
# were mapped from the config-management.yaml
for field in spec_policy_controller:
if field not in [
'enabled', 'templateLibraryInstalled', 'auditIntervalSeconds',
'referentialRulesEnabled', 'exemptableNamespaces', 'logDeniesEnabled',
'mutationEnabled'
]:
raise exceptions.Error(
'Please remove illegal field .spec.policyController.{}'.format(field))
setattr(policy_controller, field, spec_policy_controller[field])
return policy_controller | 4,308 |
def main(source_file):
"""Simple solution to adventofcode problem 3."""
data = ''
with open(source_file) as source:
data = source.read()
print('Santa gave at least one present to {} houses.'.format(
number_of_houses_covered(data))) | 4,309 |
def _type_cast(type_cast: Any, content_to_typecast: bytes, func_dict: dict) -> Any:
"""
Basis for type casting on the server
If testing, replace `func_dict` with a dummy one
    Currently NOT guaranteed to return, please remember to change this API
"""
if type_cast == bytes:
return content_to_typecast
if type_cast == str:
try:
typecasted_content = content_to_typecast.decode()
            return typecasted_content # Remember to change this, but I'm lazy rn
except UnicodeDecodeError as e:
raise TypeError(
f"Type casting from bytes to string failed for function "
f"\"{func_dict['name']}\"\n{str(e)}"
) from UnicodeDecodeError
elif type_cast == int:
try:
typecasted_content = int(content_to_typecast)
            return typecasted_content # Remember to change this, but I'm lazy rn
except ValueError as e:
raise TypeError(
f"Type casting from bytes to int failed for function "
f"\"{func_dict['name']}\":\n {e}"
) from ValueError
elif type_cast == float:
try:
typecasted_content = float(content_to_typecast)
            return typecasted_content # Remember to change this, but I'm lazy rn
except ValueError as e:
raise TypeError(
f"Type casting from bytes to float failed for function "
f"\"{func_dict['name']}\":\n {e}"
) from ValueError
elif type_cast is None:
return content_to_typecast
for _type in [list, dict]:
if type_cast == _type:
try:
typecasted_content = json.loads(content_to_typecast)
return typecasted_content
except UnicodeDecodeError:
raise TypeError(
f"Cannot decode message data during "
f"bytes->{_type.__name__} type cast"
"(current implementation requires string to "
"type cast, not bytes)"
) from UnicodeDecodeError
except ValueError:
raise TypeError(
f"Type casting from bytes to {_type.__name__} "
f"failed for function \"{func_dict['name']}\""
f":\n Message is not a {_type.__name__}"
) from ValueError
except Exception as e:
raise TypeError(
f"Type casting from bytes to {_type.__name__} "
f"failed for function \"{func_dict['name']}\""
f":\n {e}"
) from type(e) | 4,310 |
def paster_create(package, tempdir, user, template, email, fullname):
"""
Run paster to create a new package given a template and user info.
"""
dist_root = os.path.join(tempdir, package)
name = get_name(user)
email = get_email(user)
url = '%s/%s/%s' % (config.GITHUB_URL, user, package)
conffile = os.path.join(tempdir, 'pastescript.ini')
paster_config = config.PASTER_CONFIG % (name, email, url)
conf = open(conffile, 'w')
# XXX Kill me
try:
conf.write(paster_config.encode('utf-8'))
except:
paster_config = config.PASTER_CONFIG % ('', email, url)
conf.write(paster_config)
conf.close()
os.chdir(tempdir)
# Support pyramid's pcreate
if template in ('alchemy', 'starter', 'zodb'):
out = pbs.pcreate('-t', template, package)
else:
out = pbs.paster('create', '-t', template, '--config=%s' %
conffile, '--no-interactive', package)
manifest = open(os.path.join(dist_root, 'MANIFEST.in'), 'w')
try: # Handle namespace packages
parts = package.split('.')
parent_dir = parts[0]
except:
parent_dir = package
manifest.write(config.MANIFEST_IN % parent_dir)
manifest.close()
return dist_root, out._stdout | 4,311 |
def report_trend(old_report, new_report, html_tag):
"""
    Report the trend between the old and new reports
"""
if 'trend' in old_report and 'trend' in new_report:
old_trend = analysis.trend(old_report)
new_trend = analysis.trend(new_report)
if old_trend['short term'] != new_trend['short term']:
print(f'{html_tag["li_in"]}{html_tag["bold_in"]}Tendance court terme{html_tag["bold_out"]}: {old_trend["short term"]}/5 -> {new_trend["short term"]}/5{html_tag["li_out"]}')
else:
print(f'{html_tag["li_in"]}{html_tag["bold_in"]}Tendance court terme{html_tag["bold_out"]}: {new_trend["short term"]}/5{html_tag["li_out"]}')
if old_trend['mid term'] != new_trend['mid term']:
print(f'{html_tag["li_in"]}{html_tag["bold_in"]}Tendance moyen terme{html_tag["bold_out"]}: {old_trend["mid term"]}/5 -> {new_trend["mid term"]}/5{html_tag["li_out"]}')
else:
print(f'{html_tag["li_in"]}{html_tag["bold_in"]}Tendance moyen terme{html_tag["bold_out"]}: {new_trend["mid term"]}/5{html_tag["li_out"]}') | 4,312 |
def get_data_dir() -> Path:
"""
Get the pda data dir
"""
app_name = "pda"
app_author = "StarrFox"
cache_dir = Path(appdirs.user_data_dir(app_name, app_author))
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir | 4,313 |
def directory_structure_to_front_matter(file_path: str) -> dict[str, str]:
"""
Converts the directory structure of a recipe into a front matter.
"""
# Make sure the path is well-formed and normalised
path_to_recipe = os.path.normpath(file_path)
# Unpack the directory structure into variable names
*_, meal, difficulty, recipe_filename = path_to_recipe.split(os.sep)
# Set some front matter using the extracted data
return {
"layout": "recipe",
"difficulties": difficulty,
"meals": meal,
"originalfilename": recipe_filename,
"originalpath": os.path.join(meal, difficulty, recipe_filename),
} | 4,314 |
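# Example of the path-to-front-matter mapping above, assuming POSIX separators
# (the recipe path and category names are hypothetical):
front_matter = directory_structure_to_front_matter("recipes/dinner/easy/lasagne.md")
# {'layout': 'recipe', 'difficulties': 'easy', 'meals': 'dinner',
#  'originalfilename': 'lasagne.md', 'originalpath': 'dinner/easy/lasagne.md'}
print(front_matter)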
def is_gradle_build_python_test(file_name):
"""
    Return True if file_name is one of the python tests run during the gradle build, False otherwise.
:param file_name: file to test
"""
return file_name in ["gen_all.py", "test_gbm_prostate.py", "test_rest_api.py"] | 4,315 |
def ppis_as_cxs(ppis, cxs):
"""
Use the complex number to both prefix the protein id and add as an
attribute. Copy the original ids to the end for mapping in cytoscape.
"""
ppis = ppis_label_cxs(ppis, cxs)
# Requires that the last column be a list of complex ids. Replace that.
def pfx(id, cxnum):
return str(cxnum) + '_' + id
return [[pfx(p[0],cx), pfx(p[1],cx)] + p[2:-1] + [cx]
for p in ppis for cx in p[-1]] | 4,316 |
def patch_get(monkeypatch, mockresponse):
"""monkeypatch the requests.get function to return response dict for API calls. succesful API responses come from Tradier website.
:param mockresponse: [description]
:type mockresponse: [type]
:return: [description]
:rtype: [type]
:yield: [description]
:rtype: [type]
"""
class PatchGet:
def __init__(self, status, response_json_path):
self.mocked = mockresponse(status, response_json_path)
self.setter()
def mock_get(self, url, params, headers):
return self.mocked
def setter(self):
monkeypatch.setattr(requests, "get", self.mock_get)
yield PatchGet | 4,317 |
def test_reaction_auto2():
"""
init
auto
0 7
4 7
4 4
4 4
end
"""
print('init')
m = MyObject3()
print(m.report.get_mode())
loop.iter()
# Invoke the reaction by modifying foo
m.set_foo(3)
m.set_foo(4)
loop.iter()
# Or bar
m.set_bar(3)
m.set_bar(24)
m.set_bar(4)
m.set_bar(4)
loop.iter()
m.emit('spam')
loop.iter()
# Modifying foo, but value does not change: no reaction
m.set_foo(4)
loop.iter()
print('end') | 4,318 |
def test_compare_img_hist():
"""
Test Command
------------
$ python run_tests.py --module_name plot_playground.tests.test_img_helper:test_compare_img_hist --skip_jupyter 1
"""
_remove_test_img()
img = Image.new(mode='RGB', size=(50, 50), color='#ff0000')
img.save(TEST_IMG_PATH_1)
img.save(TEST_IMG_PATH_2)
img.close()
similarity = img_helper.compare_img_hist(
img_path_1=TEST_IMG_PATH_1,
img_path_2=TEST_IMG_PATH_2)
assert_equal(similarity, 1.0)
img = Image.new(mode='RGB', size=(50, 50), color='#00ff00')
img.save(TEST_IMG_PATH_2)
img.close()
similarity = img_helper.compare_img_hist(
img_path_1=TEST_IMG_PATH_1,
img_path_2=TEST_IMG_PATH_2)
assert_less_equal(similarity, 0.5)
_remove_test_img() | 4,319 |
def _split_out_parameters(initdoc):
"""Split documentation into (header, parameters, suffix)
Parameters
----------
initdoc : string
The documentation string
"""
# TODO: bind it to the only word in the line
p_res = __parameters_str_re.search(initdoc)
if p_res is None:
return initdoc, "", ""
else:
# Could have been accomplished also via re.match
# where new line is after :Parameters:
# parameters header index
ph_i = p_res.start()
# parameters body index
pb_i = p_res.end()
# end of parameters
try:
pe_i = initdoc.index('\n\n', pb_i)
except ValueError:
pe_i = len(initdoc)
result = initdoc[:ph_i].rstrip('\n '), \
initdoc[pb_i:pe_i], initdoc[pe_i:]
# XXX a bit of duplication of effort since handle_docstring might
# do splitting internally
return handle_docstring(result[0], polite=False).strip('\n'), \
textwrap.dedent(result[1]).strip('\n'), \
textwrap.dedent(result[2]).strip('\n') | 4,320 |
def test_steps_reject():
"""Test the correct rejection for an invalid steps Datapoint."""
acc_tester = AcceptanceTester('steps')
dp = Datapoint(datetime(2018, 1, 1, 12, 0, 0), -1)
assert acc_tester(dp) is False | 4,321 |
def print_command(cmd):
""" print command in custom window """
t_custom.insert(END, cmd + " ") | 4,322 |
def get_context() -> RequestContext:
""" See GlobalContextManager.get_context()
"""
return global_context_manager.get_context() | 4,323 |
def create_fixed(parent,fixed):
"""
    Checks the fixed field and, if valid, creates SKOS elements
    for each position in an OrderedCollection
:param parent: Parent element
:param fixed: Fixed position value, may include range notation
in format of xx-xx
"""
if fixed != 'n/a' and len(fixed) > 0:
label = etree.SubElement(parent,
'{%s}prefLabel' % ns.SKOS,
**{'{%s}lang' % ns.XML : 'en'})
label.text = 'fixed'
collection = etree.SubElement(parent,
'{%s}OrderedCollection' % ns.SKOS)
multiple_positions = FIXED_POS_RE.search(fixed)
if multiple_positions is not None:
range_values = multiple_positions.groups()
for value in range(int(range_values[0]),
int(range_values[-1])):
member = etree.SubElement(collection,
'{%s}member' % ns.SKOS)
member.text = u"%s" % value
else:
member = etree.SubElement(collection,
'{%s}member' % ns.SKOS)
member.text = fixed | 4,324 |
def crazy_lights(length=100, wait_time=1):
"""
Function to build a playlist of crazy blinking lights.
"""
# get the busylight opbject
bl = BusyLight()
random_colors = np.random.randint(0,255, size=(length,3))
for r,g,b in random_colors:
bl.set_color(r,g,b)
bl.add_to_playlist()
bl.play_sequence(wait_time=wait_time)
bl.close() | 4,325 |
def show_raid_configuration(ctx, profile, configuration):
""" Get the matching RAID profile or fail """
raid_recipe = RAIDRecipe(ctx.obj['client'])
config_data = raid_recipe.get_selected_configuration(configuration, profile=profile)
print(json.dumps(config_data, indent=4, sort_keys=True)) | 4,326 |
def extract(filepath, output, usebasename=False, outputfilenamer=None):
"""
Load and extract each part of MIME multi-part data as files from given data
as a file.
:param filepath: :class:`pathlib.Path` object represents input
:param output: :class:`pathlib.Path` object represents output dir
:param usebasename: Use the basename, not full path, when writing files
:param outputfilenamer: Callback fn takes `inf` and returns a filename
For example, it could return a filename based on `inf['location']`
:raises: ValueError
"""
if output == "-":
raise ValueError("Output dir must be given to extract")
if os.path.exists(output) and os.path.isfile(output):
raise OSError("Output '%s' already exists as a file!" % output)
os.makedirs(output)
for inf in load_itr(filepath):
filename = inf["filename"]
if usebasename:
filename = os.path.split(filename)[-1]
if outputfilenamer:
filename = outputfilenamer(inf)
outpath = os.path.join(output, filename)
outdir = os.path.dirname(outpath)
LOGGER.debug("Extract %s from %s", filename, filepath)
if not os.path.exists(outdir):
os.makedirs(outdir)
with open(outpath, "wb") as out:
out.write(inf["payload"]) | 4,327 |
def main():
"""Main entry point."""
current_dir = os.getcwd()
project_name = os.path.basename(current_dir)
parser = pbs.comments.Parser()
for filename in os.listdir(current_dir):
language = parser.infer_language(filename)
with open(filename, 'r') as source_file:
procedure_comments = parser.parse(source_file.readlines())
for procedure, comments in procedure_comments.iteritems():
answer = pbs.lookup.search(comments + " in " + language)
logging.info(
"Found this answer for procedure '%s' described as '%s':\n %s",
procedure, comments, answer)
pbs.build.ccompile(filename)
pbs.build.clink_many(current_dir, project_name) | 4,328 |
def Main():
"""Main generates the size difference between two targets.
"""
parser = argparse.ArgumentParser(
description='Size difference between two targets')
parser.add_argument(
'--source_project', required=True, help='The path to the source project')
parser.add_argument(
'--source_scheme', required=True, help='The scheme of the source project')
parser.add_argument(
'--target_project', required=True, help='The path to the target project')
parser.add_argument(
'--target_scheme', required=True, help='The scheme of the target project')
parser.add_argument(
'--build_timeout', default=None, required=False, help='Timeout to build testapps')
args = parser.parse_args()
source_size, target_size = GenerateSizeDifference(
args.source_project, args.source_scheme, args.target_project,
      args.target_scheme, args.build_timeout)
diff_size = source_size - target_size
if source_size > target_size:
print('{} is {} larger than {}'.format(args.source_project, diff_size,
args.target_project))
elif source_size == target_size:
print('{} and {} are the same size'.format(args.source_project,
args.target_project))
else:
print('{} is {} smaller than {}'.format(args.source_project, -1 * diff_size,
args.target_project)) | 4,329 |
def sched_yield(space):
""" Voluntarily relinquish the CPU"""
while True:
try:
res = rposix.sched_yield()
except OSError as e:
wrap_oserror(space, e, eintr_retry=True)
else:
return space.newint(res) | 4,330 |
def preformatted(s):
"""Return preformatted text."""
return _tag(s, "pre") | 4,331 |
def jitter_colors(rgb, d_brightness=0, d_contrast=0, d_saturation=0):
"""
Color jittering by randomizing brightness, contrast and saturation, in random order
Args:
rgb: Image in RGB format
Numpy array of shape (h, w, 3)
d_brightness, d_contrast, d_saturation: Alpha for blending drawn from [1 - d, 1 + d]
Nonnegative float
Optional; defaults to 0, i.e., no randomization
Returns:
rgb_out: Color-jittered image in RGB format
Numpy array of the same shape as input
"""
attrs = ['brightness', 'contrast', 'saturation']
ds = [d_brightness, d_contrast, d_saturation]
# In random order
ind = np.array(range(len(attrs)))
np.random.shuffle(ind) # in-place
rgb_out = deepcopy(rgb)
for idx in ind:
rgb_out = adjust_image_attribute(
rgb_out, attrs[idx], ds[idx], random=True)
return rgb_out | 4,332 |
def split_numpy_array(array, portion=None, size=None, shuffle=True):
"""
Split numpy array into two halves, by portion or by size.
Args:
array (np.ndarray): A numpy array to be splitted.
portion (float): Portion of the second half.
Ignored if `size` is specified.
size (int): Size of the second half.
        shuffle (bool): Whether or not to shuffle before splitting.
Returns:
tuple[np.ndarray]: Splitted two halves of the array.
"""
(a,), (b,) = split_numpy_arrays((array,), portion=portion, size=size,
shuffle=shuffle)
return a, b | 4,333 |
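# The helper split_numpy_arrays is not shown here. A minimal self-contained
# sketch of the same shuffle-then-slice idea for a single array might look like
# this (the name and exact behaviour are assumptions, not the library's code):
import numpy as np

def split_single_array(array, size, shuffle=True, seed=0):
    """Return (first_half, second_half) where the second half has `size` rows."""
    indices = np.arange(len(array))
    if shuffle:
        np.random.default_rng(seed).shuffle(indices)
    # Everything except the last `size` rows goes into the first half.
    return array[indices[:-size]], array[indices[-size:]]

data = np.arange(10).reshape(5, 2)
train, valid = split_single_array(data, size=2)
print(train.shape, valid.shape)  # (3, 2) (2, 2)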
def test_sinkhorn_consistency_sym_asym(solv, entropy, atol, p, m, reach):
"""Test if the symmetric and assymetric Sinkhorn
output the same results when (a,x)=(b,y)"""
entropy.reach = reach
cost = euclidean_cost(p)
a, x = generate_measure(2, 5, 3)
f_a, g_a = solv.sinkhorn_asym(
m * a, x, m * a, x, cost=cost, entropy=entropy
)
_, f_s = solv.sinkhorn_sym(m * a, x, cost=cost, entropy=entropy)
assert torch.allclose(
entropy.error_sink(f_a, f_s), torch.tensor([0.0]), atol=atol
)
assert torch.allclose(
entropy.error_sink(g_a, f_s), torch.tensor([0.0]), atol=atol
) | 4,334 |
def make_valid(layer):
"""update invalid shapes in postgres
"""
sql = f'UPDATE {layer} SET shape = ST_MakeValid(shape) WHERE ST_IsValid(shape) = false;'
unfixable_layers = ['utilities.broadband_service']
if layer in unfixable_layers:
return
try:
execute_sql(sql, config.DBO_CONNECTION)
except psycopg2.errors.UndefinedColumn:
#: table doesn't have shape field
pass | 4,335 |
def test_protmap():
"""CRG: extract protein-->iLocus mapping from GFF3"""
db = genhub.test_registry.genome('Dqcr')
mapping = {'DQUA011a006022P1': 'DquaILC-14465',
'DQUA011a006023P1': 'DquaILC-14466',
'DQUA011a006024P1': 'DquaILC-14467'}
infile = 'testdata/gff3/dqua-275-loci.gff3'
testmap = dict()
with open(infile, 'r') as instream:
for protid, locid in db.protein_mapping(instream):
testmap[protid] = locid
assert mapping == testmap, \
'protein mapping mismatch: %r %r' % (mapping, testmap) | 4,336 |
def constraint_layer(
stencils: Sequence[np.ndarray],
method: Method,
derivative_orders: Sequence[int],
constrained_accuracy_order: int = 1,
initial_accuracy_order: Optional[int] = 1,
grid_step: float = None,
dtype: Any = np.float32,
) -> tf.keras.layers.Layer:
"""Create a Keras layer for enforcing polynomial accuracy constraints."""
if constrained_accuracy_order:
return PolynomialAccuracy(
stencils,
method,
derivative_orders,
accuracy_order=constrained_accuracy_order,
bias_accuracy_order=initial_accuracy_order,
grid_step=grid_step,
dtype=dtype,
)
else:
if constrained_accuracy_order != 0:
raise ValueError('invalid constrained_accuracy_order')
return PolynomialBias(
stencils,
method,
derivative_orders,
initial_accuracy_order,
grid_step,
) | 4,337 |
def test_cancellation_with_infinite_duration(http_test_server_fixture):
"""Test that we can use signals to cancel execution."""
args = [
http_test_server_fixture.nighthawk_client_path, "--concurrency", "2",
http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json"
]
client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Thread(target=(lambda: _send_sigterm(client_process))).start()
stdout, stderr = client_process.communicate()
client_process.wait()
output = stdout.decode('utf-8')
asserts.assertEqual(client_process.returncode, 0)
parsed_json = json.loads(output)
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
asserts.assertCounterEqual(counters, "graceful_stop_requested", 2)
asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) | 4,338 |
def parse_excel_xml(xml_file=None, xml_string=None):
"""Return a list of the tables (2D arrays) in the Excel XML.
Provide either the path to an XML file, or a string of XML content.
"""
handler = ExcelHandler()
if xml_file is not None:
parse(xml_file, handler)
elif xml_string is not None:
parseString(xml_string, handler)
else:
raise ValueError("At least one of xml_file or xml_string should be"
" provided.")
return handler.tables | 4,339 |
def test_init_with_no_child_kernel(concat_kernel_function):
"""
Test that creating a `ConcatKernel` with no child kernel raises an exception.
"""
with pytest.raises(AssertionError) as exp:
concat_kernel_function([])
assert str(exp.value).find("There must be at least one child kernel.") >= 0 | 4,340 |
def load_stt_plugin(module_name):
"""Wrapper function for loading stt plugin.
Arguments:
module_name (str): Mycroft stt module name from config
Returns:
class: STT plugin class
"""
return load_plugin(module_name, PluginTypes.STT) | 4,341 |
def remove_all_autobash_sections_from_backup():
"""Remove all autobash sections from the backup file."""
PROFILES = 'profiles'
LAYOUTS = 'layouts'
AUTOBASH_STR = 'autobash-'
config = ConfigObj(TERMINATOR_CONFIG_BACKUP_FILE)
# remove autobash profiles
# print("REMOVING PROFILES")
for profile in list(config[PROFILES].keys()): # using list to prevent iterator
if profile.startswith(AUTOBASH_STR):
del config[PROFILES][profile]
# remove autobash layouts
# print("REMOVING LAYOUTS")
for layout in list(config[LAYOUTS].keys()): # using list to prevent iterator
if layout.startswith(AUTOBASH_STR):
del config[LAYOUTS][layout]
# pprint(config)
config.write() | 4,342 |
def writeToExportedModule(moduleName, prefix, exporModuleTpl, functionsList, outputDir):
"""
Write the binding file for the module
"""
filePath = os.path.join(outputDir, prefix + moduleName + ".cpp")
out = file (filePath, "wb")
out.write (exporModuleTpl.render(moduleName = moduleName, exportFunctions = functionsList))
out.close() | 4,343 |
def copy_nucleotide_peptide_sketches(
peptide_sketch_dir,
nucleotide_sketch_dir,
pre_sketch_id_outdir,
nucleotide_sketch_ids=NUCLEOTIDE_SKETCH_IDS,
peptide_sketch_ids=PEPTIDE_SKETCH_IDS,
select_cell_ids=None,
dryrun=False,
cell_id_fun=None
):
"""Copy both nucleotide and peptide sketches per sketch id
cell_id_fun : function
String manipulation function to clean cell id from the basename of the
sketch to have it match the list of select cell ids
dd """
if nucleotide_sketch_dir is not None:
_per_sketch_id_copy_sketches(
nucleotide_sketch_ids,
nucleotide_sketch_dir,
pre_sketch_id_outdir,
select_cell_ids,
dryrun,
cell_id_fun=cell_id_fun
)
if peptide_sketch_dir is not None:
_per_sketch_id_copy_sketches(
peptide_sketch_ids,
peptide_sketch_dir,
pre_sketch_id_outdir,
select_cell_ids,
dryrun,
cell_id_fun=cell_id_fun
) | 4,344 |
def backward_softmax(
x, dc_decomp=False, convex_domain={}, slope=V_slope.name, mode=F_HYBRID.name, previous=True, axis=-1, **kwargs
):
"""
Backward LiRPA of softmax
:param x:
:param dc_decomp:
:param convex_domain:
:param slope:
:param mode:
:param axis:
:return:
"""
if dc_decomp:
raise NotImplementedError()
# TO DO linear relaxation
raise NotImplementedError() | 4,345 |
def predict_label(model, data, as_prob=False):
"""Predicts the data target
Assumption: Positive class label is at position 1
Parameters
----------
    model : Tensorflow or PyTorch Model
Model object retrieved by :func:`load_model`
data : DataCatalog
Dataset used for predictions
Returns
-------
predictions : 2d numpy array with predictions
"""
print(f"Predicing label '{data.target}' of {data.name} dataset.")
features = data.encoded_normalized.drop(data.target, axis=1)
predictions = model.predict(features)
predictions = predictions[:, 1]
if not as_prob:
predictions = predictions.round()
acc = accuracy_score(data.raw[data.target], predictions.round())
print(f"Model accuracy is: {(100* acc).round(2)}%.")
return predictions | 4,346 |
def meshW_plan(insert):
"""
Inserts/retracts W mesh (post-M0M1)
insert bool: ``True`` if should insert, ``False`` to retract
"""
yield from _diagnostic_plan(insert,5) | 4,347 |
def show_subpath(subpath):
"""
    Use a converter to declare the variable's rule as the path type (like string, but it may contain slashes).
"""
# show the subpath after /path/
return 'Subpath %s' % escape(subpath) | 4,348 |
def backward_propagation(parameters, cache, X, Y):
"""
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
"""
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
W1 = parameters['W1']
W2 = parameters['W2']
# Retrieve also A1 and A2 from dictionary "cache".
A1 = cache['A1']
A2 = cache['A2']
# Backward propagation: calculate dW1, db1, dW2, db2.
dZ2 = A2 - Y
dW2 = (1 / m) * np.dot(dZ2, A1.T)
db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
dW1 = (1 / m) * np.dot(dZ1, X.T)
db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
grads = {'dW1': dW1, 'db1': db1, 'dW2': dW2, 'db2': db2}
return grads | 4,349 |
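# Shape check for backward_propagation above, using hypothetical parameters for
# a 2-unit hidden layer and m=3 examples (numpy is assumed to be imported as np):
rng = np.random.default_rng(0)
m = 3
parameters = {'W1': rng.standard_normal((2, 2)), 'W2': rng.standard_normal((1, 2))}
cache = {'A1': np.tanh(rng.standard_normal((2, m))), 'A2': rng.random((1, m))}
X = rng.standard_normal((2, m))
Y = (rng.random((1, m)) > 0.5).astype(float)
grads = backward_propagation(parameters, cache, X, Y)
print(grads['dW1'].shape, grads['db1'].shape)  # (2, 2) (2, 1)
print(grads['dW2'].shape, grads['db2'].shape)  # (1, 2) (1, 1)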
def MakeCdfFromHist(hist, name=''):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
name: string name for the data.
Returns:
Cdf object
"""
return MakeCdfFromItems(hist.Items(), name) | 4,350 |
def box_iou(boxes, clusters):
"""
Introduction
------------
    Compute the distance (IoU) between each box and the cluster centers
Parameters
----------
    boxes: all of the box data
    clusters: the cluster centers
"""
box_num = boxes.shape[0]
cluster_num = clusters.shape[0]
box_area = boxes[:, 0] * boxes[:, 1]
    # Each box's area is repeated 9 times, once for each of the 9 cluster centers
box_area = box_area.repeat(cluster_num)
box_area = np.reshape(box_area, [box_num, cluster_num])
cluster_area = clusters[:, 0] * clusters[:, 1]
cluster_area = np.tile(cluster_area, [1, box_num])
cluster_area = np.reshape(cluster_area, [box_num, cluster_num])
    # Compute the IoU of two rectangles. All rectangles are assumed to share their top-left corner at the origin, so the overlap area is simply the product of the minimum width and minimum height.
boxes_width = np.reshape(boxes[:, 0].repeat(cluster_num), [box_num, cluster_num])
clusters_width = np.reshape(np.tile(clusters[:, 0], [1, box_num]), [box_num, cluster_num])
min_width = np.minimum(clusters_width, boxes_width)
boxes_high = np.reshape(boxes[:, 1].repeat(cluster_num), [box_num, cluster_num])
clusters_high = np.reshape(np.tile(clusters[:, 1], [1, box_num]), [box_num, cluster_num])
min_high = np.minimum(clusters_high, boxes_high)
iou = np.multiply(min_high, min_width) / (box_area + cluster_area - np.multiply(min_high, min_width))
return iou | 4,351 |
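# Small sanity check for box_iou above with two boxes and three hypothetical
# cluster centres, each given as (width, height); numpy is assumed as np:
boxes = np.array([[2.0, 2.0],
                  [4.0, 6.0]])
clusters = np.array([[2.0, 2.0],
                     [4.0, 4.0],
                     [8.0, 8.0]])
iou = box_iou(boxes, clusters)
print(iou.shape)  # (2, 3)
print(iou[0, 0])  # 1.0 -- a box matches an identical cluster centre exactly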
def addr(arr):
""" Get address of numpy array's data """
return arr.__array_interface__['data'][0] | 4,352 |
def rank_five_cards(cards):
"""Returns an (array) value that represents a strength for a hand.
These can easily be compared against each other."""
# List of all card values
values = sorted([card.number for card in cards])
# Checks if hand is a straight
is_straight = all([values[i] == values[0] + i for i in range(5)])
# Additional straight check
if not is_straight:
# Weakest straight
is_straight = all(values[i] == values[0] + i for i in range(4)) and values[4] == 12
# Rotate values as the ace is weakest in this case
values = values[1:] + values[:1]
# Checks if hand is a flush
is_flush = all([card.suit == cards[0].suit for card in cards])
# Get card value counts
value_count = {value: values.count(value) for value in values}
    # Sort value counts by most occurring
sorted_value_count = sorted([(count, value) for value, count in value_count.items()],
reverse=True)
# Get all kinds (e.g. four of a kind, three of a kind, pair)
kinds = [value_count[0] for value_count in sorted_value_count]
# Get values for kinds
kind_values = [value_count[1] for value_count in sorted_value_count]
# Royal flush
if is_straight and is_flush and values[0] == 8:
return [ROYAL_FLUSH] + [str(value) for value in values]
# Straight flush
if is_straight and is_flush:
return [STRAIGHT_FLUSH] + kind_values
# Four of a kind
if kinds[0] == 4:
return [FOUR_OF_A_KIND] + kind_values
# Full house
if kinds[0] == 3 and kinds[1] == 2:
return [FULL_HOUSE] + kind_values
# Flush
if is_flush:
return [FLUSH] + kind_values
# Straight
if is_straight:
return [STRAIGHT] + kind_values
# Three of a kind
if kinds[0] == 3:
return [THREE_OF_A_KIND] + kind_values
# Two pair
if kinds[0] == 2 and kinds[1] == 2:
return [TWO_PAIR] + kind_values
# Pair
if kinds[0] == 2:
return [PAIR] + kind_values
# No pair
return [HIGH_CARD] + kind_values | 4,353 |
def test_data_p1_no_gas():
"""TODO."""
data: Data = Data.from_dict(json.loads(load_fixtures("data_p1_no_gas.json")))
assert data
assert data.smr_version == 50
assert data.meter_model == "ISKRA 2M550T-101"
assert data.wifi_ssid == "My Wi-Fi"
assert data.wifi_strength == 100
assert data.total_power_import_t1_kwh == 10830.511
assert data.total_power_import_t2_kwh == 2948.827
assert data.total_power_export_t1_kwh == 1285.951
assert data.total_power_export_t2_kwh == 2876.51
assert data.active_power_w == -543
assert data.active_power_l1_w == -676
assert data.active_power_l2_w == 133
assert data.active_power_l3_w == 0
assert data.total_gas_m3 is None
assert data.gas_timestamp is None | 4,354 |
def upload(d_list: List[Dict], upload_urls: List[str]):
"""Upload to FHIR server"""
for base_url in upload_urls:
for d in d_list:
# Reason for adding ID to url: https://www.hl7.org/fhir/http.html#update
url = f'{base_url}/{d["id"]}'
response = requests.put(url, json=d)
# Codes 400+ are errors
if int(response.status_code) >= 400 and int(response.status_code) != 422:
raise RuntimeError(
f'Got error {response.status_code} when uploading item with ID {d["id"]} to {url}: \n'
f'{response.text}')
# Unprocessable Entity: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/422
if int(response.status_code) == 422:
err_list = [x['diagnostics'] for x in json.loads(response.text)['issue']]
err = err_list[0] if len(err_list) == 1 else str(err_list)
print(err, file=sys.stderr) | 4,355 |
def action_path(args):
"""show path to selected backup"""
b = Restore()
b.evaluate_arguments(args)
sys.stdout.write('{}\n'.format(b.current_backup_path)) | 4,356 |
def _get_wmi_wbem():
"""Returns a WMI client connected to localhost ready to do queries."""
client, _ = _get_win32com()
if not client:
return None
wmi_service = client.Dispatch('WbemScripting.SWbemLocator')
return wmi_service.ConnectServer('.', 'root\\cimv2') | 4,357 |
def make_ndx(mpirun, scan_atoms, index_input, np=1):
"""
Python wrapper for gmx make_ndx
Parameters
----------
    index_input : str
        file (with extension) used for gmx make_ndx
    mpirun : bool
        Is this a multi-node run or not: gmx (False) vs gmx_mpi (True)
    scan_atoms : array of ints
        Indicates the atoms involved in the scan. Needed for running MD.
    np : int
        Number of processes to run mpirun on (Default 1 for non-mpi run)
"""
if mpirun == True:
mpi = "mpirun " + "np " + str(np) + " gmx_mpi"
elif mpirun == False:
mpi = "gmx"
else:
print ("mpirun only takes bool as input")
commands = [mpi, "make_ndx", "-f", index_input]
pipe_command=["cat","cat_make_ndx.txt"] # pick ff in working directory and TIP3P ** this should not be hardcoded lmao FIX **
ps = subprocess.Popen(pipe_command, stdout=subprocess.PIPE)
output = subprocess.check_output(commands, stdin=ps.stdout)
subprocess.run(commands)
# append scan atoms to end of file
    if len(scan_atoms) != 4:
raise Exception("Need 4 atoms to describe a dihedral")
w1 = "[ SCAN ]\n"
w2 = f'\t{scan_atoms[0]}\t{scan_atoms[1]}\t{scan_atoms[2]}\t{scan_atoms[3]}\n'
f1 = open("index.ndx", "a") # append mode
f1.write(w1+w2)
f1.close()
return | 4,358 |
def get_skmtea_instances_meta(version, group_instances_by=None) -> Dict[str, Any]:
"""
Args:
        group_instances_by (str, optional): How to group detection labels.
Currently only supports grouping by "supercategory".
"""
assert group_instances_by in [None, "supercategory"], f"group_by={group_instances_by}"
path_manager = env.get_path_manager()
if group_instances_by is None:
thing_ids = [k["id"] for k in SKMTEA_DETECTION_CATEGORIES]
thing_classes = [k["name"] for k in SKMTEA_DETECTION_CATEGORIES]
thing_colors = [k["color"] for k in SKMTEA_DETECTION_CATEGORIES]
elif group_instances_by == "supercategory":
things = {
k["supercategory_id"]: (k["supercategory"], k["color"])
for k in SKMTEA_DETECTION_CATEGORIES
}
thing_ids = list(things.keys())
thing_classes = [v[0] for v in things.values()]
thing_colors = [v[1] for v in things.values()]
else:
raise ValueError(f"{group_instances_by} not supported")
# Mapping from the incontiguous qDESS category id to an id in [0, N]
# N=15 generally, N=4 if group by supercategory
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
# Segmentation classes
# TODO: Add support for subselecting classes.
# seg_classes = [k["name"] for k in QDESS_SEGMENTATION_CATEGORIES]
# seg_colors = [k["color"] for k in QDESS_SEGMENTATION_CATEGORIES]
# seg_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
# seg_abbrevs = [k["abbrev"] for k in QDESS_SEGMENTATION_CATEGORIES]
paths = get_paths(version)
ret = {
# Detection
"group_instances_by": group_instances_by,
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
"scan_metadata": pd.read_csv(path_manager.get_local_path(paths.metadata_csv), index_col=0),
# This mask path is temporary. In the future, the segmentations will be made
# available directly through the recon h5 file.
"mask_gw_corr_dir": path_manager.get_local_path(paths.mask_gradwarp_corrected),
"version": version,
}
return ret | 4,359 |
def join_arrays(a, b):
"""
Joining Arrays Row-wise
Parameters
----------
a : array
One of the arrays
b : array
Second of the arrays
Returns
-------
arr : array
Joined two arrays row wise
"""
return (np.r_[a, b]) | 4,360 |
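# For example (numpy assumed as np), stacking a 2x2 array on top of a 1x2 array:
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
print(join_arrays(a, b))
# [[1 2]
#  [3 4]
#  [5 6]]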
def notify_user(dataset_obj_id, user_id) -> None:
"""Send an email notification to user
If the gui_user is specified, we will send the notification to the person
that is doing actions via the GUI. Otherwise, we will notify the user that
created the ContactJob.
"""
user = User.objects.get(id=user_id)
log.debug(f"notify_user {dataset_obj_id}")
subject = f"Dataset creation completed - {dataset_obj_id}"
relative_url = reverse('create_dataset.results', args=(dataset_obj_id,))
result_url = "http://voseq.com" + relative_url
content = "Your dataset has successfully completed. " \
"Please verify and download the results from: " \
f"{result_url}"
from_email = '[email protected]'
if user and user.email:
to_emails = [user.email] + [email for name, email in settings.ADMINS]
try:
send_mail(subject, content, from_email, to_emails)
except SMTPException:
log.exception("Failed to notify_user for dataset " + str(dataset_obj_id))
else:
log.debug("sent dataset status email to " + str(to_emails))
else:
log.debug('Cannot send notification email. '
'No user / email assigned to job ' + str(dataset_obj_id)) | 4,361 |
def test_list_g_month_day_max_length_nistxml_sv_iv_list_g_month_day_max_length_1_1(mode, save_output, output_format):
"""
Type list/gMonthDay is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/gMonthDay/Schema+Instance/NISTSchema-SV-IV-list-gMonthDay-maxLength-1.xsd",
instance="nistData/list/gMonthDay/Schema+Instance/NISTXML-SV-IV-list-gMonthDay-maxLength-1-1.xml",
class_name="NistschemaSvIvListGMonthDayMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 4,362 |
def psvd(a: np.ndarray):
"""Photonic SVD architecture
Args:
a: The matrix for which to perform the svd
Returns:
A tuple of singular values and the two corresponding SVD architectures :math:`U` and :math:`V^\\dagger`.
"""
l, d, r = svd(a)
return rectangular(l), d, rectangular(r) | 4,363 |
def vmatrix(vma):
""" write a variable zmatrix (bohr/radian) to a string (angstroms/degree)
"""
assert automol.zmatrix.v.is_valid(vma)
vma_str = automol.zmatrix.v.string(vma)
return vma_str | 4,364 |
def test_get_pdp_list():
"""
    Test that the function raises a TypeError if a list is in the input
"""
full_pipe = 1
df_1 = 2
feat = ['a', 'b']
with pytest.raises(TypeError):
pdp = tml.get_pdp(full_pipe, feat, df_1) | 4,365 |
def list_all(request,
next_function: Callable,
response_keyword='items') -> Iterator[Any]:
"""Execute GCP API `request` and subsequently call `next_function` until
there are no more results. Assumes that it is a list method and that
the results are under a `items` key."""
while True:
try:
response = request.execute(num_retries=config.API_RETRIES)
except googleapiclient.errors.HttpError as err:
raise utils.GcpApiError(err) from err
# Empty lists are omitted in GCP API responses
if response_keyword in response:
yield from response[response_keyword]
request = next_function(previous_request=request,
previous_response=response)
if request is None:
break | 4,366 |
def test_python_memoization(n=4):
"""Testing python memoization disable via config
"""
x = random_uuid(0)
for i in range(0, n):
foo = random_uuid(0)
assert foo.result() != x.result(
), "Memoized results were used when memoization was disabled" | 4,367 |
def set_above_compare_strategy(analyzer: SensorAvgThreshAnalyzer):
"""
Sets the analyzer's compare strategy as a "greater than" comparison.
:param analyzer: Sensor analyzer to set the strategy to.
"""
analyzer.set_compare_strategy(operator.gt) | 4,368 |
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str | 4,369 |
def dummy_func_1(input_array):
"""
a sample fitness function that uses the closeness of fit to a polynomial with random coefficients to calculate
fitness (loss)
Args:
input_array(array): iterable of 16 floats between 0 and 1
Returns:
loss(float): an approximation of how close the polynomial with coefficients determined by input is to the target
polynomial
(Ben)
"""
n_samples = 10_000
test_range = np.linspace(0, 1, n_samples)
target = polyval(test_range, TARGET, tensor=False)
output = polyval(test_range, input_array, tensor=False)
loss = np.sum(abs(target - output)) / n_samples
return -1 * loss | 4,370 |
def validation_loop(sess, model, ops, handles, valid_summary_writer, external=False):
""" Iterates over the validation data, calculating a trained model's cross-entropy. """
# Unpack OPs
batch_loss_op, sentence_losses_op = ops
# Initialize metrics
valid_losses = list()
sentence_losses = list()
valid_global_step = 0
# Unpack iterator variables
if handles is not None:
handle, valid_handle = handles
feed_dict = {handle: valid_handle,
model.training: False}
else:
feed_dict = {model.training: False}
logging.info('Estimating validation loss ... ')
while True:
try:
# Run a forward pass through the model
# Note, per-sentence losses used by the model are already length-normalized
fetches = sess.run([model.global_step, batch_loss_op, sentence_losses_op], feed_dict=feed_dict)
if fetches is not None:
valid_losses += [fetches[1]]
sentence_losses += fetches[2].tolist()
valid_global_step = fetches[0]
if len(sentence_losses) > 0:
logging.info('Evaluated {:d} sentences'.format(len(sentence_losses)))
except tf.errors.OutOfRangeError:
break
# Report
total_valid_loss = sum(valid_losses)
mean_valid_loss = total_valid_loss / len(valid_losses)
valid_perplexity = np.exp(mean_valid_loss)
if not external:
current_time = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
logging.info('-' * 20)
logging.info('{:s}[VALID] Loss/ word {:.4f} | Perplexity: {:.4f} | Sentence total {:d}'
.format(current_time, mean_valid_loss, valid_perplexity, len(sentence_losses)))
# Write summaries
if valid_summary_writer:
valid_loss_summary = \
tf.Summary(value=[tf.Summary.Value(tag='validation_loss', simple_value=mean_valid_loss)])
valid_perplexity_summary = \
tf.Summary(value=[tf.Summary.Value(tag='validation_perplexity', simple_value=valid_perplexity)])
valid_summary_writer.add_summary(valid_loss_summary, global_step=valid_global_step)
valid_summary_writer.add_summary(valid_perplexity_summary, global_step=valid_global_step)
return mean_valid_loss, valid_perplexity, sentence_losses, valid_global_step | 4,371 |
def tile_image (layer, z, x, y, start_time, again=False, trybetter = True, real = False):
"""
    Returns the requested tile image.
again - is this a second pass on this tile?
trybetter - should we try to combine this tile from better ones?
real - should we return the tile even in not good quality?
"""
x = x % (2 ** (z-1))
if y<0 or y >= (2 ** (z-1)):
return None
if not bbox.bbox_is_in(projections.bbox_by_tile(z,x,y,layer["proj"]), layer.get("data_bounding_box",config.default_bbox), fully=False):
return None
global cached_objs, cached_hist_list
if "prefix" in layer:
if (layer["prefix"], z, x, y) in cached_objs:
return cached_objs[(layer["prefix"], z, x, y)]
if layer.get("cached", True):
local = config.tiles_cache + layer["prefix"] + "/z%s/%s/x%s/%s/y%s."%(z, x/1024, x, y/1024,y)
ext = layer["ext"]
if "cache_ttl" in layer:
for ex in [ext, "dsc."+ext, "ups."+ext, "tne"]:
f = local+ex
if os.path.exists(f):
if (os.stat(f).st_mtime < (time.time()-layer["cache_ttl"])):
os.remove(f)
gpt_image = False
try:
"trying to create local cache directory, if it doesn't exist"
os.makedirs("/".join(local.split("/")[:-1]))
except OSError:
pass
if not os.path.exists(local+"tne") and not os.path.exists(local+"lock"):
if os.path.exists(local+ext): # First, look for tile in cache
try:
im1 = Image.open(local+ext)
im1.is_ok = True
return im1
except IOError:
if os.path.exists(local+"lock"):
return None
else:
os.remove(local+ext) # # Cached tile is broken - remove it
if layer["scalable"] and (z<layer.get("max_zoom", config.default_max_zoom)) and trybetter: # Second, try to glue image of better ones
if os.path.exists(local+"ups."+ext):
try:
im = Image.open(local+"ups."+ext)
im.is_ok = True
return im
except IOError:
pass
ec = ImageColor.getcolor(layer.get("empty_color", config.default_background), "RGBA")
ec = (ec[0],ec[1],ec[2],0)
im = Image.new("RGBA", (512, 512), ec)
im1 = tile_image(layer, z+1,x*2,y*2, start_time)
if im1:
im2 = tile_image(layer, z+1,x*2+1,y*2, start_time)
if im2:
im3 = tile_image(layer, z+1,x*2,y*2+1, start_time)
if im3:
im4 = tile_image(layer, z+1,x*2+1,y*2+1, start_time)
if im4:
im.paste(im1,(0,0))
im.paste(im2,(256,0))
im.paste(im3,(0,256))
im.paste(im4,(256,256))
im = im.resize((256,256),Image.ANTIALIAS)
if layer.get("cached", True):
try:
im.save(local+"ups."+ext)
except IOError:
pass
im.is_ok = True
return im
if not again:
if "fetch" in layer:
delta = (datetime.datetime.now() - start_time)
delta = delta.seconds + delta.microseconds/1000000.
if (config.deadline > delta) or (z < 4):
im = fetchers.fetch(z,x,y,layer) # Try fetching from outside
if im:
im.is_ok = True
return im
if real and (z>1):
im = tile_image(layer, z-1, int(x/2), int(y/2), start_time, again=False, trybetter=False, real=True)
if im:
im = im.crop((128 * (x % 2), 128 * (y % 2), 128 * (x % 2) + 128, 128 * (y % 2) + 128))
im = im.resize((256,256), Image.BILINEAR)
im.is_ok = False
return im
else:
if "fetch" in layer:
delta = (datetime.datetime.now() - start_time)
delta = delta.seconds + delta.microseconds/1000000.
if (config.deadline > delta) or (z < 4):
im = fetchers.fetch(z,x,y,layer) # Try fetching from outside
if im:
im.is_ok = True
return im | 4,372 |
def register_geoserver_db(res_id, db):
"""
Attempts to register a GeoServer layer
"""
geoserver_namespace = settings.DATA_SERVICES.get("geoserver", {}).get('NAMESPACE')
geoserver_url = settings.DATA_SERVICES.get("geoserver", {}).get('URL')
geoserver_user = settings.DATA_SERVICES.get("geoserver", {}).get('USER')
geoserver_pass = settings.DATA_SERVICES.get("geoserver", {}).get('PASSWORD')
geoserver_directory = settings.DATA_SERVICES.get("geoserver", {}).get('IRODS_DIR')
geoserver_auth = requests.auth.HTTPBasicAuth(
geoserver_user,
geoserver_pass
)
workspace_id = f"{geoserver_namespace}-{res_id}"
headers = {
"content-type": "application/json"
}
if any(i in db['layer_name'] for i in [".", ","]):
return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"], "message": "Error: Unable to register GeoServer layer."}
rest_url = f"{geoserver_url}/workspaces/{workspace_id}/{db['store_type']}/{db['layer_name'].replace('/', ' ')}/external.{db['file_type']}"
data = f"file://{geoserver_directory}/{db['hs_path']}"
response = requests.put(rest_url, data=data, headers=headers, auth=geoserver_auth)
if response.status_code != 201:
return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"], "message": "Error: Unable to register GeoServer layer."}
rest_url = f"{geoserver_url}/workspaces/{workspace_id}/{db['store_type']}/{db['layer_name'].replace('/', ' ')}/{db['layer_group']}/{db['file_name']}.json"
response = requests.get(rest_url, headers=headers, auth=geoserver_auth)
try:
if json.loads(response.content.decode('utf-8'))[db["verification"]]["enabled"] is False:
return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"], "message": "Error: Unable to register GeoServer layer."}
except:
return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"], "message": "Error: Unable to register GeoServer layer."}
bbox = json.loads(response.content)[db["verification"]]["nativeBoundingBox"]
data = response.content.decode('utf-8').replace('"name":"' + db["file_name"] + '"', '"name":"' + db["layer_name"].replace("/", " ") + '"')
response = requests.put(rest_url, headers=headers, auth=geoserver_auth, data=data)
if response.status_code != 200:
return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"], "message": "Error: Unable to register GeoServer layer."}
if db["layer_type"] == "GeographicRaster":
try:
hydroshare_url = "/".join(settings.HYDROSHARE_URL.split("/")[:-1])
layer_vrt_url = f"{hydroshare_url}/resource/{'.'.join(db['hs_path'].split('.')[:-1])}.vrt"
response = requests.get(layer_vrt_url)
vrt = etree.fromstring(response.content.decode('utf-8'))
layer_max = None
layer_min = None
layer_ndv = None
for element in vrt.iterfind(".//MDI"):
if element.get("key") == "STATISTICS_MAXIMUM":
layer_max = element.text
if element.get("key") == "STATISTICS_MINIMUM":
layer_min = element.text
try:
layer_ndv = vrt.find(".//NoDataValue").text
except:
layer_ndv = None
if layer_max is not None and layer_min is not None and layer_min < layer_max and layer_ndv is not None:
layer_style = get_layer_style(layer_max, layer_min, layer_ndv, db["layer_name"].replace("/", " "))
rest_url = f"{geoserver_url}/workspaces/{workspace_id}/styles"
headers = {"content-type": "application/vnd.ogc.sld+xml"}
response = requests.post(rest_url, data=layer_style, auth=geoserver_auth, headers=headers)
if response.status_code == 201:
rest_url = f"{geoserver_url}/layers/{workspace_id}:{db['layer_name'].replace('/', ' ')}"
headers = {"content-type": "application/json"}
body = '{"layer": {"defaultStyle": {"name": "' + db["layer_name"].replace("/", " ") + '", "href":"https:\/\/geoserver.hydroshare.org\/geoserver\/rest\/styles\/' + db["layer_name"].replace("/", " ") + '.json"}}}'
response = requests.put(rest_url, data=body, auth=geoserver_auth, headers=headers)
except Exception as e:
pass
return {"success": True, "type": db["layer_type"], "layer_name": db["layer_name"], "message": f"{'/'.join((geoserver_url.split('/')[:-1]))}/{workspace_id}/wms?service=WMS&version=1.1.0&request=GetMap&layers={workspace_id}:{urllib.parse.quote(db['layer_name'].replace('/', ' '))}&bbox={bbox['minx']}%2C{bbox['miny']}%2C{bbox['maxx']}%2C{bbox['maxy']}&width=612&height=768&srs={bbox['crs']}&format=application/openlayers"} | 4,373 |
def move_file(source,destination):
"""perform mv command to move a file from sourc to destination
Returns True if move is successful
"""
#print("MOV:"+source+"-->"+destination)
mv_cmd=['mv',source,destination]
if not getReturnStatus(mv_cmd):
return False
return True | 4,374 |
def test_config_constructor():
"""Test Constructor"""
cfg = ConfigOptions()
# state = cfg.__getstate__()
assert cfg.loglevel.name == "INFO" | 4,375 |
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
iqr = stats.scoreatpercentile(a, 75)-stats.scoreatpercentile(a, 25)
h = 2*iqr/(len(a)**(1/3))
bins=int(np.ceil((a.max()-a.min())/h)) if h!=0 else int(np.sqrt(a.size))
return bins | 4,376 |
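# Quick check of the Freedman-Diaconis rule above on a normal sample; the exact
# bin count varies with the draw (numpy and scipy.stats assumed, as in the snippet):
sample = np.random.default_rng(0).standard_normal(1000)
# Bin width h = 2 * IQR / n**(1/3); number of bins = ceil((max - min) / h).
print(_freedman_diaconis_bins(sample))  # typically around 24 bins for n=1000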
def to_ascii_bytes(string):
"""Convert unicode to ascii byte string."""
return bytes(string, 'ascii') if PY3 else bytes(string) | 4,377 |
def get_user(user_id):
""" get a user """
app.logger.debug("get_user({0})".format(user_id))
try:
response = app.usersClient.get_user(user_id)
return jsonify(response)
except OktaError as e:
message = {
"error_causes": e.error_causes,
"error_summary": e.error_summary,
"error_id": e.error_id,
"error_code": e.error_code
}
return make_response(jsonify(message), e.status_code) | 4,378 |
def matrix_conv_both_methods_from_avg(n_realz, input_folder, mapping,
v_tuple, t_tuple,
prefix='real_', numbered=True, verbose=False):
"""
    Convergence of the aggregate transition matrix, both with and without the frequency,
    corresponding to the stencil method and the extended stencil method respectively
:param lag_array:
:param n_realz:
:param input_folder:
:param mapping:
:param time_step:
:param prefix:
:param numbered:
:param verbose:
:return:
"""
if (not numbered) and n_realz>1:
        raise ValueError('Expecting only one file when no numbers are used for the input data')
# unpack final transition matrices
v_mat, v_mat_extend = v_tuple
t_mat, t_mat_extend = t_tuple
v_log_edges = mapping.v_log_edges
n_v_class = mapping.n_abs_v_classes
n_theta_class = mapping.n_theta_classes
theta_edges = mapping.theta_edges
v_output = np.zeros((n_v_class, n_v_class))
theta_output = np.zeros((n_theta_class, n_theta_class))
v_output_nofreq = np.zeros((n_v_class, n_v_class))
theta_output_nofreq = np.zeros((n_theta_class, n_theta_class))
total_transitions = 0
lag = 1
v_norm, v_norm_nofreq, t_norm, t_norm_nofreq, n_transition = [[] for _ in range(5)]
for j in range(n_realz):
print 'realization number '+str(j)
start_idx = 0
# load the polar coordinates file
data_path = os.path.join(input_folder, 'avg_polar_' + str(j) + '.npz')
data = np.load(data_path)
big_v, big_theta, big_f, ptr_list = data['V'], data['Theta'], data['F'], data['ptr']
for i in ptr_list:
new_v, new_theta, new_f = big_v[start_idx:i], big_theta[start_idx:i], big_f[start_idx:i]
length = i - start_idx
start_idx = i
if len(new_v) > lag:
class_v = np.array(mapping.find_1d_class_idx(np.log(new_v), v_log_edges), dtype=int)
class_theta = np.array(mapping.find_1d_class_idx(new_theta, theta_edges), dtype=int)
count_matrix_with_freq_one_trajectory(v_output, lag, class_v, new_f)
count_matrix_with_freq_one_trajectory(theta_output, lag, class_theta, new_f)
v_current = normalize_columns(v_output)
t_current = normalize_columns(theta_output)
v_norm.append(np.linalg.norm(v_current-v_mat))
t_norm.append(np.linalg.norm(t_current-t_mat))
# get the transition matrices for the extended method (v, theta, f) ->
# input (v,theta)
count_matrix_one_trajectory(v_output_nofreq, lag, class_v)
count_matrix_one_trajectory(theta_output_nofreq, lag, class_theta)
v_current = normalize_columns(v_output_nofreq)
t_current = normalize_columns(theta_output_nofreq)
v_norm_nofreq.append(np.linalg.norm(v_current-v_mat_extend))
t_norm_nofreq.append(np.linalg.norm(t_current-t_mat_extend))
total_transitions += length
n_transition.append(total_transitions)
return v_norm, t_norm, v_norm_nofreq, t_norm_nofreq, n_transition | 4,379 |
def zipper(sequence):
"""Given a sequence return a list that has the same length as the original
sequence, but each element is now a list with an integer and the original
element of the sequence."""
n = len(sequence)
rn = range(n)
data = zip(rn,sequence)
return data | 4,380 |
def normalizeRows(x):
"""
    Implement a function that normalizes each row of the matrix x (to have unit length)
Argument:
x -- A numpy matrix of shape (n, m)
Returns:
x -- The normalized (by row) numpy matrix
"""
x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
x = x / x_norm
return x | 4,381 |
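# Hedged usage sketch for normalizeRows() above (assumes numpy is imported as
# np, as elsewhere in this file): each row is scaled to unit Euclidean length,
# so [0, 3] becomes [0, 1] and [4, 3] becomes [0.8, 0.6].
print(normalizeRows(np.array([[0.0, 3.0], [4.0, 3.0]])))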
def weat_p_value(X, Y, A, B, embd, sample = 1000):
"""Computes the one-sided P value for the given list of association and target word pairs
Arguments
X, Y : List of association words
A, B : List of target words
embd : Dictonary of word-to-embedding for all words
sample : Number of random permutations used.
Returns
"""
size_of_permutation = min(len(X), len(Y))
X_Y = X + Y
test_stats_over_permutation = []
Xmat = np.array([embd[w.lower()] for w in X if w.lower() in embd])
Ymat = np.array([embd[w.lower()] for w in Y if w.lower() in embd])
Amat = np.array([embd[w.lower()] for w in A if w.lower() in embd])
Bmat = np.array([embd[w.lower()] for w in B if w.lower() in embd])
if not sample:
permutations = combinations(X_Y, size_of_permutation)
else:
permutations = [random_permutation(X_Y, size_of_permutation) for s in range(sample)]
for Xi in permutations:
Yi = filterfalse(lambda w:w in Xi, X_Y)
Ximat = np.array([embd[w.lower()] for w in Xi if w.lower() in embd])
Yimat = np.array([embd[w.lower()] for w in Yi if w.lower() in embd])
test_stats_over_permutation.append(test_statistic(Ximat, Yimat, Amat, Bmat))
unperturbed = test_statistic(Xmat, Ymat, Amat, Bmat)
is_over = np.array([o > unperturbed for o in test_stats_over_permutation])
return is_over.sum() / is_over.size | 4,382 |
def plot_pauli_bar_rep_of_state(state_pl_basis, ax, labels, title):
"""
    Visualize a quantum state in the Pauli-Liouville basis. The magnitudes of the operator
    coefficients are represented by the heights of the bars in the bar graph.
:param numpy.ndarray state_pl_basis: The quantum state represented in the Pauli-Liouville basis.
:param ax: The matplotlib axes.
:param labels: The labels for the operator basis states.
:param title: The title for the plot.
"""
dim = len(labels)
im = ax.bar(np.arange(dim) - .4, np.real(state_pl_basis), width=.8)
ax.set_xticks(range(dim))
ax.set_xlabel("Pauli Operator")
ax.set_ylabel("Coefficient")
ax.set_title(title)
ax.set_xticklabels(labels, rotation=45)
ax.grid(False) | 4,383 |
def toggle_nullclines():
"""Make an interactive plot of nullclines and fixed points of
the Gardner-Collins synthetic toggle switch.
"""
# Set up sliders
params = [
dict(
name="βx", start=0.1, end=20, step=0.1, value=10, long_name="beta_x_slider",
),
dict(
name="βy", start=0.1, end=20, step=0.1, value=10, long_name="beta_y_slider",
),
dict(name="n", start=1, end=10, step=0.1, value=4, long_name="n_slider"),
]
sliders = [
bokeh.models.Slider(
start=param["start"],
end=param["end"],
value=param["value"],
step=param["step"],
title=param["name"],
width=150,
)
for param in params
]
# Build base plot with starting parameters
beta = 10
n = 4
# Compute nullclines
x_y = np.linspace(0, 20, 400)
y_x = np.linspace(0, 20, 400)
x_x = beta / (1 + y_x ** n)
y_y = beta / (1 + x_y ** n)
cds = bokeh.models.ColumnDataSource(data=dict(x_x=x_x, x_y=x_y, y_x=y_x, y_y=y_y))
# Make the plot
p = bokeh.plotting.figure(
frame_height=250,
frame_width=250,
x_axis_label="x",
y_axis_label="y",
x_range=[-1, 20],
y_range=[-1, 20],
)
p.line(x="x_x", y="y_x", source=cds, line_width=2, legend_label="x nullcline")
p.line(
x="x_y",
y="y_y",
source=cds,
line_width=2,
color="orange",
legend_label="y nullcline",
)
cds_stable = bokeh.models.ColumnDataSource(
dict(x=[0.0009999, 9.99999999999], y=[9.99999999999, 0.0009999])
)
cds_unstable = bokeh.models.ColumnDataSource(
dict(x=[1.533012798623252], y=[1.533012798623252])
)
p.circle(source=cds_stable, x="x", y="y", color="black", size=10)
p.circle(
source=cds_unstable,
x="x",
y="y",
line_color="black",
fill_color="white",
line_width=2,
size=10,
)
# Callback (uses JavaScript)
js_code = jsfuns["rootfinding"] + jsfuns["toggle_nullclines"] + "callback()"
callback = bokeh.models.CustomJS(
args=dict(cds=cds, cdsStable=cds_stable, cdsUnstable=cds_unstable), code=js_code
)
# We use the `js_on_change()` method to call the custom JavaScript code.
for param, slider in zip(params, sliders):
callback.args[param["long_name"]] = slider
slider.js_on_change("value", callback)
# Return layout
return bokeh.layouts.row(
p,
bokeh.models.Spacer(width=30),
bokeh.layouts.column(bokeh.models.Spacer(height=40), *sliders),
) | 4,384 |
def write_smiles_to_file(f_name, smiles):
"""Write dataset to a file.
Parameters
----------
f_name : str
Path to create a file of molecules, where each line of the file
is a molecule in SMILES format.
smiles : list of str
List of SMILES
"""
with open(f_name, 'w') as f:
for s in smiles:
f.write(s + '\n') | 4,385 |
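# Hedged usage sketch for write_smiles_to_file() above (the path and SMILES
# strings are illustrative assumptions): writes a three-line file with one
# molecule per line.
write_smiles_to_file('example_smiles.smi', ['CCO', 'c1ccccc1', 'CC(=O)O'])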
def is_chinese_word_add_number(s):
"""中文混数字"""
if len(s) == 0:
return False
else:
for w in s:
if is_chinese(w) == False and is_number(w) == False:
return False
return True | 4,386 |
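# Hedged usage sketch for is_chinese_word_add_number() above (is_chinese and
# is_number are helpers defined elsewhere in the module, so this is only a
# doctest-style illustration):
# is_chinese_word_add_number("第3章")  -> True  (Chinese characters plus a digit)
# is_chinese_word_add_number("abc123") -> False (contains Latin letters)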
def get_rack_id_by_label(rack_label):
"""
Find the rack id for the rack label
Returns:
rack_id or None
"""
rack_id = None
session = persistent_mgr.create_database_session()
rack = persistent_mgr.get_rack_by_label(session, rack_label)
if rack:
rack_id = rack.rack_id
session.close()
return rack_id | 4,387 |
def cat(xs: torch.Tensor, lx: torch.Tensor) -> torch.Tensor:
"""Cat the padded xs via lengths lx
Args:
xs (torch.FloatTensor): of size (N, T, V)
lx (torch.LongTensor): of size (N, ), whose elements are (lx0, lx1, ...)
Return:
x_gather (torch.FloatTensor): size (lx0+lx1+..., V)
"""
assert xs.dtype in [torch.float, torch.float16, torch.float64], f"expect xs to be torch.float type, instead of {xs.dtype}"
assert xs.size(0) == lx.size(0)
return _GatherCat.apply(xs.contiguous(), lx.to(device=xs.device, dtype=torch.int32)) | 4,388 |
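# Hedged reference sketch for cat() above (not the original implementation,
# which relies on the custom _GatherCat autograd op): the same gather-and-cat
# result can be produced with plain indexing, truncating each padded sequence
# to its true length before concatenation.
def cat_reference(xs: torch.Tensor, lx: torch.Tensor) -> torch.Tensor:
    return torch.cat([xs[n, :int(lx[n])] for n in range(xs.size(0))], dim=0)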
def rule(n: int) -> dict:
"""Implement one of the 256 rules of elementary cellular automata.
Args:
n: The id of the rule (1-256).
Returns:
A mapping from a tuple of 3 cellvalues to a single cell value.
"""
assert n > 0 and n < 257, "must choose a rule between 1 and 256"
values = to_bin(n)
return {
s: v
for s, v
in zip(STATES, values)
} | 4,389 |
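# Hedged illustration for rule() above: STATES and to_bin are defined elsewhere
# in the module, so the definitions below are assumptions used only to show the
# intended mapping (8 neighbourhoods -> 8 bits of the rule id).
STATES_DEMO = [(1, 1, 1), (1, 1, 0), (1, 0, 1), (1, 0, 0),
               (0, 1, 1), (0, 1, 0), (0, 0, 1), (0, 0, 0)]

def to_bin_demo(n: int) -> list:
    # 8-bit binary expansion, most significant bit first
    return [int(b) for b in format(n, '08b')]

rule_110_demo = {s: v for s, v in zip(STATES_DEMO, to_bin_demo(110))}
# under this bit ordering, rule_110_demo[(1, 1, 0)] == 1 and
# rule_110_demo[(1, 1, 1)] == 0, matching Wolfram's rule 110 table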
def soundtone_type(value):
"""
    Parse tone sound parameters from args.
    value: 'square:90hz,10s,100%'
    returns: {'waveform': 'square', 'frequency': 90, 'duration': 10, 'amplitude': 1.0}
"""
abbr_map = {"hz": "frequency", "%": "amplitude", "s": "duration"}
tone_form, generator_raw_params = value.lower().split(":", 1)
parameters = {"waveform": tone_form}
for param in generator_raw_params.split(","):
match = re.match(r"(\d+)(\D+)$", param)
if not match:
            raise argparse.ArgumentTypeError(f"invalid tone parameter '{param}' in '{generator_raw_params}'.")
param_name, param_value = abbr_map[match.group(2)], int(match.group(1))
if param_name == "amplitude":
param_value = param_value / 100
parameters[param_name] = param_value
return parameters | 4,390 |
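# Hedged usage sketch for soundtone_type() above (assumes re and argparse are
# imported at module level): a 440 Hz sine tone lasting 2 s at 80% amplitude
# parses to numeric fields, with the amplitude rescaled to the 0-1 range.
print(soundtone_type("sine:440hz,2s,80%"))
# {'waveform': 'sine', 'frequency': 440, 'duration': 2, 'amplitude': 0.8}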
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
"""Computes ROUGE-L (sentence level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
reference_words = _split_into_words(reference_sentences)
evaluated_words = _split_into_words(evaluated_sentences)
m = len(reference_words)
n = len(evaluated_words)
lcs = _len_lcs(evaluated_words, reference_words)
return _f_p_r_lcs(lcs, m, n) | 4,391 |
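# Hedged sketch of the helpers assumed by rouge_l_sentence_level() above; the
# real _len_lcs and _f_p_r_lcs live elsewhere in the module, and beta=1.2 here
# is purely an illustrative assumption for the F_lcs formula in the docstring.
def _len_lcs_demo(x, y):
    # classic dynamic-programming longest-common-subsequence length
    table = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i, xi in enumerate(x, 1):
        for j, yj in enumerate(y, 1):
            if xi == yj:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[len(x)][len(y)]

def _f_p_r_lcs_demo(lcs, m, n, beta=1.2):
    r_lcs = lcs / m
    p_lcs = lcs / n
    denom = r_lcs + (beta ** 2) * p_lcs
    return ((1 + beta ** 2) * r_lcs * p_lcs) / denom if denom > 0 else 0.0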
def median(array, width=None, axis=None, even=False):
"""Replicate the IDL ``MEDIAN()`` function.
Parameters
----------
array : array-like
Compute the median of this array.
width : :class:`int`, optional
Size of the neighborhood in which to compute the median (*i.e.*,
perform median filtering). If omitted, the median of the whole
array is returned.
axis : :class:`int`, optional
Compute the median over this axis for a multi-dimensional array. If
        omitted, the median over the entire array will be returned. If
set, this function will behave as though `even` is ``True``.
even : :class:`bool`, optional
If set to ``True``, the median of arrays with an even number of elements
will be the average of the middle two values.
Returns
-------
array-like
The median of the array.
Raises
------
:exc:`ValueError`
If `width` is set, and the input `array` is not 1 or 2 dimensional.
Notes
-----
* For arrays with an even number of elements, the :func:`numpy.median`
function behaves like ``MEDIAN(array, /EVEN)``, so the absence of
the `even` keyword has to turn *off* that behavior.
* For median filtering, this uses :func:`scipy.signal.medfilt` and
:func:`scipy.signal.medfilt2d` under the hood, but patches up the
values on the array boundaries to match the return values of the
IDL ``MEDIAN()`` function.
"""
import numpy as np
from scipy.signal import medfilt, medfilt2d
if width is None:
if axis is None:
f = array.flatten()
if f.size % 2 == 1 or even:
return np.median(array)
else:
i = f.argsort()
return f[i[f.size//2]]
else:
return np.median(array, axis=axis)
else:
if array.ndim == 1:
medarray = medfilt(array, min(width, array.size))
istart = int((width - 1)/2)
iend = array.size - int((width + 1)/2)
i = np.arange(array.size)
w = (i < istart) | (i > iend)
medarray[w] = array[w]
return medarray
elif array.ndim == 2:
medarray = medfilt2d(array, min(width, array.size))
istart = int((width-1)/2)
iend = (array.shape[0] - int((width+1)/2), array.shape[1] - int((width+1)/2))
i = np.arange(array.shape[0])
j = np.arange(array.shape[1])
w = ((i < istart) | (i > iend[0]), (j < istart) | (j > iend[1]))
medarray[w[0], :] = array[w[0], :]
medarray[:, w[1]] = array[:, w[1]]
return medarray
else:
raise ValueError("Invalid number of dimensions for input array!") | 4,392 |
def test_partitions(os_partition_table):
""" check that partion_table returns same info as /proc/partitions """
    # independent tests that work as a non-root user
    #
    # get an independent list of partitions from the kernel
re_part = re.compile(' (sd[a-z][1-9])$')
lines_out = os_one_liner('cat /proc/partitions')
proc_parts = [] # partitions from /proc/partitions
for line in lines_out:
if re_part.search(line):
proc_parts += [re_part.search(line).group(1)]
# Are partitions from proc_parts in partition_table
for d_part in proc_parts:
test = f'/dev/{d_part}'
# some partitions from /proc/partitions are not block devices
# assert test in [v.dev for i, v in os_partition_table.partitions.items()]
for key, value in os_partition_table.partitions.items():
assert key == value.dev
assert value.disk in key
assert value.part_num in key
# more tests | 4,393 |
def print_result(feature):
"""
Status message for Sensu - this will show up in any alerts.
"""
print(feature_message(feature)) | 4,394 |
def stitch_frame(frames, _):
"""
Stitching for single frame.
Simply returns the frame of the first index in the frames list.
"""
return frames[0] | 4,395 |
def project_add():
"""
    Desc: add a new project (API endpoint)
    """
    # parse the JSON request body (json import assumed at module level) rather
    # than eval-ing raw request data
    form_data = json.loads(request.get_data(as_text=True))
pro_name, remark = form_data['projectName'], form_data['remark']
user_id = get_jwt_identity()
response = ProjectM().add_project(user_id, pro_name, remark)
return response | 4,396 |
def date_sequence(start, end, stats_duration, step_size):
"""
Generate a sequence of time span tuples
:seealso:
Refer to `dateutil.parser.parse` for details on date parsing.
:param str start: Start date of first interval
:param str end: End date. The end of the last time span may extend past this date.
:param str stats_duration: What period of time should be grouped
:param str step_size: How far apart should the start dates be
:return: sequence of (start_date, end_date) tuples
"""
step_size, freq = parse_interval(step_size)
stats_duration = parse_duration(stats_duration)
for start_date in rrule(freq, interval=step_size, dtstart=start, until=end):
end_date = start_date + stats_duration
if end_date <= end:
yield start_date, start_date + stats_duration | 4,397 |
def end_of_next_month(dt):
"""
Return the end of the next month
"""
month = dt.month + 2
year = dt.year
if month > 12:
next_month = month - 12
year+=1
else:
next_month = month
return (
dt.replace(
year=year, month=next_month, day=1
) - timedelta(days=1)
) | 4,398 |
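# Hedged usage sketch for end_of_next_month() above (assumes datetime and
# timedelta are imported from the datetime module, as the function already
# requires): the month after January 2024 is February, so the leap day is
# returned.
print(end_of_next_month(datetime(2024, 1, 15)))  # 2024-02-29 00:00:00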
def get_version():
"""
Reads version from git status or PKG-INFO
https://gist.github.com/pwithnall/7bc5f320b3bdf418265a
"""
# noinspection PyUnresolvedReferences
git_dir = os.path.join(base_dir, '.git')
if os.path.isdir(git_dir):
# Get the version using "git describe".
cmd = 'git describe --tags --match [0-9]*'.split()
try:
version = subprocess.check_output(cmd).decode().strip()
except subprocess.CalledProcessError:
return None
# PEP 386 compatibility
if '-' in version:
version = '.post'.join(version.split('-')[:2])
# Don't declare a version "dirty" merely because a time stamp has
# changed. If it is dirty, append a ".dev1" suffix to indicate a
# development revision after the release.
with open(os.devnull, 'w') as fd_devnull:
subprocess.call(['git', 'status'],
stdout=fd_devnull, stderr=fd_devnull)
cmd = 'git diff-index --name-only HEAD'.split()
try:
dirty = subprocess.check_output(cmd).decode().strip()
except subprocess.CalledProcessError:
return None
if dirty != '':
version += '.dev1'
else:
# Extract the version from the PKG-INFO file.
try:
with open('PKG-INFO') as v:
version = version_re.search(v.read()).group(1)
except OSError:
version = None
return version | 4,399 |