def getForegroundClassNameUnicode(hwnd=None):
"""
Returns a unicode string containing the class name of the specified
application window.
If hwnd parameter is None, frontmost window will be queried.
"""
if hwnd is None:
hwnd = win32gui.GetForegroundWindow()
# Maximum number of chars we'll accept for the class name; the
# rest will be truncated if it's longer than this.
MAX_LENGTH = 1024
classNameBuf = ctypes.create_unicode_buffer( MAX_LENGTH )
retval = ctypes.windll.User32.GetClassNameW(
hwnd,
classNameBuf,
len( classNameBuf )
)
if retval == 0:
raise ctypes.WinError()
return classNameBuf.value | 2,600 |
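# A minimal usage sketch for getForegroundClassNameUnicode (illustration only, not part
# of the original snippet): it assumes a Windows environment with the pywin32 package
# providing win32gui, plus the standard-library ctypes module.
import ctypes
import win32gui

if __name__ == "__main__":
    # Query the window that currently has focus and print its class name.
    print(getForegroundClassNameUnicode())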
def test_simple_cordex_recipe(tmp_path, patched_datafinder, config_user):
"""Test simple CORDEX recipe."""
content = dedent("""
diagnostics:
test:
additional_datasets:
- dataset: MOHC-HadGEM3-RA
project: CORDEX
product: output
domain: AFR-44
institute: MOHC
driver: ECMWF-ERAINT
exp: evaluation
ensemble: r1i1p1
rcm_version: v1
start_year: 1991
end_year: 1993
mip: mon
variables:
tas:
scripts: null
""")
recipe = get_recipe(tmp_path, content, config_user)
variable = recipe.diagnostics['test']['preprocessor_output']['tas'][0]
filename = variable.pop('filename').split('/')[-1]
assert (filename ==
'CORDEX_MOHC-HadGEM3-RA_v1_ECMWF-ERAINT_AFR-44_mon_evaluation_'
'r1i1p1_tas_1991-1993.nc')
reference = {
'alias': 'MOHC-HadGEM3-RA',
'dataset': 'MOHC-HadGEM3-RA',
'diagnostic': 'test',
'domain': 'AFR-44',
'driver': 'ECMWF-ERAINT',
'end_year': 1993,
'ensemble': 'r1i1p1',
'exp': 'evaluation',
'frequency': 'mon',
'institute': 'MOHC',
'long_name': 'Near-Surface Air Temperature',
'mip': 'mon',
'modeling_realm': ['atmos'],
'preprocessor': 'default',
'product': 'output',
'project': 'CORDEX',
'recipe_dataset_index': 0,
'rcm_version': 'v1',
'short_name': 'tas',
'original_short_name': 'tas',
'standard_name': 'air_temperature',
'start_year': 1991,
'timerange': '1991/1993',
'units': 'K',
'variable_group': 'tas',
}
assert set(variable) == set(reference)
for key in reference:
assert variable[key] == reference[key] | 2,601 |
def read_mapping_from_csv(bind):
"""
Calls read_csv() and parses the loaded array into a dictionary. The dictionary is defined as follows:
{
"teams": {
*team-name*: {
"ldap": []
},
....
        },
        "folders": {
*folder-id*: {
"name": *folder-name*,
"permissions": [
                {
                    "teamId": *team-name*,
                    "permission": *permission*
},
....
]
},
...
}
:return: The csv's contents parsed into a dictionary as described above.
"""
result = {"teams": {}, "folders": {}}
csv_content = read_csv(bind)
is_header = True
for line in csv_content:
if not is_header:
ldap = line[0]
team = line[1]
folder_name = line[3]
folder_uuid = line[4]
permission = line[5]
            if team not in result["teams"]:
                result["teams"][team] = {"ldap": []}
            if ldap not in result["teams"][team]["ldap"]:
                result["teams"][team]["ldap"].append(ldap)
            if folder_uuid not in result["folders"]:
                result["folders"][folder_uuid] = {"name": folder_name, "permissions": []}
            access = {"teamId": team, "permission": permission}
            if access not in result["folders"][folder_uuid]["permissions"]:
                result["folders"][folder_uuid]["permissions"].append(access)
else:
is_header = False
return result | 2,602 |
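# Illustrative sketch (hypothetical data, not from the original source). Based on the
# column indices used above, a CSV row is assumed to look like
#   [ldap_group, team, <unused>, folder_name, folder_uuid, permission]
# and read_mapping_from_csv would fold such rows into the nested dictionary:
#
#   ["grp-dev", "platform", "-", "Dashboards", "abc-123", "Edit"]
# becomes
#   {"teams": {"platform": {"ldap": ["grp-dev"]}},
#    "folders": {"abc-123": {"name": "Dashboards",
#                            "permissions": [{"teamId": "platform", "permission": "Edit"}]}}}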
def primal_update(
agent_id: int,
A: np.ndarray,
W: np.ndarray,
x: np.ndarray,
z: np.ndarray,
lam: np.ndarray,
prev_x: np.ndarray,
prev_z: np.ndarray,
objective_grad: np.ndarray,
feasible_set: CCS,
alpha: float,
tau: float,
nu: float,
others_agent_id: Sequence[int],
others_lam: Sequence[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
    """Inertial primal update for one agent: a projected step on the local variable ``x``
    (momentum, objective gradient and the coupling term ``A.T @ lam``), followed by a
    consensus-weighted update of ``z`` using the neighbours' dual variables."""
x = feasible_set.projection(
x + alpha * (x - prev_x) - tau * objective_grad - np.matmul(A.T, lam)
)
z = (
z
+ alpha * (z - prev_z)
+ nu
* sum(
[
W[agent_id, oai] * (lam - ol)
for oai, ol in zip(others_agent_id, others_lam)
]
)
)
return x, z | 2,603 |
def test_app_create_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test create validation for apps. This test will set the rancher version
explicitly and attempt to create apps with rancher version requirements.
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
cat_base = "catalog://?catalog="+c_name+"&template=chartmuseum&version="
app_data = {
'name': random_str(),
'externalId': cat_base+"1.6.2",
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
"answers": [{
"type": "answer",
"clusterId": None,
"projectId": None,
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.7.1",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
set_server_version(client, "2.1.0-beta2")
# First try requires a min of 2.1 so an error should be returned
with pytest.raises(ApiError) as e:
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher min version not met'
set_server_version(client, "2.3.1")
# Second try requires a max of 2.3 so an error should be returned
with pytest.raises(ApiError) as e:
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
set_server_version(client, "2.2.1-rc4")
# Third try should work
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
wait_for_workload(admin_pc.client, ns.name, count=1) | 2,604 |
def clean_instaces(image, min_area=20):
    """
    Removes small instances by setting their labels to 0 (the image is modified in place).
    :param image: labelled instance image (integer array)
    :param min_area: minimum number of pixels an instance needs in order to be kept
    :return: None
    """
    ids, counts = np.unique(image, return_counts=True)
    removables = [label for label, count in zip(ids, counts) if count < min_area]
    for label in removables:
        image[image == label] = 0 | 2,605 |
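# A small, self-contained check of clean_instaces (hypothetical data): label 2 covers
# only 4 pixels, so with min_area=20 it is wiped to 0, while label 1 (32 pixels) survives.
import numpy as np

demo = np.zeros((8, 8), dtype=np.int32)
demo[:4, :] = 1          # instance 1: 32 pixels
demo[6, 2:6] = 2         # instance 2: 4 pixels
clean_instaces(demo, min_area=20)
assert (demo == 2).sum() == 0 and (demo == 1).sum() == 32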
def norm_cmap(values, cmap, normalize, cm, mn, mx):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
    cm
        matplotlib.cm
    mn
        optional minimum value for normalization; taken from values if both mn and mx are None
    mx
        optional maximum value for normalization; taken from values if both mn and mx are None
    Returns
    -------
    n_cmap
        mapping of normalized values to colormap (cmap)
    norm
        the matplotlib.colors.Normalize instance built from mn and mx
    """
if (mn is None) and (mx is None):
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap, norm | 2,606 |
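# Usage sketch for norm_cmap (assumes matplotlib is installed; the module handles are
# passed in explicitly, matching the signature above):
import matplotlib.cm as cm
import matplotlib.colors as colors

mappable, norm = norm_cmap([2.0, 5.0, 9.0], cmap=cm.viridis,
                           normalize=colors.Normalize, cm=cm, mn=None, mx=None)
rgba = mappable.to_rgba(5.0)   # RGBA colour for an intermediate value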
def _cpx(odss_tuple, nterm, ncond):
"""
This function transforms the raw data for electric parameters (voltage, current...) in a suitable complex array
:param odss_tuple: tuple of nphases*2 floats (returned by odsswr as couples of real, imag components, for each phase
of each terminal)
:type odss_tuple: tuple or list
:param nterm: number of terminals of the underlying electric object
:type nterm: int
:param ncond: number of conductors per terminal of the underlying electric object
:type ncond: int
:returns: a [nterm x ncond] numpy array of complex floats
:rtype: numpy.ndarray
"""
assert len(odss_tuple) == nterm * ncond * 2
cpxr = np.zeros([nterm, ncond], 'complex')
def pairwise(iterable):
# "s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
    for idx, (real, imag) in enumerate(pairwise(odss_tuple)):
        cpxr[idx // ncond, idx % ncond] = real + 1j * imag
return cpxr | 2,607 |
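# Quick illustrative check of _cpx (hypothetical values): one terminal with two
# conductors, flattened as (re0, im0, re1, im1), becomes a 1 x 2 complex array.
raw = (1.0, 2.0, 3.0, -4.0)            # (re, im) pairs for 2 conductors of 1 terminal
arr = _cpx(raw, nterm=1, ncond=2)
assert arr.shape == (1, 2)
assert arr[0, 0] == 1.0 + 2.0j and arr[0, 1] == 3.0 - 4.0j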
def get_l2_distance_arad(X1, X2, Z1, Z2, \
width=0.2, cut_distance=6.0, r_width=1.0, c_width=0.5):
""" Calculates the Gaussian distance matrix D for atomic ARAD for two
sets of molecules
K is calculated using an OpenMP parallel Fortran routine.
Arguments:
==============
X1 -- np.array of ARAD descriptors for molecules in set 1.
X2 -- np.array of ARAD descriptors for molecules in set 2.
Z1 -- List of lists of nuclear charges for molecules in set 1.
Z2 -- List of lists of nuclear charges for molecules in set 2.
Keyword arguments:
width --
cut_distance --
r_width --
c_width --
Returns:
==============
D -- The distance matrices for each sigma (4D-array, Nmol1 x Nmol2 x Natom1 x Natoms2)
"""
amax = X1.shape[1]
    assert X1.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 1"
    assert X2.shape[1] == amax, "ERROR: Check ARAD descriptor sizes! code = 2"
    assert X2.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 3"
nm1 = len(Z1)
nm2 = len(Z2)
    assert X1.shape[0] == nm1, "ERROR: Check ARAD descriptor sizes! code = 4"
    assert X2.shape[0] == nm2, "ERROR: Check ARAD descriptor sizes! code = 5"
N1 = []
for Z in Z1:
N1.append(len(Z))
N2 = []
for Z in Z2:
N2.append(len(Z))
N1 = np.array(N1,dtype=np.int32)
N2 = np.array(N2,dtype=np.int32)
c1 = []
for charges in Z1:
c1.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z1_arad = np.zeros((nm1,amax,2))
for i in range(nm1):
for j, z in enumerate(c1[i]):
Z1_arad[i,j] = z
c2 = []
for charges in Z2:
c2.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z2_arad = np.zeros((nm2,amax,2))
for i in range(nm2):
for j, z in enumerate(c2[i]):
Z2_arad[i,j] = z
return atomic_arad_l2_distance_all(X1, X2, Z1_arad, Z2_arad, N1, N2, \
nm1, nm2, width, cut_distance, r_width, c_width, amax) | 2,608 |
def test_list_server(provider):
""" Checks whether any server is listed and has attributes"""
servers = provider.inventory.list_server()
for server in servers:
assert server.id
assert server.name
assert server.path
assert server.data
assert len(servers) > 0, "No server is listed for any of feeds" | 2,609 |
def _swap_endian(val, length):
"""
Swap the endianness of a number
"""
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
    raise Exception('Cannot swap endianness for length ' + str(length)) | 2,610 |
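# Example round trips for _swap_endian (values chosen purely for illustration):
assert _swap_endian(0x12, 8) == 0x12             # <= 8 bits: unchanged
assert _swap_endian(0x1234, 16) == 0x3412        # 16 bits: bytes reversed
assert _swap_endian(0x12345678, 32) == 0x78563412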
def get_analysis(output, topology, traj):
"""
Calls analysis fixture with the right arguments depending on the trajectory type.
Parameters
-----------
output : str
Path to simulation 'output' folder.
topology : str
Path to the topology file.
traj : str
Trajectory type: xtc or pdb.
"""
traj = traj if traj else "pdb"
trajectory = f"trajectory.{traj}"
analysis = Analysis(
resname="LIG",
chain="Z",
simulation_output=output,
skip_initial_structures=False,
topology=topology,
water_ids_to_track=[("A", 2109), ("A", 2124)],
traj=trajectory,
)
return analysis | 2,611 |
def check_tag_match(extended_result: bool = False):
"""
Determine the discrepancy between the declared tags
and those actually specified in the Definition
    :param extended_result: If True, print an expanded report for each mismatching
        definition instead of a brief one
    :return: None; discrepancies are printed to stdout
"""
definitions = Definition.query.filter(Definition.case_tags != "").all()
for defin in definitions:
pattern_case_tags = f"[{''.join(Definition.APPROVED_CASE_TAGS)}]"
list_tags = re.findall(pattern_case_tags, defin.case_tags)
list_body = [tag for tag in re.findall(r'\w+', defin.body)
if tag in Definition.APPROVED_CASE_TAGS]
result = list_tags == list_body
if result:
continue
if extended_result:
# print(df.source_word.name, result, list_tags, list_body)
if len(defin.source_word.definitions.all()) > 1 and not list_body:
second = f"\n\t{defin.source_word.definitions[1].grammar}" \
f" {defin.source_word.definitions[1].body}"
else:
second = ""
print(f"{defin.source_word.name},\n\t{defin.grammar}"
f" {defin.body}{second} >< [{defin.case_tags}]\n")
else:
print(defin.source_word.name, result) | 2,612 |
def get_callback_class(module_name, subtype):
    """ Can return None. If no class implementation exists for the given subtype, the module is
    searched for a BASE_CALLBACKS_CLASS implementation, which is used if found. """
module = _get_module_from_name(module_name)
if subtype is None:
return _get_callback_base_class(module)
try:
return getattr(module, subtype + CALLBACK_PREFIX)
# If the callback implementation for this subtype doesn't exist,
# attempt to load the BASE_CALLBACKS_CLASS class.
except AttributeError:
return _get_callback_base_class(module) | 2,613 |
def merge_sort(data, left=None, right=None):
"""Merge sort in place. Like selection_sort, this is a generator."""
if left is None:
left = 0
if right is None:
right = len(data)
if right <= left + 1:
return
mid = (left + right) // 2
yield 'subdivide', left, mid
yield from merge_sort(data, left, mid)
yield 'subdivide', mid + 1, right
yield from merge_sort(data, mid, right)
yield from merge(data, left, mid, right) | 2,614 |
def index():
    """Show the measurements."""
return render_template('index.html', metingen=Meting.query.all()) | 2,615 |
def ensure_lambda_uninstall_permissions(session):
"""
Ensures that the current AWS session has the necessary permissions to uninstall the
New Relic AWS Lambda layer and log subscription.
:param session: A boto3 session
"""
needed_permissions = check_permissions(
session, actions=["lambda:GetFunction", "lambda:UpdateFunctionConfiguration"]
)
if needed_permissions:
message = [
"The following AWS permissions are needed to uninstall the New Relic AWS "
"Lambda layer:\n"
]
for needed_permission in needed_permissions:
message.append(" * %s" % needed_permission)
message.append("\nEnsure your AWS user has these permissions and try again.")
raise click.UsageError("\n".join(message)) | 2,616 |
def about(template):
"""
Attach a template to a step which can be used to generate
documentation about the step.
"""
def decorator(step_function):
step_function._about_template = template
return step_function
return decorator | 2,617 |
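# Usage sketch for the about decorator (the step function and template text below are
# made up for illustration): the template is attached to the function object so a
# documentation generator can later pick it up via _about_template.
@about("Logs in as {username} and asserts the dashboard is shown.")
def step_login(context, username):
    pass

assert step_login._about_template.startswith("Logs in")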
def _extend_batch_dim(t: torch.Tensor, new_batch_dim: int) -> torch.Tensor:
"""
Given a tensor `t` of shape [B x D1 x D2 x ...] we output the same tensor repeated
along the batch dimension ([new_batch_dim x D1 x D2 x ...]).
"""
num_non_batch_dims = len(t.shape[1:])
repeat_shape = (new_batch_dim, *(1 for _ in range(num_non_batch_dims)))
return t.repeat(repeat_shape) | 2,618 |
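# Quick check of _extend_batch_dim (assumes PyTorch is available): a [1 x 3 x 2]
# tensor is repeated along the batch dimension to [4 x 3 x 2].
import torch

t = torch.zeros(1, 3, 2)
out = _extend_batch_dim(t, new_batch_dim=4)
assert out.shape == (4, 3, 2)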
def read_GTSRB_train(directory, shuffle = True):
"""
Read the training portion of GTSRB database.
Each class has an own index file.
"""
print('Reading trainset index...')
entries = []
for class_id in range(num_classes):
# each class is in a separate folder
print('\r%i%%'%(int((class_id/num_classes) * 100)), end='')
class_str = str(class_id).zfill(5)
class_directory = os.path.join(directory, class_str)
        # each class has its own index file
index_filename = os.path.join(class_directory, 'GT-%s.csv'%class_str)
index = csv.DictReader(open(index_filename, 'r'), delimiter=';')
for line in index:
filename = os.path.join(class_directory, line['Filename'])
x1 = int(line['Roi.X1'])
y1 = int(line['Roi.Y1'])
x2 = int(line['Roi.X2'])
y2 = int(line['Roi.Y2'])
# there is no need to use the class_id from the csv file
# we can be sure that it corresponds to the folder
entries.append(DatasetEntry(filename, x1, y1, x2, y2, class_id))
print('\rdone')
if shuffle: random.shuffle(entries)
return entries | 2,619 |
def get_client(config):
"""
get_client returns a feature client configured using data found in the
settings of the current application.
"""
storage = _features_from_settings(config.registry.settings)
return Client(storage) | 2,620 |
def upgrade(profile, validator, writeProfileToFileFunc):
""" Upgrade a profile in memory and validate it
If it is safe to do so, as defined by shouldWriteProfileToFile, the profile is written out.
"""
# when profile is none or empty we can still validate. It should at least have a version set.
_ensureVersionProperty(profile)
startSchemaVersion = int(profile[SCHEMA_VERSION_KEY])
log.debug("Current config schema version: {0}, latest: {1}".format(startSchemaVersion, latestSchemaVersion))
for fromVersion in range(startSchemaVersion, latestSchemaVersion):
_doConfigUpgrade(profile, fromVersion)
_doValidation(deepcopy(profile), validator) # copy the profile, since validating mutates the object
try:
# write out the configuration once the upgrade has been validated. This means that if NVDA crashes for some
# other reason the file does not need to be upgraded again.
if writeProfileToFileFunc:
writeProfileToFileFunc(profile.filename, profile)
except Exception as e:
log.warning("Error saving configuration; probably read only file system")
log.debugWarning("", exc_info=True)
pass | 2,621 |
def print_results(ibs, testres):
"""
Prints results from an experiment harness run.
Rows store different qaids (query annotation ids)
Cols store different configurations (algorithm parameters)
Args:
ibs (IBEISController): ibeis controller object
testres (test_result.TestResult):
CommandLine:
python dev.py -e print --db PZ_MTEST -a default:dpername=1,qpername=[1,2] -t default:fg_on=False
python dev.py -e print -t best --db seals2 --allgt --vz
python dev.py -e print --db PZ_MTEST --allgt -t custom --print-confusion-stats
python dev.py -e print --db PZ_MTEST --allgt --noqcache --index 0:10:2 -t custom:rrvsone_on=True --print-confusion-stats
python dev.py -e print --db PZ_MTEST --allgt --noqcache --qaid4 -t custom:rrvsone_on=True --print-confusion-stats
        python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl
python -m ibeis --tf print_results --db PZ_MTEST -a default -t default:lnbnn_on=True default:lnbnn_on=False,bar_l2_on=True default:lnbnn_on=False,normonly_on=True
CommandLine:
python -m ibeis.expt.experiment_printres --test-print_results
utprof.py -m ibeis.expt.experiment_printres --test-print_results
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.expt.experiment_printres import * # NOQA
>>> from ibeis.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(
>>> 'PZ_MTEST', a='default:dpername=1,qpername=[1,2]', t='default:fg_on=False')
>>> result = print_results(ibs, testres)
>>> print(result)
"""
(cfg_list, cfgx2_cfgresinfo, testnameid, cfgx2_lbl, cfgx2_qreq_) = ut.dict_take(
testres.__dict__, ['cfg_list', 'cfgx2_cfgresinfo', 'testnameid', 'cfgx2_lbl', 'cfgx2_qreq_'])
# cfgx2_cfgresinfo is a list of dicts of lists
# Parse result info out of the lists
cfgx2_nextbestranks = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_next_bestranks')
cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
#cfgx2_aveprecs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_avepercision')
cfgx2_scorediffs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_scorediff')
#cfgx2_gt_raw_score = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
column_lbls = [ut.remove_chars(ut.remove_vowels(lbl), [' ', ','])
for lbl in cfgx2_lbl]
scorediffs_mat = np.array(ut.replace_nones(cfgx2_scorediffs, np.nan))
print(' --- PRINT RESULTS ---')
print(' use --rank-lt-list=1,5 to specify X_LIST')
if True:
# Num of ranks less than to score
X_LIST = testres.get_X_LIST()
#X_LIST = [1, 5]
#nConfig = len(cfg_list)
#nQuery = len(testres.qaids)
cfgx2_nQuery = list(map(len, testres.cfgx2_qaids))
#cfgx2_qx2_ranks = testres.get_infoprop_list('qx2_bestranks')
#--------------------
# A positive scorediff indicates the groundtruth was better than the
# groundfalse scores
istrue_list = [scorediff > 0 for scorediff in scorediffs_mat]
isfalse_list = [~istrue for istrue in istrue_list]
#------------
# Build Colscore
nLessX_dict = testres.get_nLessX_dict()
#------------
best_rankscore_summary = []
#to_intersect_list = []
# print each configs scores less than X=thresh
for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
max_nLessX = cfgx2_nLessX.max()
bestX_cfgx_list = np.where(cfgx2_nLessX == max_nLessX)[0]
best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestX_cfgx_list)
# FIXME
best_rankscore += rankscore_str(X, max_nLessX, cfgx2_nQuery[bestX_cfgx_list[0]])
best_rankscore_summary += [best_rankscore]
#to_intersect_list.append(ut.take(cfgx2_lbl, max_nLessX))
#intersected = to_intersect_list[0] if len(to_intersect_list) > 0 else []
#for ix in range(1, len(to_intersect_list)):
# intersected = np.intersect1d(intersected, to_intersect_list[ix])
#if False:
# #gt_raw_score_mat = np.vstack(cfgx2_gt_raw_score).T
# #rank_mat = testres.get_rank_mat()
# #------------
# # Build row lbls
# if False:
# qx2_lbl = np.array([
# 'qx=%d) q%s ' % (qx, ibsfuncs.aidstr(testres.qaids[qx], ibs=ibs, notes=True))
# for qx in range(nQuery)])
# #------------
# # Build Colscore and hard cases
# if False:
# qx2_min_rank = []
# qx2_argmin_rank = []
# new_hard_qaids = []
# new_hardtup_list = []
# for qx in range(nQuery):
# ranks = rank_mat[qx]
# valid_ranks = ranks[ranks >= 0]
# min_rank = ranks.min() if len(valid_ranks) > 0 else -3
# bestCFG_X = np.where(ranks == min_rank)[0]
# qx2_min_rank.append(min_rank)
# # Find the best rank over all configurations
# qx2_argmin_rank.append(bestCFG_X)
#@ut.memoize
#def get_new_hard_qx_list(testres):
# """ Mark any query as hard if it didnt get everything correct """
# rank_mat = testres.get_rank_mat()
# is_new_hard_list = rank_mat.max(axis=1) > 0
# new_hard_qx_list = np.where(is_new_hard_list)[0]
# return new_hard_qx_list
# new_hard_qx_list = testres.get_new_hard_qx_list()
# for qx in new_hard_qx_list:
# # New list is in aid format instead of cx format
# # because you should be copying and pasting it
# notes = ' ranks = ' + str(rank_mat[qx])
# qaid = testres.qaids[qx]
# name = ibs.get_annot_names(qaid)
# new_hardtup_list += [(qaid, name + " - " + notes)]
# new_hard_qaids += [qaid]
@ut.argv_flag_dec
def intersect_hack():
failed = testres.rank_mat > 0
colx2_failed = [np.nonzero(failed_col)[0] for failed_col in failed.T]
#failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0])
#failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only)
failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1])
failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only)
gt_aids1 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[0].daids)
gt_aids2 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[1].daids)
qaids_expt = failed_col1_only_aids
gt_avl_aids1 = ut.flatten(gt_aids1)
gt_avl_aids2 = list(set(ut.flatten(gt_aids2)).difference(gt_avl_aids1))
ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1)
ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2)
#jsontext = ut.to_json({
# 'qaids': list(qaids_expt),
# 'dinclude_aids1': list(gt_aids_expt1),
# 'dinclude_aids2': list(gt_aids_expt2),
#})
#annotation_configs.varysize_pzm
#from ibeis.expt import annotation_configs
acfg = testres.acfg_list[0]
import copy
acfg1 = copy.deepcopy(acfg)
acfg2 = copy.deepcopy(acfg)
acfg1['qcfg']['min_pername'] = None
acfg2['qcfg']['min_pername'] = None
acfg1['dcfg']['min_pername'] = None
acfg2['dcfg']['min_gt_per_name'] = None
acfg1['qcfg']['default_aids'] = qaids_expt
acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1
acfg2['qcfg']['default_aids'] = qaids_expt
acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2
from ibeis.init import filter_annots
from ibeis.expt import experiment_helpers
annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True)
annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True)
acfg_name_list = dict( # NOQA
acfg_list=[acfg1, acfg2],
expanded_aids_list=[annots1, annots2],
)
test_cfg_name_list = ['candidacy_k']
cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(test_cfg_name_list, ibs=ibs)
t1, t2 = testres_list # NOQA
#ut.embed()
#intersect_hack()
#@ut.argv_flag_dec
#def print_rowlbl():
# print('=====================')
# print('[harn] Row/Query Labels: %s' % testnameid)
# print('=====================')
# print('[harn] queries:\n%s' % '\n'.join(qx2_lbl))
#print_rowlbl()
#------------
@ut.argv_flag_dec
def print_collbl():
print('=====================')
print('[harn] Col/Config Labels: %s' % testnameid)
print('=====================')
enum_cfgx2_lbl = ['%2d) %s' % (count, cfglbl)
for count, cfglbl in enumerate(cfgx2_lbl)]
print('[harn] cfglbl:\n%s' % '\n'.join(enum_cfgx2_lbl))
print_collbl()
#------------
@ut.argv_flag_dec
def print_cfgstr():
print('=====================')
print('[harn] Config Strings: %s' % testnameid)
print('=====================')
cfgstr_list = [query_cfg.get_cfgstr() for query_cfg in cfg_list]
enum_cfgstr_list = ['%2d) %s' % (count, cfgstr)
for count, cfgstr in enumerate(cfgstr_list)]
print('\n[harn] cfgstr:\n%s' % '\n'.join(enum_cfgstr_list))
print_cfgstr()
#------------
#@ut.argv_flag_dec
#def print_rowscore():
# print('=======================')
# print('[harn] Scores per Query: %s' % testnameid)
# print('=======================')
# for qx in range(nQuery):
# bestCFG_X = qx2_argmin_rank[qx]
# min_rank = qx2_min_rank[qx]
# minimizing_cfg_str = ut.indentjoin(cfgx2_lbl[bestCFG_X], '\n * ')
# #minimizing_cfg_str = str(bestCFG_X)
# print('-------')
# print(qx2_lbl[qx])
# print(' best_rank = %d ' % min_rank)
# if len(cfgx2_lbl) != 1:
# print(' minimizing_cfg_x\'s = %s ' % minimizing_cfg_str)
#print_rowscore()
#------------
#@ut.argv_flag_dec
#def print_row_ave_precision():
# print('=======================')
# print('[harn] Scores per Query: %s' % testnameid)
# print('=======================')
# for qx in range(nQuery):
# aveprecs = ', '.join(['%.2f' % (aveprecs[qx],) for aveprecs in cfgx2_aveprecs])
# print('-------')
# print(qx2_lbl[qx])
# print(' aveprecs = %s ' % aveprecs)
#print_row_ave_precision()
##------------
#@ut.argv_flag_dec
#def print_hardcase():
# print('--- hard new_hardtup_list (w.r.t these configs): %s' % testnameid)
# print('\n'.join(map(repr, new_hardtup_list)))
# print('There are %d hard cases ' % len(new_hardtup_list))
# aid_list = [aid_notes[0] for aid_notes in new_hardtup_list]
# name_list = ibs.get_annot_names(aid_list)
# name_set = set(name_list)
# print(sorted(aid_list))
# print('Names: %r' % (name_set,))
#print_hardcase()
#default=not ut.get_argflag('--allhard'))
#------------
#@ut.argv_flag_dec
#def echo_hardcase():
# print('--- hardcase commandline: %s' % testnameid)
# # Show index for current query where hardids reside
# #print('--index ' + (' '.join(map(str, new_hard_qx_list))))
# #print('--take new_hard_qx_list')
# #hardaids_str = ' '.join(map(str, [' ', '--qaid'] + new_hard_qaids))
# hardaids_str = ' '.join(map(str, [' ', '--set-aids-as-hard'] + new_hard_qaids))
# print(hardaids_str)
##echo_hardcase(default=not ut.get_argflag('--allhard'))
#echo_hardcase()
#@ut.argv_flag_dec
#def print_bestcfg():
# print('==========================')
# print('[harn] Best Configurations: %s' % testnameid)
# print('==========================')
# # print each configs scores less than X=thresh
# for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
# max_LessX = cfgx2_nLessX.max()
# bestCFG_X = np.where(cfgx2_nLessX == max_LessX)[0]
# best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestCFG_X)
# best_rankscore += rankscore_str(X, max_LessX, nQuery)
# cfglbl_list = cfgx2_lbl[bestCFG_X]
# best_rankcfg = format_cfgstr_list(cfglbl_list)
# #indent('\n'.join(cfgstr_list), ' ')
# print(best_rankscore)
# print(best_rankcfg)
# print('[cfg*] %d cfg(s) are the best of %d total cfgs' % (len(intersected), nConfig))
# print(format_cfgstr_list(intersected))
#print_bestcfg()
#------------
#@ut.argv_flag_dec
#def print_gtscore():
# # Prints best ranks
# print('gtscore_mat: %s' % testnameid)
# print(' nRows=%r, nCols=%r' % (nQuery, nConfig))
# header = (' labled rank matrix: rows=queries, cols=cfgs:')
# print('\n'.join(cfgx2_lbl))
# column_list = gt_raw_score_mat.T
# print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
# column_lbls=column_lbls, header=header,
# transpose=False,
# use_lbl_width=len(cfgx2_lbl) < 5))
#print_gtscore()
#------------
#@ut.argv_flag_dec
#def print_best_rankmat():
# # Prints best ranks
# print('-------------')
# print('RankMat: %s' % testnameid)
# print(' nRows=%r, nCols=%r' % (nQuery, nConfig))
# header = (' labled rank matrix: rows=queries, cols=cfgs:')
# print('\n'.join(cfgx2_lbl))
# column_list = rank_mat.T
# print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
# column_lbls=column_lbls, header=header,
# transpose=False,
# use_lbl_width=len(cfgx2_lbl) < 5))
#print_best_rankmat()
#@ut.argv_flag_dec
#def print_diffmat():
# # score differences over configs
# print('-------------')
# print('Diffmat: %s' % testnameid)
# diff_matstr = get_diffmat_str(rank_mat, testres.qaids, nConfig)
# print(diff_matstr)
#print_diffmat()
#@ut.argv_flag_dec
#def print_rankhist_time():
# print('A rank histogram is a dictionary. '
# 'The keys denote the range of the ranks that the values fall in')
# # TODO: rectify this code with other hist code
# config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid')
# config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs()
# _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs))
# for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter:
# #full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()
# #ut.print_dict(ut.dict_hist(ranks), 'rank histogram', sorted_=True)
# # find the qxs that belong to each bin
# aid_list1 = testres.qaids
# aid_list2 = qx2_gt_aid
# ibs.assert_valid_aids(aid_list1)
# ibs.assert_valid_aids(aid_list2)
# timedelta_list = ibs.get_annot_pair_timdelta(aid_list1, aid_list2)
# #timedelta_str_list = [ut.get_posix_timedelta_str2(delta)
# # for delta in timedelta_list]
# bin_edges = testres.get_rank_histogram_bin_edges()
# timedelta_groups = ut.dict_take(ut.group_items(timedelta_list, config_binxs), np.arange(len(bin_edges)), [])
# timedelta_stats = [ut.get_stats(deltas, use_nan=True, datacast=ut.get_posix_timedelta_str2) for deltas in timedelta_groups]
# print('Time statistics for each rank range:')
# print(ut.dict_str(dict(zip(bin_edges, timedelta_stats)), sorted_=True))
#print_rankhist_time()
#@ut.argv_flag_dec
#def print_rankhist():
# print('A rank histogram is a dictionary. '
# 'The keys denote the range of the ranks that the values fall in')
# # TODO: rectify this code with other hist code
# config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid')
# config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs()
# _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs))
# for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter:
# print('Frequency of rank ranges:')
# ut.print_dict(agg_hist_dict, 'agg rank histogram', sorted_=True)
#print_rankhist()
#------------
# Print summary
#print(' --- SUMMARY ---')
#------------
#@ut.argv_flag_dec
#def print_colmap():
# print('==================')
# print('[harn] mAP per Config: %s (sorted by mAP)' % testnameid)
# print('==================')
# cfgx2_mAP = np.array([aveprec_list.mean() for aveprec_list in cfgx2_aveprecs])
# sortx = cfgx2_mAP.argsort()
# for cfgx in sortx:
# print('[mAP] cfgx=%r) mAP=%.3f -- %s' % (cfgx, cfgx2_mAP[cfgx], cfgx2_lbl[cfgx]))
# #print('L___ Scores per Config ___')
#print_colmap()
#------------
@ut.argv_flag_dec_true
def print_colscore():
print('==================')
print('[harn] Scores per Config: %s' % testnameid)
print('==================')
#for cfgx in range(nConfig):
# print('[score] %s' % (cfgx2_lbl[cfgx]))
# for X in X_LIST:
# nLessX_ = nLessX_dict[int(X)][cfgx]
# print(' ' + rankscore_str(X, nLessX_, nQuery))
print('\n[harn] ... sorted scores')
for X in X_LIST:
print('\n[harn] Sorted #ranks < %r scores' % (X))
sortx = np.array(nLessX_dict[int(X)]).argsort()
#frac_list = (nLessX_dict[int(X)] / cfgx2_nQuery)[:, None]
#print('cfgx2_nQuery = %r' % (cfgx2_nQuery,))
#print('frac_list = %r' % (frac_list,))
#print('Pairwise Difference: ' + str(ut.safe_pdist(frac_list, metric=ut.absdiff)))
for cfgx in sortx:
nLessX_ = nLessX_dict[int(X)][cfgx]
rankstr = rankscore_str(X, nLessX_, cfgx2_nQuery[cfgx], withlbl=False)
print('[score] %s --- %s' % (rankstr, cfgx2_lbl[cfgx]))
print_colscore()
#------------
ut.argv_flag_dec(print_latexsum)(ibs, testres)
@ut.argv_flag_dec
def print_next_rankmat():
# Prints nextbest ranks
print('-------------')
print('NextRankMat: %s' % testnameid)
header = (' top false rank matrix: rows=queries, cols=cfgs:')
print('\n'.join(cfgx2_lbl))
column_list = cfgx2_nextbestranks
print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
column_lbls=column_lbls, header=header,
transpose=False,
use_lbl_width=len(cfgx2_lbl) < 5))
print_next_rankmat()
#------------
@ut.argv_flag_dec
def print_scorediff_mat():
# Prints nextbest ranks
print('-------------')
print('ScoreDiffMat: %s' % testnameid)
header = (' score difference between top true and top false: rows=queries, cols=cfgs:')
print('\n'.join(cfgx2_lbl))
column_list = cfgx2_scorediffs
column_type = [float] * len(column_list)
print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
column_lbls=column_lbls,
column_type=column_type,
header=header,
transpose=False,
use_lbl_width=len(cfgx2_lbl) < 5))
print_scorediff_mat(alias_flags=['--sdm'])
#------------
def jagged_stats_info(arr_, lbl, col_lbls):
arr = ut.recursive_replace(arr_, np.inf, np.nan)
# Treat infinite as nan
stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict, col_lbls)
sel_col_lbls = ut.take(col_lbls, sel_indices)
statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True)
stat_str = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
sel_stat_str = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),) + '\n' + sel_stat_str
return stat_str, sel_stat_str
@ut.argv_flag_dec
def print_confusion_stats():
"""
CommandLine:
python dev.py --allgt --print-scorediff-mat-stats --print-confusion-stats -t rrvsone_grid
"""
# Prints nextbest ranks
print('-------------')
print('ScoreDiffMatStats: %s' % testnameid)
print('column_lbls = %r' % (column_lbls,))
#cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
#cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
gt_rawscores_mat = ut.replace_nones(cfgx2_gt_rawscores, np.nan)
gf_rawscores_mat = ut.replace_nones(cfgx2_gf_rawscores, np.nan)
tp_rawscores = vt.zipcompress(gt_rawscores_mat, istrue_list)
fp_rawscores = vt.zipcompress(gt_rawscores_mat, isfalse_list)
tn_rawscores = vt.zipcompress(gf_rawscores_mat, istrue_list)
fn_rawscores = vt.zipcompress(gf_rawscores_mat, isfalse_list)
tp_rawscores_str, tp_rawscore_statstr = jagged_stats_info(tp_rawscores, 'tp_rawscores', cfgx2_lbl)
fp_rawscores_str, fp_rawscore_statstr = jagged_stats_info(fp_rawscores, 'fp_rawscores', cfgx2_lbl)
tn_rawscores_str, tn_rawscore_statstr = jagged_stats_info(tn_rawscores, 'tn_rawscores', cfgx2_lbl)
fn_rawscores_str, fn_rawscore_statstr = jagged_stats_info(fn_rawscores, 'fn_rawscores', cfgx2_lbl)
#print(tp_rawscores_str)
#print(fp_rawscores_str)
#print(tn_rawscores_str)
#print(fn_rawscores_str)
print(tp_rawscore_statstr)
print(fp_rawscore_statstr)
print(tn_rawscore_statstr)
print(fn_rawscore_statstr)
print_confusion_stats(alias_flags=['--cs'])
ut.argv_flag_dec_true(testres.print_percent_identification_success)()
sumstrs = []
sumstrs.append('')
sumstrs.append('||===========================')
sumstrs.append('|| [cfg*] SUMMARY: %s' % testnameid)
sumstrs.append('||---------------------------')
sumstrs.append(ut.joins('\n|| ', best_rankscore_summary))
sumstrs.append('||===========================')
summary_str = '\n' + '\n'.join(sumstrs) + '\n'
#print(summary_str)
ut.colorprint(summary_str, 'blue')
print('To enable all printouts add --print-all to the commandline') | 2,622 |
def fra_months(z): # Apologies, this function is verbose--function modeled after SSA regulations
"""A function that returns the number of months from date of birth to FRA based on SSA chart"""
# Declare global variable
global months_to_fra
# If date of birth is 1/1/1938 or earlier, full retirement age (FRA) is 65
if z < datetime.date(1938, 1, 2):
months_to_fra = 780
# If date of birth is between 1/2/1938 and 1/1/1939, then (FRA) is age 65 + 2 months
elif z < datetime.date(1939, 1, 2):
months_to_fra = 782
# If date of birth is between 1/2/1939 and 1/1/1940, then (FRA) is age 65 + 4 months
elif z < datetime.date(1940, 1, 2):
months_to_fra = 784
# If date of birth is between 1/2/1940 and 1/1/1941, then (FRA) is age 65 + 6 months
elif z < datetime.date(1941, 1, 2):
months_to_fra = 786
# If date of birth is between 1/2/1941 and 1/1/1942, then (FRA) is age 65 + 8 months
elif z < datetime.date(1942, 1, 2):
months_to_fra = 788
# If date of birth is between 1/2/1942 and 1/1/1943, then (FRA) is age 65 + 10 months
elif z < datetime.date(1943, 1, 2):
months_to_fra = 790
# If date of birth is between 1/2/1943 and 1/1/1955, then (FRA) is age 66
elif z < datetime.date(1955, 1, 2):
months_to_fra = 792
# If date of birth is between 1/2/1955 and 1/1/1956, then (FRA) is age 66 + 2 months
elif z < datetime.date(1956, 1, 2):
months_to_fra = 794
# If date of birth is between 1/2/1956 and 1/1/1957, then (FRA) is age 66 + 4 months
elif z < datetime.date(1957, 1, 2):
months_to_fra = 796
# If date of birth is between 1/2/1957 and 1/1/1958, then (FRA) is age 66 + 6 months
elif z < datetime.date(1958, 1, 2):
months_to_fra = 798
# If date of birth is between 1/2/1958 and 1/1/1959, then (FRA) is age 66 + 8 months
elif z < datetime.date(1959, 1, 2):
months_to_fra = 800
# If date of birth is between 1/2/1959 and 1/1/1960, then (FRA) is age 66 + 10 months
elif z < datetime.date(1960, 1, 2):
months_to_fra = 802
# If date of birth is 1/2/1960 or later, then (FRA) is age 67
else:
months_to_fra = 804
return months_to_fra | 2,623 |
def repo_ls(node, relative_path, color):
"""List files in the node repository folder."""
from aiida.cmdline.utils.repository import list_repository_contents
try:
list_repository_contents(node, relative_path, color)
except FileNotFoundError:
echo.echo_critical('the path `{}` does not exist for the given node'.format(relative_path)) | 2,624 |
def parse_process(i, jobs_queue):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param i: process id.
:param jobs_queue: where to get jobs.
"""
    logging.info("entering parse_process %d, pid: %d", i, os.getpid())
    while True:
        logging.info("parse_process pid:%d waiting for a job from jobs_queue; current queue size: %d", os.getpid(), jobs_queue.qsize())
        job = jobs_queue.get()  # job is an (infile, outfile) tuple
        # jobs_queue.get() blocks this process until a job object is available.
        if job:
            infile, outfile = job
            logging.info("parse_process pid:%d got job (infile:%s outfile:%s); queue size is now %d", os.getpid(), infile, outfile, jobs_queue.qsize())
            try:
                Parse(infile, outfile)
            except Exception:
                logging.exception('parse_process pid:%d exception while parsing file: %s', os.getpid(), infile)
            logging.info("parse_process pid:%d finished job (infile:%s outfile:%s); queue size is now %d", os.getpid(), infile, outfile, jobs_queue.qsize())
        else:
            logging.info("parse_process pid:%d received None from jobs_queue, quitting...", os.getpid())
            logging.debug('Quit extractor')
break | 2,625 |
def set_global_format_spec(formats: SpecDict):
"""Set the global default format specifiers.
Parameters
----------
formats: dict[type, str]
Class-based format identifiers.
Returns
-------
old_spec : MultiFormatSpec
The previous globally-set formatters.
Example
-------
>>> s = section.Elastic2D(1, 29000, 10, 144)
>>> print(s)
section Elastic 1 29000 10 144
>>> set_global_format_spec({float: '#.3g'})
MultiFormatSpec(int='d', float='g')
>>> print(s)
section Elastic 1 2.90e+04 10.0 144.
"""
old_spec = _GLOBAL_FORMAT_SPEC.copy()
_GLOBAL_FORMAT_SPEC.update(formats)
return old_spec | 2,626 |
def gen_base_pass(length=15):
"""
Generate base password.
- A new password will be generated on each call.
:param length: <int> password length.
:return: <str> base password.
"""
generator = PassGen()
return generator.make_password(length=length) | 2,627 |
def split_component_chars(address_parts):
"""
:param address_parts: list of the form [(<address_part_1>, <address_part_1_label>), .... ]
returns [(<char_0>, <address_comp_for_char_0), (<char_1>, <address_comp_for_char_1),.., (<char_n-1>, <address_comp_for_char_n-1)]
"""
char_arr = []
for address_part, address_part_label in address_parts:
# The address part of the tuple (address_part, address_part_label)
for c in address_part:
char_arr.append((c, address_part_label))
return char_arr | 2,628 |
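# Example of split_component_chars on a tiny, made-up address:
parts = [("12", "house_number"), ("Main St", "road")]
assert split_component_chars(parts) == [
    ("1", "house_number"), ("2", "house_number"),
    ("M", "road"), ("a", "road"), ("i", "road"), ("n", "road"),
    (" ", "road"), ("S", "road"), ("t", "road"),
]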
def report_metrics(topic, message):
    """
    Report metric data to storage via datamanage.
    :param topic: the topic to report to
    :param message: the metric data points to report
    :return: whether the report succeeded
"""
try:
res = DataManageApi.metrics.report({"kafka_topic": topic, MESSAGE: message, TAGS: [DEFAULT_GEOG_AREA_TAG]})
logger.info(f"report capacity metric {json.dumps(message)}")
if res.is_success():
return True
else:
logger.warning(f"report metric failed. {json.dumps(message)} {res.message}")
return False
except Exception:
        logger.error("report metric failed, encountered an exception", exc_info=True)
return False | 2,629 |
def wg_completion_scripts_cb(data, completion_item, buffer, completion):
""" Complete with known script names, for command '/weeget'. """
global wg_scripts
wg_read_scripts(download_list=False)
if len(wg_scripts) > 0:
for id, script in wg_scripts.items():
weechat.hook_completion_list_add(completion, script["full_name"],
0, weechat.WEECHAT_LIST_POS_SORT)
return weechat.WEECHAT_RC_OK | 2,630 |
def getG(source):
""" Read the Graph from a textfile """
G = {}
Grev = {}
for i in range(1,N+1):
G[i] = []
Grev[i] = []
fin = open(source)
for line in fin:
v1 = int(line.split()[0])
v2 = int(line.split()[1])
G[v1].append(v2)
Grev[v2].append(v1)
fin.close()
return G, Grev | 2,631 |
def word_to_signed_int(x):
    """
    returns a signed integer from the word (2-bytes)
    :param x: two-byte value to convert to a signed number (assumed here to be an
        unsigned int in the range 0..0xFFFF; a bytes input would need int.from_bytes first)
    """
    # Two's-complement interpretation: values with the sign bit set map to negatives.
    return x - 0x10000 if x & 0x8000 else x | 2,632 |
def remove_version(code):
""" Remove any version directive """
    pattern = r'\#\s*version[^\r\n]*\n'
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
return regex.sub('\n', code) | 2,633 |
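# Example: the GLSL version directive is stripped while the rest of the shader is kept.
shader = "#version 330 core\nvoid main() {}\n"
assert remove_version(shader) == "\nvoid main() {}\n"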
def unlabeled_balls_in_labeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among labeled boxes with specified box sizes
(capacities). This is a generalization of the most common formulation of the
problem, where each box is sufficiently large to accommodate all of the
balls, and is an important example of a class of combinatorics problems
called 'weak composition' problems.
    INPUTS
    balls: the number of balls
    box_sizes: This argument is a list of length 1 or greater. The length of
    the list corresponds to the number of boxes. `box_sizes[i]` is a positive
    integer that specifies the maximum capacity of the ith box. If
    `box_sizes[i]` equals `balls` (or greater), the ith box can accommodate all the
    balls and thus effectively has unlimited capacity.
ACKNOWLEDGMENT
I'd like to thank Chris Rebert for helping me to convert my prototype
class-based code into a generator function.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes,list):
raise ValueError("box_sizes must be a non-empty list.")
capacity= 0
for size in box_sizes:
if not isinstance(size, int):
raise TypeError("box_sizes must contain only positive integers.")
if size < 1:
raise ValueError("box_sizes must contain only positive integers.")
capacity+= size
if capacity < balls:
raise ValueError("The total capacity of the boxes is less than the "
"number of balls to be distributed.")
return _unlabeled_balls_in_labeled_boxes(balls, box_sizes) | 2,634 |
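# The recursive helper _unlabeled_balls_in_labeled_boxes is not part of this snippet;
# the following is a minimal sketch (an assumption, not the original implementation)
# of how such a capacity-limited weak-composition generator can be written:
def _unlabeled_balls_in_labeled_boxes_sketch(balls, box_sizes):
    if not box_sizes:
        if balls == 0:
            yield ()
        return
    # Put i balls in the first box, then distribute the rest over the remaining boxes.
    for i in range(min(balls, box_sizes[0]), -1, -1):
        for rest in _unlabeled_balls_in_labeled_boxes_sketch(balls - i, box_sizes[1:]):
            yield (i,) + rest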
def NortekVectorConvert(dat, vhd, csv, sample_Hz=2):
"""
Take a NortekVector output file set (.dat and .vhd files only) and convert
them into csv files with times and measurements
@inputs
dat - path to .dat file output by Nortek, containing PUV data
vhd - path to .vhd file output by Nortek, containing start time
csv - path to .csv file to save to
"""
dat_cols = ["Burst","Ensemble","u","v","w",
"str1","str2","str3",
"snr1","snr2","snr3",
"corr1","corr2","corr3",
"p","analog1","analog2","checksum(1=failed)"]
    df = pd.read_table(dat, sep=r'\s+', names=dat_cols)
df = df[["u","v","w","p"]]
gc.collect()
mo, da, ye, ho, mi, se = open(vhd).readline().split(' ')[:6]
start_time = pd.Timestamp(month = int(mo), day= int(da), year=int(ye),
hour=int(ho), minute=int(mi), second=int(se))
t0 = start_time.to_datetime64()
timestep = np.timedelta64(int(1000*(sample_Hz**-1)),'ms')
t = np.arange(t0, t0+len(df)*timestep, timestep)
df['t'] = t
df.to_csv(csv, index=False) | 2,635 |
def beginning_next_non_empty_line(bdata, i):
    """ Return the index of the first character of the next non-empty line in bdata,
    starting from position i.
    """
while bdata[i] not in EOL:
i += 1
while bdata[i] in EOL:
i += 1
return i | 2,636 |
def initate_process(args_array, isse, **kwargs):
"""Function: initate_process
Description: Sets up the program log, opens a sftp connection, and
determines which option will be ran.
Arguments:
(input) args_array -> Dict of command line options and values.
(input) isse -> ISSE Guard class instance.
(input) **kwargs:
pattern -> pattern matching string for other filenames
"""
args_array = dict(args_array)
log = gen_class.Logger(isse.prog_log, isse.prog_log, "INFO",
"%(asctime)s %(levelname)s %(message)s",
"%Y-%m-%dT%H:%M:%SZ")
str_val = "=" * 80
log.log_info("%s Initialized" % isse.name)
log.log_info("%s" % str_val)
log.log_info("Transfer Dir: %s" % isse.transfer_dir)
log.log_info("Review Dir: %s" % isse.review_dir)
log.log_info("Complete Dir: %s" % isse.complete_dir)
log.log_info("Job Log: %s" % isse.job_log)
log.log_info("%s" % str_val)
sftp = None
if isse.action != "moveapproved":
sftp, status = set_sftp_conn(isse, args_array["-s"], args_array["-d"],
log)
if isse.action == "moveapproved":
move_to_reviewed(isse, log)
elif sftp.is_connected and status and isse.action == "process":
isse.set_other_files()
log.log_info("set_other_files...")
log.log_info("[ %s ]" % ", ".join(isse.other_files))
process(isse, sftp, log, **kwargs)
elif sftp.is_connected and status and isse.action == "send":
print("NOTE: Send option is for debugging purposes only.")
if isse.files:
_send(isse, sftp, log)
else:
print("ERROR: Expected file path or array of file paths.")
elif not sftp.is_connected:
log.log_err("SFTP Connection failed to open")
elif not status:
log.log_err("SFTP failure on changing directory")
else:
log.log_err("initate_process::Unknown error")
if sftp and sftp.is_connected:
sftp.close_conn()
log.log_info("SFTP Connection closed")
log.log_close() | 2,637 |
def VisualizeBoxes(image,
boxes,
classes,
scores,
class_id_to_name,
min_score_thresh=.25,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False,
text_loc='TOP'):
"""Visualize boxes on top down image."""
box_to_display_str_map = collections.defaultdict(str)
box_to_color_map = collections.defaultdict(str)
num_boxes = boxes.shape[0]
for i in range(num_boxes):
if scores is not None and scores[i] < min_score_thresh:
continue
box = tuple(boxes[i].tolist())
display_str = ''
if not skip_labels:
if classes[i] in class_id_to_name:
class_name = class_id_to_name[classes[i]]
display_str = str(class_name)
else:
display_str = 'N/A'
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box] = display_str
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
box_to_color_map[box] = PIL_COLOR_LIST[classes[i] % len(PIL_COLOR_LIST)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
DrawBoundingBoxOnImage(
image,
box,
color=color,
thickness=line_thickness,
display_str=box_to_display_str_map[box],
text_loc=text_loc)
return image | 2,638 |
def tagDataskp(dList, start, end, name):
"""
    Take a position (or slice) to retrieve from the list dList.
    """
    tagdata = None
    try:
        #if end is not None:
        if end:
            #tagdata = ",".join(dList[start:end + 1])
            tagdata = dList[start:end + 1]
        else:
            tagdata = dList[start]
    except Exception:
        from datetime import datetime
        sys.stderr.write("Error retrieving the Tag Data: %s, %s. Event: %s [%s].\n" % (name, dList[2], dList[1], str(datetime.now())))  # dList[2] is the 'id'
    return tagdata or None | 2,639 |
def cal_softplus(x):
"""Calculate softplus."""
return np.log(np.exp(x) + 1) | 2,640 |
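# Note: np.exp(x) overflows for large x; a numerically safer variant (shown here as an
# alternative, not the original implementation) uses logaddexp, which is equivalent
# because softplus(x) = log(1 + e^x) = logaddexp(0, x). Assumes numpy is imported as np.
def cal_softplus_stable(x):
    """Numerically stable softplus."""
    return np.logaddexp(0, x)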
def load_list_from_disk_with_pickle(path_to_list: str) -> list:
"""This function loads a list from disk
Args:
path_to_list (str): path to where the list is saved
Returns:
loaded_list (list): loaded list
Raises:
AssertionError: if list path does not exist
"""
assert os.path.exists(path_to_list), "Path {} does not exist".format(path_to_list)
open_file = open(path_to_list, "rb")
loaded_list = pickle.load(open_file) # load from disk
open_file.close()
return loaded_list | 2,641 |
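# A matching save helper (an assumption for completeness, not part of the original
# module), using a context manager so the file handle is always closed:
import pickle

def save_list_to_disk_with_pickle(input_list: list, path_to_list: str) -> None:
    """Save a list to disk with pickle."""
    with open(path_to_list, "wb") as open_file:
        pickle.dump(input_list, open_file)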
def test_app_config():
"""
Test app_init call using the 'test-fence-config.yaml'
This includes the check to verify underlying storage
"""
config_path = "test-fence-config.yaml"
root_dir = os.path.dirname(os.path.realpath(__file__))
# delete the record operation from the data blueprint, because right now it calls a
# whole bunch of stuff on the arborist client to do some setup for the uploader role
fence.blueprints.data.blueprint.deferred_functions = [
f
for f in fence.blueprints.data.blueprint.deferred_functions
if f.__name__ != "record"
]
fake_blob_service_client = FakeBlobServiceClient()
patch_list = [
{"patch_name": "fence.app_sessions"},
{"patch_name": "fence.app_register_blueprints"},
{"patch_name": "fence.oidc.oidc_server.OIDCServer.init_app"},
{"patch_name": "fence._setup_prometheus"},
{
"patch_name": "fence.resources.storage.StorageManager.__init__",
"return_value": None,
},
{"patch_name": "fence._check_aws_creds_and_region"},
{
"patch_name": "fence.BlobServiceClient.from_connection_string",
"return_value": fake_blob_service_client,
},
]
patchers = []
for patch_values in patch_list:
patcher = (
patch(patch_values["patch_name"], return_value=patch_values["return_value"])
if "return_value" in patch_values.keys()
else patch(patch_values["patch_name"])
)
patcher.start()
patchers.append(patcher)
app_init(
fence.app,
test_settings,
root_dir=root_dir,
config_path=os.path.join(root_dir, config_path),
)
assert fence.app.config # nosec
for patcher in patchers:
patcher.stop() | 2,642 |
def prepare_spark_conversion(df: pd.DataFrame) -> pd.DataFrame:
"""Pandas does not distinguish NULL and NaN values. Everything null-like
is converted to NaN. However, spark does distinguish NULL and NaN for
example. To enable correct spark dataframe creation with NULL and NaN
values, the `PANDAS_NULL` constant is used as a workaround to enforce NULL
values in pyspark dataframes. Pyspark treats `None` values as NULL.
Parameters
----------
df: pd.DataFrame
Input dataframe to be prepared.
Returns
-------
df_prepared: pd.DataFrame
Prepared dataframe for spark conversion.
"""
return df.where(df.ne(PANDAS_NULL), None) | 2,643 |
def get_user_vk_id(id):
"""
    :param id: numeric VK user ID
    :return: link to the user's VK page
"""
response = requests.get('{}users.get?user_ids={}&fields=domain&access_token={}&v={}'
.format(api_address, id, token, api_version))
    data = get_dictionary(response)
    return 'https://vk.com/{}'.format(data['response'][0]['domain']) | 2,644 |
def controllable_staircase(
A,
B,
C,
D,
E,
tol=1e-9,
):
"""
Implementation of
COMPUTATION OF IRREDUCIBLE GENERALIZED STATE-SPACE REALIZATIONS ANDRAS VARGA
using givens rotations.
it is very slow, but numerically stable
TODO, add pivoting,
TODO, make it use the U-T property on E better for speed
TODO, make it output Q and Z to apply to aux matrices, perhaps use them on C
"""
# from icecream import ic
# import tabulate
Ninputs = B.shape[1]
Nstates = A.shape[0]
Nconstr = A.shape[1]
Noutput = C.shape[0]
BA, E = scipy.linalg.qr_multiply(E, np.hstack([B, A]), pivoting=False, mode="left")
Nmin = min(Nconstr, Nstates)
for CidxBA in range(0, Nmin - 1):
for RidxBA in range(Nconstr - 1, CidxBA, -1):
# create a givens rotation for Q reduction on BA
BAv0 = BA[RidxBA - 1, CidxBA]
BAv1 = BA[RidxBA, CidxBA]
BAvSq = BAv0 ** 2 + BAv1 ** 2
if BAvSq < tol:
continue
BAvAbs = BAvSq ** 0.5
c = BAv1 / BAvAbs
s = BAv0 / BAvAbs
M = np.array([[s, +c], [-c, s]])
BA[RidxBA - 1 : RidxBA + 1, :] = M @ BA[RidxBA - 1 : RidxBA + 1, :]
# TODO, use the U-T to be more efficient
E[RidxBA - 1 : RidxBA + 1, :] = M @ E[RidxBA - 1 : RidxBA + 1, :]
Cidx = RidxBA
Ridx = RidxBA
# row and col swap
Ev0 = E[Ridx, Cidx - 1]
Ev1 = E[Ridx, Cidx]
EvSq = Ev0 ** 2 + Ev1 ** 2
if EvSq < tol:
continue
EvAbs = EvSq ** 0.5
c = Ev0 / EvAbs
s = Ev1 / EvAbs
MT = np.array([[s, +c], [-c, s]])
BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] = (
BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] @ MT
)
C[:, Cidx - 1 : Cidx + 1] = C[:, Cidx - 1 : Cidx + 1] @ MT
# TODO, use the U-T to be more efficient
E[:, Cidx - 1 : Cidx + 1] = E[:, Cidx - 1 : Cidx + 1] @ MT
B = BA[:, :Ninputs]
A = BA[:, Ninputs:]
return A, B, C, D, E | 2,645 |
def loyalty():
    """Recalculate the loyalty index."""
    articles = Article.objects.all()
    if articles.count() == 0:
        logger.info('No articles to recalculate yet. Exiting...')
        return False
    logger.info('Starting loyalty index recalculation')
    logger.info(f'Number of articles: {articles.count()}')
texts = [item.text for item in articles]
dt = DefineText(texts)
themes, _ = dt.article_theme()
sentiments, _ = dt.article_sentiment()
for article, theme, sentiment in zip(articles, themes, sentiments):
article.theme = bool(theme)
article.sentiment = sentiment
article.save() | 2,646 |
def user_info(request):
"""Returns a JSON object containing the logged-in student's information."""
student = request.user.student
return HttpResponse(json.dumps({
'academic_id': student.academic_id,
'current_semester': int(student.current_semester),
'name': student.name,
'username': request.user.username}), content_type="application/json") | 2,647 |
def main():
"""
A function to bootstrap the application.
    This function loads the config.json file and runs the proxy server.
:return: None
"""
args = parse_args()
# Check if config file provided
if args.config_file:
# Get data from config file
config = load_config(args.config_file)
try:
# Start the proxy server
start_proxy_server(config)
        except Exception:
logging.error("Failed to start server %s", config['proxy_server'])
else:
error = "Config file not found"
logging.error("Failed to load config file: %s", error) | 2,648 |
def extract_brain_activation(brainimg, mask, roilabels, method='mean'):
"""
Extract brain activation from ROI.
Parameters
----------
brainimg : array
        A 4D brain image array whose first dimension corresponds to pictures and whose remaining 3D dimensions correspond to brain images
mask : array
A 3D brain image array with the same size as the rest 3D of brainimg.
roilabels : list, array
ROI labels
method : str
Method to integrate activation from each ROI, by default is 'mean'.
Returns
-------
roisignals : list
Extracted brain activation.
Each element in the list is the extracted activation of the roilabels.
        Because different labels may contain different numbers of activation voxels,
        the output activations cannot be stored as a single numpy array.
"""
if method == 'mean':
calc_way = partial(np.mean, axis=1)
elif method == 'std':
calc_way = partial(np.std, axis=1)
elif method == 'max':
calc_way = partial(np.max, axis=1)
elif method == 'voxel':
calc_way = np.array
else:
        raise Exception("This method is not supported yet; please contact the authors to implement it.")
assert brainimg.shape[1:] == mask.shape, "brainimg and mask are mismatched."
roisignals = []
for i, lbl in enumerate(roilabels):
roisignals.append(calc_way(brainimg[:, mask==lbl]))
return roisignals | 2,649 |
def list_goal():
"""List relative path pants targets."""
path = lib.rel_cwd()
pants_args = "list {0}:".format(path)
lib.pants_list(pants_args) | 2,650 |
def load_sensor_suite_from_json(manager: 'WASensorManager', filename: str):
"""Load a sensor suite from json
Each loaded sensor will be added to the manager
Args:
manager (WASensorManager): The sensor manager to store all created objects in
filename (str): The json specification file that describes the sensor suite
"""
j = _load_json(filename)
# Validate the json file
_check_field(j, 'Type', value='Sensor')
_check_field(j, 'Template', value='Sensor Suite')
_check_field(j, 'Sensors', field_type=list)
# Load the sensors
for sensor in j['Sensors']:
new_sensor = load_sensor_from_json(sensor, manager._system)
manager.add_sensor(new_sensor) | 2,651 |
def logit(x):
"""
Elementwise logit (inverse logistic sigmoid).
:param x: numpy array
:return: numpy array
"""
return np.log(x / (1.0 - x)) | 2,652 |
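
# Quick sanity check (sketch): logit is the inverse of the logistic sigmoid,
# assuming numpy is imported as np as the function above expects.
import numpy as np

p = np.array([0.25, 0.5, 0.9])
z = logit(p)
p_back = 1.0 / (1.0 + np.exp(-z))   # recovers p up to floating point error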
def _base58_decode(address: str) -> bool:
"""
SEE https://en.bitcoin.it/wiki/Base58Check_encoding
"""
try:
decoded_address = base58.b58decode(address).hex()
result, checksum = decoded_address[:-8], decoded_address[-8:]
except ValueError:
return False
else:
for _ in range(1, 3):
result = hashlib.sha256(binascii.unhexlify(result)).hexdigest()
return checksum == result[:8] | 2,653 |
def to_nbody(cluster, do_key_params=False, ro=8.0, vo=220.0):
"""Convert stellar positions/velocities, centre of mass, and orbital position and velocity to Nbody units
- requires that cluster.zmbar, cluster.rbar, cluster.vstar are set (defaults are 1)
Parameters
----------
cluster : class
StarCluster
do_key_params : bool
call key_params to calculate key parameters after unit change (default: False)
ro : float
galpy radius scaling parameter
vo : float
galpy velocity scaling parameter
Returns
-------
None
History:
-------
2018 - Written - Webb (UofT)
"""
if cluster.units != "pckms":
cluster.to_pckms(do_key_params=False)
if cluster.units == "pckms":
cluster.m /= cluster.zmbar
cluster.x /= cluster.rbar
cluster.y /= cluster.rbar
cluster.z /= cluster.rbar
cluster.vx /= cluster.vstar
cluster.vy /= cluster.vstar
cluster.vz /= cluster.vstar
cluster.xc /= cluster.rbar
cluster.yc /= cluster.rbar
cluster.zc /= cluster.rbar
cluster.vxc /= cluster.vstar
cluster.vyc /= cluster.vstar
cluster.vzc /= cluster.vstar
cluster.xgc /= cluster.rbar
cluster.ygc /= cluster.rbar
cluster.zgc /= cluster.rbar
cluster.vxgc /= cluster.vstar
cluster.vygc /= cluster.vstar
cluster.vzgc /= cluster.vstar
cluster.units = "nbody"
cluster.rv3d()
if do_key_params:
cluster.key_params() | 2,654 |
def get_average_matrix(shape, matrices):
""" Take the average matrix by a list of matrices of same shape """
return _ImageConvolution().get_average_matrix(shape, matrices) | 2,655 |
def pph_custom_pivot(n, t0):
"""
    The algorithm receives a list n of coordinate pairs (a, b) and returns a list s containing only the
    coordinates that together yield a maximal ratio of the form r = ((a0 + a1 + ... + an) / (b0 + b1 + ... + bn)).
    The worst-case complexity is O(n^2), reached when the ratio of every element is always smaller than the
    ratio of the pivot. To find the pivot element, the algorithm computes:
    pivot = [a0 + (a1 + a2 + ... + an)] / [b0 + (b1 + b2 + ... + bn)]
    Args:
        n (list[Pair]): List of coordinates of type Pair.
        t0 (Pair): Initial a0 and b0 used as the reference for the algorithm.
    Returns:
        s (list[Pair]): List of coordinates that maximize the ratio r.
    """
    # 0- Create the HiperbolicSet object
    s = HiperbolicSet(t0.a, t0.b)
    k = n
    # 1- Compute a pivot using r = ((a0 + a1 + ... + an) / (b0 + b1 + ... + bn)) in O(n)
    pivot = custom_pivot(k, None, t0.a, t0.b)
    # 2- Run the recursion steps that compute the customized PPH
    res = pph_steps(k, pivot, pivot.a, pivot.b)
    # 6- Add the pairs that maximize the ratio to the result - O(n)
s.add_all(res)
return s | 2,656 |
def train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_arguments,
params,
tensorboard,
getter,
dataloader_val=None,
evaluator=None
):
"""
Model training engine.
:param model: model(data, targets) should return a loss dictionary
:param checkpointer: Checkpointer object
:param checkpoint_arguments: arguments that should be saved in checkpoint
:param params: training parameters:
        max_epochs: maximum number of epochs
        checkpoint_period: save a checkpoint every this many iterations
        print_every: report metrics every this many iterations
:param dataloader_val: validation dataset
:param evaluator: Evaluator object
"""
# get arguments
start_epoch = checkpoint_arguments['epoch']
start_iter = checkpoint_arguments['iteration']
max_epochs = params['max_epochs']
checkpoint_period = params['checkpoint_period']
print_every = params['print_every']
val_every = params['val_every']
max_iter = max_epochs * len(data_loader)
# metric logger
meters = MetricLogger(", ")
print("Start training")
start_training_time = time.time()
# end: the end time of last iteration
end = time.time()
first = True
for epoch in range(start_epoch, max_epochs):
model.train()
# starting from where we drop
enumerator = enumerate(data_loader, start_iter if first else 0)
for iteration, (data, targets) in enumerator:
# this is necessary to ensure the right number of epochs
if iteration >= max_iter:
break
# time used for loading data
data_time = time.time() - end
iteration = iteration + 1
            global_step = epoch * len(data_loader) + iteration
# step learning rate scheduler
if scheduler:
scheduler.step()
# batch training
# put data to device
data = {k: v.to(device) for (k, v) in data.items()}
targets = {k: v.to(device) for (k, v) in targets.items()}
# get losses
loss_dict = model(data, targets)
# reduce loss dictionary
loss_dict = {k: torch.mean(v) for k, v in loss_dict.items()}
# sum all losses for bp
meters.update(**loss_dict)
# loss = sum(loss for loss in loss_dict.values())
loss = loss_dict['loss']
optimizer.zero_grad()
loss.backward()
optimizer.step()
# time for one iteration
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
            # estimated seconds = number of iterations left * average time per iteration
            eta_seconds = meters.time.global_avg * (max_iter - (epoch * len(data_loader) + iteration))
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % print_every == 0 or iteration == max_iter:
print(
meters.delimiter.join(
[
"eta: {eta}",
"epoch: {epoch}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
epoch=epoch,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
)
)
tb_data = getter.get_tensorboard_data()
if not tensorboard is None:
metric_dict = meters.state_dict()
tensorboard.update(**metric_dict)
tensorboard.update(**tb_data)
                    tensorboard.add('train', global_step)
# save model, optimizer, scheduler, and other arguments
if iteration % checkpoint_period == 0:
checkpoint_arguments['epoch'] = epoch
# iteration should be kept in the checkpointer
checkpoint_arguments['iteration'] = iteration
checkpointer.save("model_{:05d}_{:07d}".format(epoch, iteration))
# evaluate result after each epoch
        if evaluator is not None and dataloader_val is not None and global_step % val_every == 0:
result = evaluate(model, device, dataloader_val, evaluator)
print('Validation result: ', result)
if tensorboard:
tensorboard.update(val_result=result)
# NOTE: back to train mode!
model.train()
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
print("Total training time: {} ({:.4f} s /it)".format(
total_time_str, total_training_time / (max_iter)
)) | 2,657 |
def make_csv(secret, site_id, path_to_csv=None, result_limit=1000, query_params=None):
"""
Function which fetches a video library and writes each video_objects Metadata to CSV. Useful for CMS systems.
:param secret: <string> Secret value for your JWPlatform API key
:param site_id: <string> ID of a JWPlatform site
:param path_to_csv: <string> Local system path to desired CSV. Default will be within current working directory.
:param result_limit: <int> Number of video results returned in response. (Suggested to leave at default of 1000)
:param query_params: Arguments conforming to standards found @ https://developer.jwplayer.com/jwplayer/reference#get_v2-sites-site-id-media
:return: <dict> Dict which represents the JSON response.
"""
path_to_csv = path_to_csv or os.path.join(os.getcwd(), 'video_list.csv')
timeout_in_seconds = 2
max_retries = 3
retries = 0
page = 1
videos = list()
if query_params is None:
query_params = {}
query_params["page_length"] = result_limit
jwplatform_client = jwplatform.client.JWPlatformClient(secret)
logging.info("Querying for video list.")
while True:
try:
query_params["page"] = page
response = jwplatform_client.Media.list(site_id=site_id, query_params=query_params)
except jwplatform.errors.TooManyRequestsError:
logging.error("Encountered rate limiting error. Backing off on request time.")
if retries == max_retries:
raise
            timeout_in_seconds *= 2  # Exponential back off for timeout in seconds. 2->4->8->etc.
retries += 1
time.sleep(timeout_in_seconds)
continue
except jwplatform.errors.APIError as e:
logging.error("Encountered an error querying for videos list.\n{}".format(e))
raise e
        # Reset retry flow-control variables after a successful (non-rate-limited) query
retries = 0
timeout_in_seconds = 2
# Add all fetched video objects to our videos list.
next_videos = response.json_body["media"]
for video in next_videos:
csv_video = video["metadata"]
csv_video["id"] = video["id"]
videos.append(csv_video)
page += 1
logging.info("Accumulated {} videos.".format(len(videos)))
if len(next_videos) == 0: # Condition which defines you've reached the end of the library
break
# Section for writing video library to csv
desired_fields = ['id', 'title', 'description', 'tags', 'publish_start_date', 'permalink']
should_write_header = not os.path.isfile(path_to_csv)
with open(path_to_csv, 'a+') as path_to_csv:
# Only write columns to the csv which are specified above. Columns not specified are ignored.
writer = csv.DictWriter(path_to_csv, fieldnames=desired_fields, extrasaction='ignore')
if should_write_header:
writer.writeheader()
writer.writerows(videos) | 2,658 |
def music(hot_music_url, **kwargs):
"""
get hot music result
:return: HotMusic object
"""
result = fetch(hot_music_url, **kwargs)
# process json data
datetime = parse_datetime(result.get('active_time'))
# video_list = result.get('music_list', [])
musics = []
music_list = result.get('music_list', [])
for item in music_list:
music = data_to_music(item.get('music_info', {}))
music.hot_count = item.get('hot_value')
musics.append(music)
# construct HotMusic object and return
return HotMusic(datetime=datetime, data=musics) | 2,659 |
def parse_from_docstring(docstring, spec='operation'):
"""Returns path spec from docstring"""
# preprocess lines
lines = docstring.splitlines(True)
parser = _ParseFSM(FSM_MAP, lines, spec)
parser.run()
return parser.spec | 2,660 |
def collection_headings(commodities) -> CommodityCollection:
"""Returns a special collection of headings to test header and chapter
parenting rules."""
keys = ["9900_80_0", "9905_10_0", "9905_80_0", "9910_10_0", "9910_80_0"]
return create_collection(commodities, keys) | 2,661 |
def get_ssh_user():
"""Returns ssh username for connecting to cluster workers."""
return getpass.getuser() | 2,662 |
def tryf(body, *handlers, elsef=None, finallyf=None):
"""``try``/``except``/``finally`` as a function.
This allows lambdas to handle exceptions.
``body`` is a thunk (0-argument function) that represents
the body of the ``try`` block.
``handlers`` is ``(excspec, handler), ...``, where
``excspec`` is either an exception type,
or a tuple of exception types.
``handler`` is a 0-argument or 1-argument
function. If it takes an
argument, it gets the exception
instance.
Handlers are tried in the order specified.
``elsef`` is a thunk that represents the ``else`` block.
``finallyf`` is a thunk that represents the ``finally`` block.
Upon normal completion, the return value of ``tryf`` is
the return value of ``elsef`` if that was specified, otherwise
the return value of ``body``.
If an exception was caught by one of the handlers, the return
value of ``tryf`` is the return value of the exception handler
that ran.
If you need to share variables between ``body`` and ``finallyf``
(which is likely, given what a ``finally`` block is intended
to do), consider wrapping the ``tryf`` in a ``let`` and storing
your variables there. If you want them to leak out of the ``tryf``,
you can also just create an ``env`` at an appropriate point,
and store them there.
"""
def accepts_arg(f):
try:
if arity_includes(f, 1):
return True
except UnknownArity: # pragma: no cover
return True # just assume it
return False
def isexceptiontype(exc):
try:
if issubclass(exc, BaseException):
return True
except TypeError: # "issubclass() arg 1 must be a class"
pass
return False
# validate handlers
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
if not all(isexceptiontype(t) for t in excspec):
raise TypeError(f"All elements of a tuple excspec must be exception types, got {excspec}")
elif not isexceptiontype(excspec): # single exception type
raise TypeError(f"excspec must be an exception type or tuple of exception types, got {excspec}")
# run
try:
ret = body()
except BaseException as exception:
# Even if a class is raised, as in `raise StopIteration`, the `raise` statement
# converts it into an instance by instantiating with no args. So we need no
# special handling for the "class raised" case.
# https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement
# https://stackoverflow.com/questions/19768515/is-there-a-difference-between-raising-exception-class-and-exception-instance/19768732
exctype = type(exception)
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
# this is safe, exctype is always a class at this point.
if any(issubclass(exctype, t) for t in excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else: # single exception type
if issubclass(exctype, excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else:
if elsef is not None:
return elsef()
return ret
finally:
if finallyf is not None:
finallyf() | 2,663 |
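
# A minimal usage sketch of tryf; the handler tuples follow the (excspec, handler) form
# described in the docstring above.
result = tryf(
    lambda: 1 / 0,
    (ZeroDivisionError, lambda: "division by zero"),
    ((TypeError, ValueError), lambda e: "bad input: {}".format(e)),
    finallyf=lambda: print("cleanup always runs"),
)
# result == "division by zero"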
def obtain_sheet_music(score, most_frequent_dur):
"""
    Returns unformatted sheet music from the score
"""
result = ""
octaves = [3 for i in range(12)]
accidentals = [False for i in range(7)]
for event in score:
for note_indx in range(len(event[0])):
data = notenum2string(event[0][note_indx], accidentals, octaves)
result += data[0]
accidentals = data[1]
octaves = data[2]
if note_indx != len(event[0])-1:
result += '-'
if event[1] != most_frequent_dur: # Quarters are default
result += '/'
result += dur2mod(event[1], most_frequent_dur)
result += ','
return result | 2,664 |
def adjacency_matrix(edges):
"""
Convert a directed graph to an adjacency matrix.
Note: The distance from a node to itself is 0 and distance from a node to
an unconnected node is defined to be infinite.
Parameters
----------
edges : list of tuples
list of dependencies between nodes in the graph
[(source node, destination node, weight), ...]
Returns
-------
out : tuple
(names, adjacency matrix)
names - list of unique nodes in the graph
adjacency matrix represented as list of lists
"""
# determine the set of unique nodes
names = set()
for src, dest, _ in edges:
# add source and destination nodes
names.add(src)
names.add(dest)
# convert set of names to sorted list
names = sorted(names)
# determine initial adjacency matrix with infinity weights
matrix = [[float('Inf')] * len(names) for _ in names]
for src, dest, weight in edges:
# update weight in adjacency matrix
matrix[names.index(src)][names.index(dest)] = weight
for src in names:
matrix[names.index(src)][names.index(src)] = 0
# return list of names and adjacency matrix
return names, matrix | 2,665 |
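
# Sketch of the expected input/output: three nodes, unconnected pairs stay at float('Inf').
edges = [("a", "b", 3), ("b", "c", 1), ("a", "c", 7)]
names, matrix = adjacency_matrix(edges)
# names  == ['a', 'b', 'c']
# matrix == [[0, 3, 7],
#            [inf, 0, 1],
#            [inf, inf, 0]]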
def estimate_Cn(P=1013, T=273.15, Ct=1e-4):
"""Use Weng et al to estimate Cn from meteorological data.
Parameters
----------
P : `float`
atmospheric pressure in hPa
T : `float`
temperature in Kelvin
Ct : `float`
        atmospheric structure constant of temperature, typically 10^-5 to 10^-2 near the surface
Returns
-------
`float`
Cn
"""
return (79 * P / (T ** 2)) * Ct ** 2 * 1e-12 | 2,666 |
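
# Example call (illustrative numbers only, not validated against Weng et al.):
# near-surface conditions with the default temperature structure constant.
cn = estimate_Cn(P=1013, T=288.15, Ct=1e-4)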
def sg_get_scsi_status_str(scsi_status):
""" Fetch scsi status string. """
buff = _get_buffer(128)
libsgutils2.sg_get_scsi_status_str(scsi_status, 128, ctypes.byref(buff))
return buff.value.decode('utf-8') | 2,667 |
def numpy_grid(x, pad=0, nrow=None, uint8=True):
""" thin wrap to make_grid to return frames ready to save to file
args
pad (int [0]) same as utils.make_grid(padding)
        nrow (int [None]) # defaults to horizontally biased rectangle closest to square
uint8 (bool [True]) convert to img in range 0-255 uint8
"""
x = x.clone().detach().cpu()
nrow = nrow or int(math.sqrt(x.shape[0]))
x = ((utils.make_grid(x, nrow=nrow, padding=pad).permute(1,2,0) - x.min())/(x.max()-x.min())).numpy()
if uint8:
x = (x*255).astype("uint8")
return x | 2,668 |
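
# Usage sketch (assumes torch, torchvision.utils as utils, math and numpy are imported,
# matching what the function above uses): tile a batch into one uint8 image array.
import torch

batch = torch.rand(16, 3, 32, 32)
grid = numpy_grid(batch, pad=2)
# grid is an HxWx3 uint8 array, e.g. ready for an image writer such as imageio.imwrite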
def test__dialect__ansi_specific_segment_not_parse(raw, err_locations, caplog):
"""Test queries do not parse, with parsing errors raised properly."""
lnt = Linter()
parsed = lnt.parse_string(raw)
assert len(parsed.violations) > 0
locs = [(v.line_no(), v.line_pos()) for v in parsed.violations]
assert locs == err_locations | 2,669 |
def if_active(f):
"""decorator for callback methods so that they are only called when active"""
@functools.wraps(f)
def inner(self, loop, *args, **kwargs):
if self.active:
return f(self, loop, *args, **kwargs)
return inner | 2,670 |
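
# Sketch of the intended use (hypothetical class; assumes functools is imported for the
# decorator above): callbacks run only while the object is active.
class Widget:
    def __init__(self):
        self.active = True

    @if_active
    def on_tick(self, loop, count):
        return count + 1

w = Widget()
w.on_tick(None, 1)    # returns 2
w.active = False
w.on_tick(None, 1)    # returns None, the callback body is skipped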
def _offset(requests: int = 3) -> None:
"""
    Finds the offset.
    :param requests: number of requests to issue while searching for the offset
    :return: None
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(Offset(
requests=requests
).find()) | 2,671 |
def main() -> None:
""" Main function that runs when the Script is called from the Console """
parser = argparse.ArgumentParser(description='A Script to populate App '
'Database with the test Data.')
parser.add_argument('-d', '--drop', help='Drop Data from the Tables.',
action='store_true')
parser.add_argument('-ld', '--limit-departments', default=14,
help='Limit Number of Departments', type=int)
parser.add_argument('-le', '--limit-employees', default=10,
help='Limit Number of Employees per Department', type=int)
args = parser.parse_args()
if args.drop:
drop_data()
populate_departments(args.limit_departments)
populate_employees(args.limit_employees) | 2,672 |
def test_helmholtz_single_layer_p0_p1(
default_parameters, helpers, precision, device_interface
):
"""Test dense assembler for the slp with disc. p0/p1 basis."""
from bempp.api.operators.boundary.helmholtz import single_layer
from bempp.api import function_space
grid = helpers.load_grid("sphere")
space0 = function_space(grid, "DP", 0)
space1 = function_space(grid, "DP", 1)
discrete_op = single_layer(
space1,
space1,
space0,
WAVENUMBER,
assembler="dense",
precision=precision,
device_interface=device_interface,
parameters=default_parameters,
).weak_form()
expected = helpers.load_npy_data("helmholtz_single_layer_boundary_p0_dp1")
_np.testing.assert_allclose(
discrete_op.A, expected, rtol=helpers.default_tolerance(precision)
) | 2,673 |
def obs_all_node_target_pairs_one_hot(agent_id: int, factory: Factory) -> np.ndarray:
"""One-hot encoding (of length nodes) of the target location for each node. Size of nodes**2"""
num_nodes = len(factory.nodes)
node_pair_target = np.zeros(num_nodes ** 2)
for n in range(num_nodes):
core_target_index = []
        if factory.nodes[n].table is not None and factory.nodes[n].table.has_core():
core_target_index = [
factory.nodes.index(factory.nodes[n].table.core.current_target)
]
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.asarray(
one_hot_encode(num_nodes, core_target_index)
)
else:
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.zeros(num_nodes)
return node_pair_target | 2,674 |
def repeat_each_position(shape: GuitarShape, length: int = None, repeats: int = 2, order: Callable = asc) -> List[
List[FretPosition]]:
"""
Play each fret in the sequence two or more times
"""
if length is not None:
div_length = math.ceil(length / repeats)
else:
div_length = length
pattern = order(shape, length=div_length)
new_positions = []
for positions in pattern:
new_positions.extend([positions] * repeats)
if length is not None and len(new_positions) != length:
new_positions = adjust_length(new_positions, length)
return new_positions | 2,675 |
def verify_same_strand(tx_names, data):
"""Verify exons are on the same strand"""
strands = data['strand'].unique()
if len(strands) > 1:
raise ValueError(f"Multiple strands found in {tx_names}, skipping.") | 2,676 |
def gz_csv_read(file_path, use_pandas=False):
"""Read a gzipped csv file.
"""
import csv
import gzip
    from io import StringIO
    with gzip.open(file_path, 'rt') as infile:
if use_pandas:
import pandas
data = pandas.read_csv(StringIO(infile.read()))
else:
reader = csv.reader(StringIO(infile.read()))
data = [row for row in reader]
return data | 2,677 |
def init_block(in_channels, out_channels, stride, activation=nn.PReLU):
"""Builds the first block of the MobileFaceNet"""
return nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
make_activation(activation)
) | 2,678 |
def yield_in_except_throw_exc_type():
"""
>>> g = yield_in_except_throw_exc_type()
>>> next(g)
>>> g.throw(TypeError)
Traceback (most recent call last):
TypeError
>>> next(g)
Traceback (most recent call last):
StopIteration
"""
try:
raise ValueError
except ValueError as exc:
assert sys.exc_info()[1] is exc, sys.exc_info()
yield
assert sys.exc_info()[1] is exc, sys.exc_info() | 2,679 |
def address_working(address, value=None):
"""
Find, insert or delete from database task address
:param address: website address example: https://www.youtube.com/
:param value: True: add , False: remove, default: find
:return:
"""
global db
if value is True:
db.tasks.insert_one({'Address': address})
return True
if value is False:
db.tasks.delete_many({'Address': address})
return False
x = list(db.tasks.find({'Address': address}))
if len(x) == 0:
return False
else:
return True | 2,680 |
def merge_default_values(resource_list, default_values):
"""
Generate a new list where each item of original resource_list will be merged with the default_values.
Args:
resource_list: list with items to be merged
default_values: properties to be merged with each item list. If the item already contains some property
the original value will be maintained.
Returns:
list: list containing each item merged with default_values
"""
def merge_item(resource):
return merge_resources(default_values, resource)
return lmap(merge_item, resource_list) | 2,681 |
async def async_setup_entry_usb(hass, config_entry, async_add_entities):
"""Set up Plugwise sensor based on config_entry."""
api_stick = hass.data[DOMAIN][config_entry.entry_id][STICK]
async def async_add_sensors(mac: str):
"""Add plugwise sensors for device."""
entities = []
entities.extend(
[
USBSensor(api_stick.devices[mac], description)
for description in PW_SENSOR_TYPES
if description.plugwise_api == STICK
and description.key in api_stick.devices[mac].features
]
)
if entities:
async_add_entities(entities)
for mac in hass.data[DOMAIN][config_entry.entry_id][SENSOR_DOMAIN]:
hass.async_create_task(async_add_sensors(mac))
def discoved_device(mac: str):
"""Add sensors for newly discovered device."""
hass.async_create_task(async_add_sensors(mac))
# Listen for discovered nodes
api_stick.subscribe_stick_callback(discoved_device, CB_NEW_NODE) | 2,682 |
def paint_smi_matrixs(matrixs, index=0):
"""paint similarity matrix (TSM/ KQ) """
plt.clf()
b, c, w, h = matrixs.shape
for i in range(c):
matrix = matrixs[0, i, :, :].detach().cpu().numpy()
plt.imshow(matrix)
plt.colorbar()
dir = 'graph/matrixs{0}'.format(index)
if not os.path.exists(dir):
os.mkdir('graph/matrixs{0}'.format(index))
plt.savefig(fname="graph/matrixs{0}/matrix{1}.png".format(index, str(i)), dpi=400)
plt.close() | 2,683 |
def test_ap_wpa2_eap_tls_domain_match_cn(dev, apdev):
"""WPA2-Enterprise using EAP-TLS and domainmatch (CN)"""
check_domain_match(dev[0])
params = int_eap_server_params()
params["server_cert"] = "auth_serv/server-no-dnsname.pem"
params["private_key"] = "auth_serv/server-no-dnsname.key"
hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TLS",
identity="tls user", ca_cert="auth_serv/ca.pem",
private_key="auth_serv/user.pkcs12",
private_key_passwd="whatever",
domain_match="server3.w1.fi",
scan_freq="2412") | 2,684 |
def finish_scheduling(request, schedule_item=None, payload=None):
"""
Finalize the creation of a scheduled action. All required data is passed
through the payload.
:param request: Request object received
:param schedule_item: ScheduledAction item being processed. If None,
it has to be extracted from the information in the payload.
:param payload: Dictionary with all the required data coming from
previous requests.
:return:
"""
# Get the payload from the session if not given
if payload is None:
payload = request.session.get(session_dictionary_name)
# If there is no payload, something went wrong.
if payload is None:
# Something is wrong with this execution. Return to action table.
messages.error(request,
_('Incorrect action scheduling invocation.'))
return redirect('action:index')
# Get the scheduled item if needed
if not schedule_item:
s_item_id = payload.get('schedule_id')
if not s_item_id:
messages.error(request, _('Incorrect parameters in action scheduling'))
return redirect('action:index')
# Get the item being processed
schedule_item = ScheduledAction.objects.get(pk=s_item_id)
# Check for exclude values and store them if needed
exclude_values = payload.get('exclude_values')
if exclude_values:
schedule_item.exclude_values = exclude_values
schedule_item.status = ScheduledAction.STATUS_PENDING
schedule_item.save()
# Create the payload to record the event in the log
log_payload = {
'action': schedule_item.action.name,
'action_id': schedule_item.action.id,
'execute': schedule_item.execute.isoformat(),
}
if schedule_item.action.action_type == Action.PERSONALIZED_TEXT:
log_payload.update({
'email_column': schedule_item.item_column.name,
'subject': schedule_item.payload.get('subject'),
'cc_email': schedule_item.payload.get('cc_email', []),
'bcc_email': schedule_item.payload.get('bcc_email', []),
'send_confirmation': schedule_item.payload.get('send_confirmation',
False),
'track_read': schedule_item.payload.get('track_read', False)
})
log_type = Log.SCHEDULE_EMAIL_EDIT
elif schedule_item.action.action_type == Action.PERSONALIZED_JSON:
ivalue = None
if schedule_item.item_column:
ivalue = schedule_item.item_column.name
log_payload.update({
'item_column': ivalue,
'token': schedule_item.payload.get('subject')
})
log_type = Log.SCHEDULE_JSON_EDIT
else:
log_type = None
# Create the log
Log.objects.register(request.user,
log_type,
schedule_item.action.workflow,
log_payload)
# Notify the user. Show the time left until execution and a link to
# view the scheduled events with possibility of editing/deleting.
# Successful processing.
now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
tdelta = schedule_item.execute - now
# Reset object to carry action info throughout dialogs
request.session[session_dictionary_name] = {}
request.session.save()
# Create the timedelta string
delta_string = ''
if tdelta.days != 0:
delta_string += ugettext('{0} days').format(tdelta.days)
    hours = tdelta.seconds // 3600
if hours != 0:
delta_string += ugettext(', {0} hours').format(hours)
    minutes = (tdelta.seconds % 3600) // 60
if minutes != 0:
delta_string += ugettext(', {0} minutes').format(minutes)
# Successful processing.
return render(request,
'scheduler/schedule_done.html',
{'tdelta': delta_string,
's_item': schedule_item}) | 2,685 |
def handle_result(context, res):
"""Handle results (sink)"""
_, date_key, df = res
try:
df = df.join(context['cep'])
except TypeError:
context['cep'].index = context['cep'].index.tz_localize(df.index.tz)
df = df.join(context['cep'])
df.to_csv(os.path.join(DST_DIR, f'{date_key}.csv'))
logger.info(res) | 2,686 |
def analyze(results_file, base_path):
"""
Parse and print the results from gosec audit.
"""
# Load gosec json Results File
with open(results_file) as f:
issues = json.load(f)['Issues']
if not issues:
print("Security Check: No Issues Detected!")
return ([], [], [])
else:
high_risk = list()
medium_risk = list()
low_risk = list()
# Sort Issues
for issue in issues:
if issue['severity'] == 'HIGH':
high_risk.append(issue)
elif issue['severity'] == 'MEDIUM':
medium_risk.append(issue)
elif issue['severity'] == 'LOW':
low_risk.append(issue)
# Print Summary
print()
print('Security Issue Summary:')
print(' Found ' + str(len(high_risk)) + ' High Risk Issues')
print(' Found ' + str(len(medium_risk)) + ' Medium Risk Issues')
print(' Found ' + str(len(low_risk)) + ' Low Risk Issues')
# Print Issues In Order of Importance
if high_risk:
header = ('= High Security Risk Issues =')
print_category(header, high_risk, base_path)
if medium_risk:
header = ('= Medium Security Risk Issues =')
print_category(header, medium_risk, base_path)
if low_risk:
header = ('= Low Security Risk Issues =')
print_category(header, low_risk, base_path)
return (high_risk, medium_risk, low_risk) | 2,687 |
def _peaks(image,nr,minvar=0):
"""Divide image into nr quadrants and return peak value positions."""
n = np.ceil(np.sqrt(nr))
quadrants = _rects(image.shape,n,n)
peaks = []
for q in quadrants:
q_image = image[q.as_slice()]
q_argmax = q_image.argmax()
q_maxpos = np.unravel_index(q_argmax,q.shape)
if q_image.flat[q_argmax] > minvar:
peaks.append(np.array(q_maxpos) + q.origin)
return peaks | 2,688 |
def send_init_analytics(opt_out: bool, config_path: str, executed_at: datetime) -> None:
"""
Create a new `AnalyticsClient` and send an `AnalyticsEvent` representing
the execution of `fidesctl init` by a user.
"""
if opt_out is not False:
return
analytics_id = get_config_from_file(config_path, "cli", "analytics_id")
app_name = fidesctl.__name__
try:
client = AnalyticsClient(
client_id=analytics_id or generate_client_id(FIDESCTL_CLI),
developer_mode=bool(getenv("FIDESCTL_TEST_MODE") == "True"),
os=system(),
product_name=app_name + "-cli",
production_version=version(app_name),
)
event = AnalyticsEvent(
"cli_command_executed",
executed_at,
command="fidesctl init",
docker=bool(getenv("RUNNING_IN_DOCKER") == "TRUE"),
resource_counts=None, # TODO: Figure out if it's possible to capture this
)
client.send(event)
except AnalyticsError:
pass | 2,689 |
def adf_test(df: pd.Series):
"""
    Stationarity test - Dickey-Fuller test
    $H0$ (null hypothesis): there is no evidence to claim that the series is not non-stationary (it has a unit root).
    $H1$ (alternative hypothesis): the series is not non-stationary, i.e. it is stationary.
    * If the p-value is within 5%, the null hypothesis is rejected.
    * Stationarity is checked on the Adjusted Close series.
    * If the p-value is not smaller than 0.05, the null hypothesis cannot be rejected; since there is no
      evidence that the series is stationary, it can be regarded as non-stationary.
:param df: _description_
:type df: pd.Series
"""
result = adfuller(df)
print("Test statistic: ", result[0])
print("p-value: ", result[1])
print("Critic values")
for k, v in result[4].items():
print("\t%s : %.3f" % (k, v)) | 2,690 |
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e) | 2,691 |
def dev_populate_db():
"""Performs the initial database setup for the application
"""
current_app.logger.info("Initializing tables with dev data")
roles = {x.name: x for x in model.UserRole.query.all()}
db_session.add_all(
[
model.User(
"superuser", "SuperUser", "pass", user_roles=list(roles.values())
),
model.User(
"observer", "ObserverUser", "pass", user_roles=[roles["observer"]]
),
]
)
contestants = []
names = [
"Fred", "George", "Jenny", "Sam", "Jo", "Joe", "Sarah", "Ben", "Josiah", "Micah"
]
for i in range(1, 5):
test_contestant = model.User(
"testuser{}".format(i),
names[i - 1],
"pass",
user_roles=[roles["defendant"]],
)
db_session.add(test_contestant)
contestants.append(test_contestant)
# create test contest
now = datetime.datetime.utcnow()
test_contest = model.Contest(
name="test_contest",
start_time=now,
end_time=now + datetime.timedelta(hours=2),
is_public=True,
activate_time=now,
freeze_time=None,
deactivate_time=None,
)
test_contest.users += contestants
db_session.add(test_contest)
io_problem_type = model.ProblemType.query.filter_by(name="input-output").one()
problems = []
hello_world = model.Problem(
io_problem_type,
"hello-world",
"Hello, World!",
'Print the string "Hello, World!"',
"",
"Hello, World!",
"",
"Hello, World!",
)
problems.append(hello_world)
test_contest.problems.append(hello_world)
db_session.add(hello_world)
n = 5000
hello_worlds = model.Problem(
io_problem_type,
"hello-worlds",
"Hello, Worlds!",
'Print the string "Hello, World!" n times',
"2",
"Hello, World!\nHello, World!",
str(n),
"Hello, World!\n" * n,
)
problems.append(hello_worlds)
test_contest.problems.append(hello_worlds)
db_session.add(hello_worlds)
fizzbuzz = model.Problem(
io_problem_type,
"fizzbuzz",
"FizzBuzz",
"Perform fizzbuzz up to the given number\n\nMore info can be found [here](https://en.wikipedia.org/wiki/Fizz_buzz)",
"3",
"1\n2\nFizz",
"15",
"1\n2\nFizz\n4\nBuzz\nFizz\n7\n8\nFizz\nBuzz\n11\nFizz\n13\n14\nFizzBuzz\n",
)
problems.append(fizzbuzz)
test_contest.problems.append(fizzbuzz)
db_session.add(fizzbuzz)
fibonacci = model.Problem(
io_problem_type,
"fibonoacci",
"Fibonacci",
"Give the nth number in the Fibonacci sequence",
"4",
"3",
"5",
"5",
)
problems.append(fibonacci)
test_contest.problems.append(fibonacci)
db_session.add(fibonacci)
ext_fibonacci = model.Problem(
io_problem_type,
"ext-fib",
"Extended Fibonacci",
"Give the the numbers of the Fibonacci sequence between 0 and n, inclusive.\nIf n is positive, the range is [0,n].\nIf n is negative, the range is [n,0].",
"-3",
"2\n-1\n1\n0",
"-5",
"5\n-3\n2\n-1\n1\n0",
)
problems.append(ext_fibonacci)
test_contest.problems.append(ext_fibonacci)
db_session.add(ext_fibonacci)
# insert submissions
python = model.Language.query.filter_by(name="python").one()
solutions = {
"Hello, World!": "print('Hello, World!')",
"Hello, Worlds!": "for i in range(int(input())):\n\tprint('Hello, World!')",
"FizzBuzz": 'print("\\n".join("Fizz"*(i%3==0)+"Buzz"*(i%5==0) or str(i) for i in range(1,int(input())+1)))',
"Fibonacci": "fib = lambda n: n if n < 2 else fib(n-1) + fib(n-2)\nprint(fib(int(input())))",
"Extended Fibonacci": "print('5\\n-3\\n2\\n-1\\n1\\n0')",
}
problem_subs = []
for problem in problems:
for user in contestants:
for _ in range(2):
problem_subs.append((problem, user))
random.shuffle(problem_subs)
for problem, user in problem_subs:
src_code = solutions[problem.name]
is_submission = random.randint(1, 7) != 5
is_priority = random.randint(1, 9) == 7
is_correct = random.randint(1, 2) == 2
if not is_correct:
src_code = src_code + "\nprint('Wait this isn\\'t correct')"
test_run = model.Run(
user,
test_contest,
python,
problem,
datetime.datetime.utcnow(),
src_code,
problem.secret_input,
problem.secret_output,
is_submission,
)
test_run.is_correct = is_correct
test_run.is_priority = is_priority
test_run.state = model.RunState.JUDGING
db_session.add(test_run)
util.set_configuration("extra_signup_fields", json.dumps(["email"]))
db_session.commit() | 2,692 |
def connection_type_validator(type):
"""
Property: ConnectionInput.ConnectionType
"""
valid_types = [
"CUSTOM",
"JDBC",
"KAFKA",
"MARKETPLACE",
"MONGODB",
"NETWORK",
"SFTP",
]
if type not in valid_types:
raise ValueError("% is not a valid value for ConnectionType" % type)
return type | 2,693 |
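
# Example of the validator in use (a sketch, matching the valid_types list above):
connection_type_validator("JDBC")     # returns "JDBC"
# connection_type_validator("FTP")    # would raise ValueError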
def plot_time_series(meter_data, temperature_data, **kwargs):
""" Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``.
"""
# TODO(philngo): include image in docs.
    figure = kwargs.pop('figure', None)
    default_kwargs = {"figsize": (16, 4)}
    default_kwargs.update(kwargs)
    if not figure:
        try:
            import matplotlib.pyplot as plt
        except ImportError:  # pragma: no cover
            raise ImportError("matplotlib is required for plotting.")
        fig, ax1 = plt.subplots(**default_kwargs)
    else:
        # Reuse the provided figure; figure-level kwargs such as figsize do not apply here.
        fig = figure
        ax1 = figure.subplots()
ax1.plot(
meter_data.index,
meter_data.value,
color="C0",
label="Energy Use",
drawstyle="steps-post",
)
ax1.set_ylabel("Energy Use")
ax2 = ax1.twinx()
ax2.plot(
temperature_data.index,
temperature_data,
color="C1",
label="Temperature",
alpha=0.8,
)
ax2.set_ylabel("Temperature")
fig.legend()
return ax1, ax2 | 2,694 |
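
# Usage sketch with synthetic data (assumes pandas as pd, numpy as np and matplotlib are
# available, and relies on the figure handling above):
import numpy as np
import pandas as pd

index = pd.date_range("2021-01-01", periods=100, freq="D", tz="UTC")
meter_data = pd.DataFrame({"value": np.random.rand(100)}, index=index)
temperature_data = pd.Series(20 + 10 * np.random.randn(100), index=index)
ax_meter, ax_temperature = plot_time_series(meter_data, temperature_data, figsize=(12, 3))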
def render_pretty_time(jd):
"""Convert jd into a pretty string representation"""
year, month, day, hour_frac = sweph.revjul(jd)
_, hours, minutes, seconds = days_frac_to_dhms(hour_frac/24)
time_ = calendar.timegm((year,month,day,hours,minutes,seconds,0,0,0))
return time.strftime('%e %b %Y %H:%M UTC', time.gmtime(time_)) | 2,695 |
def jsonify(obj):
"""Dump an object to JSON and create a Response object from the dump.
Unlike Flask's native implementation, this works on lists.
"""
dump = json.dumps(obj)
return Response(dump, mimetype='application/json') | 2,696 |
def section(stree):
"""
Create sections in a :class:`ScheduleTree`. A section is a sub-tree with
the following properties: ::
* The root is a node of type :class:`NodeSection`;
* The immediate children of the root are nodes of type :class:`NodeIteration`
and have same parent.
* The :class:`Dimension` of the immediate children are either: ::
* identical, OR
* different, but all of type :class:`SubDimension`;
* The :class:`Dimension` of the immediate children cannot be a
:class:`TimeDimension`.
"""
class Section(object):
def __init__(self, node):
self.parent = node.parent
self.dim = node.dim
self.nodes = [node]
def is_compatible(self, node):
return (self.parent == node.parent
and (self.dim == node.dim or node.dim.is_Sub))
# Search candidate sections
sections = []
for i in range(stree.height):
# Find all sections at depth `i`
section = None
for n in findall(stree, filter_=lambda n: n.depth == i):
if any(p in flatten(s.nodes for s in sections) for p in n.ancestors):
# Already within a section
continue
elif not n.is_Iteration or n.dim.is_Time:
section = None
elif section is None or not section.is_compatible(n):
section = Section(n)
sections.append(section)
else:
section.nodes.append(n)
# Transform the schedule tree by adding in sections
for i in sections:
node = NodeSection()
processed = []
for n in list(i.parent.children):
if n in i.nodes:
n.parent = node
if node not in processed:
processed.append(node)
else:
processed.append(n)
i.parent.children = processed
return stree | 2,697 |
def was_csv_updated() -> bool:
""" This function compares the last modified time on the csv file to the
actions folder to check which was last modified.
1. check if csv or files have more actions.
2. if same number of actions, assume the update was made in the csv
"""
csv_actions = get_cas_from_csv()
file_actions = get_cas_from_files()
    return (
        len(csv_actions) >= len(file_actions)
) | 2,698 |
def MakeBands(dR, numberOfBands, nearestInteger):
"""
Divide a range into bands
:param dR: [min, max] the range that is to be covered by the bands.
:param numberOfBands: the number of bands, a positive integer.
:param nearestInteger: if True then [floor(min), ceil(max)] is used.
:return: A List consisting of [min, midpoint, max] for each band.
"""
bands = list()
if (dR[1] < dR[0]) or (numberOfBands <= 0):
return bands
x = list(dR)
if nearestInteger:
x[0] = math.floor(x[0])
x[1] = math.ceil(x[1])
dx = (x[1] - x[0]) / float(numberOfBands)
b = [x[0], x[0] + dx / 2.0, x[0] + dx]
i = 0
while i < numberOfBands:
bands.append(b)
b = [b[0] + dx, b[1] + dx, b[2] + dx]
i += 1
return bands | 2,699 |
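
# Example: three bands over [0, 10]; each band is [min, midpoint, max].
MakeBands([0.0, 10.0], 3, False)
# -> [[0.0, 1.666..., 3.333...], [3.333..., 5.0, 6.666...], [6.666..., 8.333..., 10.0]]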