content | sha1 | id
stringlengths 35–416k | stringlengths 40 | int64 0–710k
---|---|---
from typing import Mapping
def update_dict(d, u):
"""Return updated dict.
http://stackoverflow.com/a/3233356
:param d: dict
:type d: dict
:param u: updated dict.
:type u: dict
:rtype: dict
"""
for k, v in u.items():
if isinstance(v, Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d | 5c855e4d66afee04887fccea2ee8154cd8e40ea5 | 702,198 |
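A quick usage sketch for update_dict (the inputs below are assumed for illustration), showing the recursive merge of nested mappings:
d = {'a': 1, 'b': {'x': 1, 'y': 2}}
u = {'b': {'y': 3, 'z': 4}, 'c': 5}
print(update_dict(d, u))  # {'a': 1, 'b': {'x': 1, 'y': 3, 'z': 4}, 'c': 5}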
import numpy
import pandas
def parseBamReadcountIndel(row):
"""Parsing indels only"""
# first split ref data
ref_split = row[row['REF']].split(':')
ref_split = ref_split[:1] + [float(x) for x in ref_split[1:]]
# now aggregate information for alt alleles
possib_alt = ['INDEL', 'INDEL1', 'INDEL2', 'INDEL3']
num_allels = 0
alt_read_count = 0
cum_array = numpy.zeros(12)
alts = []
for i in possib_alt:
row_list = row[i].split(':')
if int(row_list[1]) > 0:
num_allels += 1
allel_count = float(row_list[1])
alt_read_count += allel_count
alts += row_list[:2]
cum_array += allel_count * numpy.array(row_list[2:], dtype=float)
if alt_read_count > 0:
cum_array = cum_array / alt_read_count
clmns_detail = ['base',
'count',
'avg_mapping_quality',
'avg_base_quality',
'avg_se_mapping_quality',
'num_plus_strand',
'num_minus_strand',
'avg_pos_as_fraction',
'avg_num_mismatches_as_fraction',
'avg_sum_mismatch_qualities',
'num_q2_containing_reads',
'avg_dist_to_q2_start_in_q2_reads',
'avg_clipped_length',
'avg_dist_to_effective_3p_end']
res = pandas.Series(ref_split + [num_allels, '_'.join(alts), alt_read_count] +
cum_array.tolist(),
['REF_'+ x for x in clmns_detail] + ['num_allels'] +
['ALT_'+ x for x in clmns_detail])
return res | a14b3cd5573ca2381b6284e8420a78b9d0763c48 | 702,199 |
from pathlib import Path
def get_solar_charge_state() -> str:
"""
Gets the current state of the charging system
Returns:
The charge state object as a json string
"""
current_state = Path('current_state.json').read_text()
return current_state | 0621aff9e6ae77b48811b2879f644ff3e4e4ee91 | 702,200 |
def get_fixture_value(request, fixture_name):
"""
Returns the value associated with fixture named `fixture_name`, in provided `request` context.
This is just an easy way to use `getfixturevalue` or `getfuncargvalue` according to whichever is available in the
current `pytest` version.
:param request: pytest request object
:param fixture_name: name of the fixture to retrieve
:return: the fixture value
"""
try:
# Pytest 4+ or latest 3.x (to avoid the deprecated warning)
return request.getfixturevalue(fixture_name)
except AttributeError:
# Pytest 3-
return request.getfuncargvalue(fixture_name) | 11e2b5f67595ecf102f7a8f28cc4aa151a8ebca5 | 702,201 |
def quote_ident(column: str):
"""
---------------------------------------------------------------------------
Returns the specified string argument in the format that is required in
order to use that string as an identifier in an SQL statement.
Parameters
----------
column: str
Column's name.
Returns
-------
str
Formatted column name.
"""
tmp_column = str(column)
if len(tmp_column) >= 2 and (tmp_column[0] == tmp_column[-1] == '"'):
tmp_column = tmp_column[1:-1]
return '"{}"'.format(str(tmp_column).replace('"', '""')) | cdfe1f7e108904ef35c1733b91fea1a51e239570 | 702,202 |
def find_opt_end(options):
""" Find the end of an option (;) handling escapes. """
offset = 0
while True:
i = options[offset:].find(";")
if i == -1:
# no (further) unescaped ';' in the string
return -1
if offset + i > 0 and options[offset + i - 1] == "\\":
# this ';' is escaped; skip past it and keep searching
offset += i + 1
else:
return offset + i | 5e3404ffb2b776402a598351374f3390500edef3 | 702,203 |
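A minimal check of find_opt_end on assumed inputs; the escaped ';' is skipped:
print(find_opt_end('size=10;color=red'))  # 7 (index of the first ';')
print(find_opt_end('a\\;b;c'))            # 4 (the ';' at index 2 is escaped)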
def gas_fvf(z, temp, pressure):
"""
Calculate Gas FVF
For range: this is not a correlation, so valid for infinite intervals
"""
temp = temp + 459.67  # convert °F to °R (Rankine)
Bg = 0.0282793 * z * temp / pressure
return(Bg) | 375ac4aeac7f5177aab6e1bb4c2ab604b80778d2 | 702,204 |
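A worked example with assumed reservoir values (z = 0.85, 150 °F, 2500 psia); the function applies Bg = 0.0282793 * z * T / P after converting to °R:
print(gas_fvf(0.85, 150, 2500))  # ~0.00586 (res ft3/scf)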
def printSchoolYear(year1):
"""Return a print version of the given school year.
"""
if year1:
return "%d–%d" % (year1 - 1, year1) | 93879512567a2be3e3cf541b747178b422be0590 | 702,206 |
import functools
import io
import sys
def check_print(assert_in: str = "", length: int = -1):
"""Captures output of print function and checks if the function contains a given string"""
def checker(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
capturedOutput = io.StringIO()
sys.stdout = capturedOutput
func(*args, **kwargs)
sys.stdout = sys.__stdout__
capt = capturedOutput.getvalue()
if assert_in:
assert assert_in in capt
return None
if length >= 0:
assert len(capt) > length
return capt
return wrapper
return checker | 2b295c59609b8581e641d6c99608c95144e9b403 | 702,207 |
def dahua_brightness_to_hass_brightness(bri_str: str) -> int:
"""
Converts a Dahua brightness (0 to 100 inclusive) to what HASS
expects, which is 0 to 255 inclusive
"""
bri = 100
if bri_str:  # fall back to 100 (full brightness) when the string is empty
bri = int(bri_str)
current = bri / 100
return int(current * 255) | d1d8d02f896edc4a16fbb1b26c99416b60764fc6 | 702,208 |
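A quick sanity check of the fixed conversion (assumed inputs):
print(dahua_brightness_to_hass_brightness("50"))  # 127
print(dahua_brightness_to_hass_brightness(""))    # 255 (defaults to full brightness)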
from pathlib import Path
def api(path:str):
"""Associate a path with a given method."""
def inner(fn):
fn.path = Path(path)
return fn
return inner | a03d5ea05c5b91e3e18b121fba7771691f508b0c | 702,210 |
import torch
def randn(*args, **kwargs):
"""
In ``treetensor``, you can use ``randn`` to create a tree of tensors whose values
obey the standard normal distribution.
Example::
>>> import torch
>>> import treetensor.torch as ttorch
>>> ttorch.randn(2, 3) # the same as torch.randn(2, 3)
tensor([[-0.8534, -0.5754, -0.2507],
[ 0.0826, -1.4110, 0.9748]])
>>> ttorch.randn({'a': (2, 3), 'b': {'x': (4, )}})
<Tensor 0x7ff363bb6518>
├── a --> tensor([[ 0.5398, 0.7529, -2.0339],
│ [-0.5722, -1.1900, 0.7945]])
└── b --> <Tensor 0x7ff363bb6438>
└── x --> tensor([-0.7181, 0.1670, -1.3587, -1.5129])
"""
return torch.randn(*args, **kwargs) | e92e45bc0210f6b00c468629c741eab2d0a7a50a | 702,212 |
def hsv_complement_color(h, s, v):
""" get the complement of a rgb color
:param h: Hue value (0-360)
:param s: Saturation value (0-255)
:param v: Value value (0-255)
:return: HSV tuple """
# perform 180° hue change
tmp = 180
if h > 180:
tmp = -tmp
return h + tmp, s, v | dd9769b93742349eaca124ee60d65f047c18f2b5 | 702,214 |
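A short check on assumed inputs; the hue rotates by 180° in either direction while saturation and value are unchanged:
print(hsv_complement_color(30, 255, 200))   # (210, 255, 200)
print(hsv_complement_color(300, 255, 200))  # (120, 255, 200)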
import requests
def telegram_bot_sendtext(bot_message: str, *, bot_token: str, bot_chatid: str):
"""Send a notification to a telegram channel.
Args:
bot_message (str): Message you want to send.
bot_token (str): Telegram bot API token.
bot_chatid (str): Identifier of the target chat/channel.
Returns:
[request]: Returns a request object which is the response to send to the channel.
"""
send_url = 'https://api.telegram.org/bot' + bot_token + '/sendMessage'
# let requests URL-encode the message instead of concatenating it into the URL
response = requests.get(send_url, params={'chat_id': bot_chatid, 'parse_mode': 'html', 'text': bot_message})
return response.json() | 372d350ad5513a233b148b3f75b8fa5bdd2f2904 | 702,215 |
def preprocess_words_scores(type2freq_1, type2score_1, type2freq_2, type2score_2,
stop_lens, stop_words, handle_missing_scores):
"""
Filters stop words according to a list of words or stop lens on the scores
Parameters
----------
type2freq_1, type2freq_2: dict
Keys are types, values are frequencies of those types
type2score_1, type2score_2: dict
Keys are types, values are scores associated with those types
stop_lens: iterable of 2-tuples
Denotes intervals that should be excluded from word shifts
stop_words: iterable
Denotes words that should be excluded from word shifts
handle_missing_scores: str
If 'error', throws an error whenever a word has a score in one score
dictionary but not the other. If 'exclude', excludes any word that is
missing a score in one score dictionary from all word shift
calculations, regardless of whether it has a score in the other
dictionary. If 'adopt' and the score is missing in one dictionary,
then uses the score from the other dictionary if it is available
"""
ts_1 = set(type2freq_1.keys()).union(set(type2score_1.keys()))
ts_2 = set(type2freq_2.keys()).union(set(type2score_2.keys()))
ts = ts_1.union(ts_2)
type2freq_1_new = dict()
type2score_1_new = dict()
type2freq_2_new = dict()
type2score_2_new = dict()
adopted_score_types = set()
no_score_types = set()
filtered_types = set()
for t in ts:
# Exclude words specified by stop words
if t in stop_words:
filtered_types.add(t)
continue
# Handle words with missing scores before excluding based on stop lens
if t in type2score_1:
s_1 = type2score_1[t]
else:
s_1 = None
if t in type2score_2:
s_2 = type2score_2[t]
else:
s_2 = None
# Word does not have a score in either dictionary
if t not in type2score_1 and t not in type2score_2:
no_score_types.add(t)
continue
# Word has score in dict2 but not dict1
elif t not in type2score_1 and t in type2score_2:
if handle_missing_scores == "adopt":
s_1 = type2score_2[t]
s_2 = type2score_2[t]
adopted_score_types.add(t)
elif handle_missing_scores == "error":
raise KeyError(
"Word has freq but no score in type2score_1: {}".format(t)
)
elif handle_missing_scores == "exclude":
no_score_types.add(t)
continue
else:
raise ValueError(
"handle_missing_scores has not been provided a valid argument"
)
# Word has score in dict1 but not dict2
elif t in type2score_1 and t not in type2score_2:
if handle_missing_scores == "adopt":
s_1 = type2score_1[t]
s_2 = type2score_1[t]
adopted_score_types.add(t)
elif handle_missing_scores == "error":
raise KeyError(
"Word has freq but no score in type2score_2: {}".format(t)
)
elif handle_missing_scores == "exclude":
filtered_types.add(t)
continue
else:
raise ValueError(
"handle_missing_scores has not been provided a valid argument"
)
# Word has score in dict1 and dict2
else:
s_1 = type2score_1[t]
s_2 = type2score_2[t]
# Exclude words based on stop lens
filter_word = False
for lower, upper in stop_lens:
# Word is in stop lens
if (lower <= s_1 and s_1 <= upper) and (lower <= s_2 and s_2 <= upper):
filter_word = True
# One score is in stop lens but the other is not
elif (lower <= s_1 and s_1 <= upper) or (lower <= s_2 and s_2 <= upper):
raise ValueError(
"{}: stop_lens cannot be applied consistently.".format(t)\
+ " One word score falls within the stop lens while the"\
+ " other does not."
)
if filter_word:
filtered_types.add(t)
continue
# Set words and freqs for words that pass all checks
type2score_1_new[t] = s_1
if t in type2freq_1:
type2freq_1_new[t] = type2freq_1[t]
else:
type2freq_1_new[t] = 0
type2score_2_new[t] = s_2
if t in type2freq_2:
type2freq_2_new[t] = type2freq_2[t]
else:
type2freq_2_new[t] = 0
# Update types to only be those that made it through all filters
final_types = ts.difference(filtered_types).difference(no_score_types)
return (
type2freq_1_new,
type2freq_2_new,
type2score_1_new,
type2score_2_new,
final_types,
filtered_types,
no_score_types,
adopted_score_types
) | fe91586acbd9722e24daebcb942971e969f990d2 | 702,216 |
def _satisfies_wolfe(val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
df(a) the derivative along the search direction evaluated a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: Instance of _FnDFn. The function and derivative value at 0.
val_c: Instance of _FnDFn. The function and derivative value at the
point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
"""
exact_wolfe_suff_dec = (sufficient_decrease_param * val_0.df >=
(val_c.f - val_0.f) / val_c.x)
wolfe_curvature = val_c.df >= curvature_param * val_0.df
exact_wolfe = exact_wolfe_suff_dec & wolfe_curvature
approx_wolfe_applies = val_c.f <= f_lim
approx_wolfe_suff_dec = ((2 * sufficient_decrease_param - 1) * val_0.df
>= val_c.df)
approx_wolfe = approx_wolfe_applies & approx_wolfe_suff_dec & wolfe_curvature
is_satisfied = exact_wolfe | approx_wolfe
return is_satisfied | d4afc60f4a6e3d7749935fbbf6d291f4224cc04c | 702,217 |
def fit_parabola(x1,x2,x3,y1,y2,y3):
"""Returns the parabola coefficients a,b,c given 3 data points [y(x)=a*x**2+b*x+c]"""
denom = (x1-x2)*(x1-x3)*(x2-x3)
a = (x3*(y2-y1)+x2*(y1-y3)+x1*(y3-y2))/denom
b = (x1**2*(y2-y3)+x3**2*(y1-y2)+x2**2*(y3-y1))/denom
c = (x2**2*(x3*y1-x1*y3)+x2*(x1**2*y3-x3**2*y1)+x1*x3*(x3-x1)*y2)/denom
return a,b,c | e077f5a895e353d5b980b15bee603be5c34d3ec4 | 702,218 |
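A sanity check with three points taken from an assumed parabola y = 2x**2 + 3x + 1; the recovered coefficients should match:
a, b, c = fit_parabola(0, 1, 2, 1, 6, 15)
print(a, b, c)  # 2.0 3.0 1.0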
def generate_list(usb_dict: dict) -> list:
"""
wrapper for list conversion
:param usb_dict: usb dictionary
:return: list of usb devices for tui print
"""
devices = []
for usb in usb_dict.values():
devices.append(usb)
return devices | 8d1d106c7b9fd4078b0d78f0370bd9d22ecda368 | 702,219 |
def file_test_list(tmpdir_factory, string_test_list):
"""An example of output from
tempest run --list-tests
"""
filename = tmpdir_factory.mktemp('data').join('file_test_list_one').strpath
with open(filename, 'w') as f:
f.write(string_test_list)
return filename | 6ce88df4834d90b1507e9b9171946a5fda1d557a | 702,220 |
def reset_line_breaks(curr_boundary={}):
"""
Builds a fresh line breaks dictionary while keeping any
information provided concerning line boundaries.
Parameters
----------
curr_boundary: dict
Line boundaries to be preserved
Returns
-------
dict
The newly initialized line breaks dictionary
"""
start = []
end = []
tokens = []
if "end" in curr_boundary:
end = curr_boundary["end"]
if "start" in curr_boundary:
start = curr_boundary["start"]
if "tokens" in curr_boundary:
tokens = curr_boundary["tokens"]
line_breaks = {
"end": end,
"pageBoundaries": {},
"start": start,
"tokens": tokens
}
return line_breaks | da7f1fc0f206e8a39f0a0d42f1652a3c4bb23200 | 702,221 |
def stop(M, i):
"""
Check if the algorithm converged.
:param M: input matrix
:param i: iteration step
:return: boolean: True if converged
"""
# this saves time, so we don't have to do the multiplication in the first 7 iterations
if i > 6:
M_temp = M ** 2 - M
m = M_temp.max() - M_temp.min()
if abs(m) < 1e-8:
return True
return False | 3fc6cec40db8e52aed6e435a5552a8cf8edb68a1 | 702,222 |
def InrecaMoreIsBetter(caseAttrib, queryValue, jump, weight):
"""
Returns the similarity of two numbers following the INRECA - More is better formula.
"""
try:
queryValue = float(queryValue)
# build query string
queryFnc = {
"function_score": {
"query": {
"match_all": {}
},
"script_score": {
"script": {
"source": "if (doc[params.attrib].value <= params.queryValue) { return 1 } return params.jump * (1 - ((params.queryValue - doc[params.attrib].value) / params.queryValue))",
"params": {
"jump": jump,
"attrib": caseAttrib,
"queryValue": queryValue
}
}
},
"boost": weight,
"_name": "inrecamore"
}
}
return queryFnc
except ValueError:
print("InrecaMoreIsBetter() is only applicable to numbers") | 3488ae92e463cdd2baae917e25d91415a44d5ec5 | 702,223 |
import argparse
def get_args():
""" Get command-line arguments. """
parser = argparse.ArgumentParser(
description='get extent of the given image')
parser.add_argument(
'path_img', metavar='path_img', type=str, nargs='+',
help='path of the remote sensing image (.tif)')
return parser.parse_args() | 1209d3b48bd05a5a5e0b7fa116c962183d8eae12 | 702,224 |
def swapAMP(oldAMP, newAMP):
"""
Swap delivery of messages from an old L{AMP} instance to a new one.
This is useful for implementors of L{StoreSpawnerService} since they will
typically want to create one protocol for initializing the store, and
another for processing application commands.
@param oldAMP: An AMP instance currently hooked up to a transport, whose
job is done and wants to stop receiving messages.
@param newAMP: An AMP instance who wants to take over and start receiving
messages previously destined for oldAMP.
@return: C{newAMP}
"""
oldAMP.boxReceiver = newAMP
newAMP.startReceivingBoxes(oldAMP)
return newAMP | cf47e038be3b2c89ab9e6f23777cd6b39d073c20 | 702,225 |
def show_states_func(sp):
"""Function returning states for each frame
"""
E0 = sp.params["E0"]
dE = sp.params["dE"]
omega = sp.params["omega"]
return [[E0,"--k"], [E0+dE, "--b"], [E0+dE-omega, "--r"]] | b7d426682c324b08d622efd202b8d5cdf78336cd | 702,226 |
def split_text(text, sep='\n'):
"""Split text."""
if isinstance(text, bytes):
text = text.decode("utf8")
return [elm.strip() for elm in text.split(sep) if elm.strip()] | 5247919ab151b2e4d1b4e631046d7030e673a68a | 702,228 |
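A quick usage sketch (inputs assumed); bytes are decoded and blank segments dropped:
print(split_text("  alpha \n\n beta\ngamma  "))  # ['alpha', 'beta', 'gamma']
print(split_text(b"a, b,", sep=","))             # ['a', 'b']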
import os
def write_csv(df, **kwargs):
"""
write csv
:param df:
:param kwargs: file_name: file name
file_path: file path
sep: sep
path_or_buf: path or buffer
header: ['A', 'B']
index: index
:return: {
'file_name': file_name,
'output_path': output_path
}
"""
file_name = kwargs.get('file_name', '')
file_path = kwargs.get('file_path', '')
output_path = os.path.join(file_path, file_name)
sep = kwargs.get('sep', ',')
header = kwargs.get('header')
index = kwargs.get('index', False)
df.to_csv(path_or_buf=output_path,
sep=sep,
header=header,
index=index,
encoding='utf-8')
return {
'file_name': file_name,
'output_path': output_path
} | ac98530b24c48dae9abc79e021a32f274a57f3e6 | 702,229 |
import re
def replace_image_link(target_str):
"""
Replace the shorthand of an image link { image.jpg } with the full link {{ img_tag("image.jpg") | safe }}
:param target_str: String with images in it to be edited
:return: string with images formatted as {{ img_tag("image.jpg") | safe }}
"""
# Find all image links as {image.jpg} or { image.jpg } etc...
image_list = re.findall(r'{(\s*\w+\.\w+\s*)}', target_str)
# Strip surrounding whitespace to get the bare image names.
img_name_list = [cit.strip() for cit in image_list]
for idx, img in enumerate(image_list):
# Replace the citation shorthand with the proper markdown.
target_str = target_str.replace(img, f'{{ img_tag("{img_name_list[idx]}") | safe }}')
return target_str | d72cbaacecec7d2654a20f50098f21059130dbc3 | 702,230 |
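A short demonstration on an assumed string; the single braces in the f-string replacement merge with the original surrounding braces:
print(replace_image_link('Intro { photo.jpg } outro'))
# Intro {{ img_tag("photo.jpg") | safe }} outro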
def create_option(option, display, window, active=True):
"""
Returns an option `dict` to be used by lottus
:param option `str`: the value of the option
:param display `str`: the value that will be displayed
:param window `str`: the name of the window that this option points to
:param active `bool`: indicates whether the option will be shown to the client
"""
return {
'option': option,
'display': display,
'window': window,
'active': active
} | 31eb71dee85a7876997d4b41914f974cbcfcf938 | 702,231 |
import re
def remove_proximity_around_booleans(query_str):
"""
Clients like PEP-Web (Gavant) send fulltext1 as a proximity string.
This removes the proximity if there's a boolean inside.
We could have the client "not do that", but it's actually easier to
remove than to parse and add.
>>> a = '(article_xml:"dog AND cat"~25 AND body:"quick fox"~25) OR title:fox'
>>> remove_proximity_around_booleans(a)
'(article_xml:(dog AND cat) AND body:"quick fox"~25) OR title:fox'
>>> a = 'body_xml:"Even and Attention"~25 && body_xml:tuckett'
>>> remove_proximity_around_booleans(a)
'body_xml:"Even and Attention"~25 && body_xml:tuckett'
"""
srch_ptn = r'\"([A-z\s0-9\!\@\*\~\-\&\|\[\]]+)\"~25'
changes = False
while 1:
m = re.search(srch_ptn, query_str)
if m is not None:
# does it have a boolean, a quote, or a bracket (range)?
# n = re.search(r"\s(AND|OR|NOT|\&\&|\|\|)\s|([\"\[\']])", m.group(1), flags=re.IGNORECASE)
# 2021-04-01 Booleans must be UPPERCASE now
n = re.search(r"\s(AND|OR|NOT|\&\&|\|\|)\s|([\"\[\']])", m.group(1))
# if it's not None, then this is not a proximity match
if n is not None:
query_str = re.subn(srch_ptn, r'(\1)', query_str, 1)[0]
else: # change it so it doesn't match next loop iter
query_str = re.subn(srch_ptn, r'"\1"~26', query_str, 1)[0]
changes = True
else:
if changes:
# change proximity ranges back
query_str = re.sub("~26", "~25", query_str)
break
return query_str | b89ac8ab52cf00f1902603c38bc3f4fdd47cbda2 | 702,232 |
def _provenance_str(provenance):
"""Utility function used by compare_provenance to print diff
"""
return ["%s==%s" % (key, value) for (key, value) in provenance] | 2cbe1f177122a49bb747cce0ccca3fd715349a6a | 702,233 |
def api_client(application, request):
"""
Fixture that returns api_client
Parameters:
app (Application): Application for which create the client.
Returns:
api_client (HttpClient): Api client for application
"""
def _api_client(app=application, **kwargs):
client = app.api_client(**kwargs)
request.addfinalizer(client.close)
return client
return _api_client | cf2894c8f8c2adb8a8700dfa1b9f3a99e86909d8 | 702,234 |
def get_domain_adapt_config(cfg):
"""Get the configure parameters for video data for action recognition domain adaptation from the cfg files"""
config_params = {
"data_params": {
"dataset_root": cfg.DATASET.ROOT,
"dataset_src_name": cfg.DATASET.SOURCE,
"dataset_src_trainlist": cfg.DATASET.SRC_TRAINLIST,
"dataset_src_testlist": cfg.DATASET.SRC_TESTLIST,
"dataset_tgt_name": cfg.DATASET.TARGET,
"dataset_tgt_trainlist": cfg.DATASET.TGT_TRAINLIST,
"dataset_tgt_testlist": cfg.DATASET.TGT_TESTLIST,
"dataset_image_modality": cfg.DATASET.IMAGE_MODALITY,
"dataset_input_type": cfg.DATASET.INPUT_TYPE,
"dataset_class_type": cfg.DATASET.CLASS_TYPE,
"dataset_num_segments": cfg.DATASET.NUM_SEGMENTS,
"frames_per_segment": cfg.DATASET.FRAMES_PER_SEGMENT,
}
}
return config_params | d203aa00b3349ec7c2e6043a0496fc7da82f1b08 | 702,235 |
def rgb2hex(r,g,b):
"""
Convert a RGB vector to hexadecimal values
"""
hexfmt = "#%02x%02x%02x"%(r,g,b)
return hexfmt | cf0452aa22d9dbdee3158a4926fa0755ca19fbd3 | 702,236 |
def _parse_docstring_field(field_lines):
"""
@param field_lines: docstring field text, e.g. "@param name: description"
@type field_lines: C{str}
@return: pair of argument name and a dict of updates for that argument's info
@rtype: C{tuple}
"""
if field_lines.startswith('@type'):
field_data = field_lines.split(None, 2)
arg_name = field_data[1].strip(':')
arg_type = field_data[2].replace('\n', ' ').strip()
return arg_name, {'type_name': arg_type}
if field_lines.startswith('@keyword') or field_lines.startswith('@param'):
field_data = field_lines.split(None, 2)
arg_name = field_data[1].strip(':')
arg_description = field_data[2].strip()
return arg_name, {'description': arg_description,
'required': '(required)' in arg_description} | fb10dc1db15d56fc70ee32e733113309eb9b24c3 | 702,237 |
from functools import reduce
def _is_num_tuple(t,size):
"""Returns: True if t is a sequence of numbers; False otherwise.
If the sequence is not of the given size, it also returns False.
Parameter t: The value to test
Precondition: NONE
Parameter size: The size of the sequence
Precondition: size is an int >= 0
"""
try:
return len(t) == size and reduce(lambda x, y: x and y, map(lambda z: type(z) in [int, float], t))
except TypeError:
# t has no len(), or reduce() was given an empty sequence
return False | 64b3795b8e90dc38a7c48cd177d8e2aaffc0aa3d | 702,239 |
import argparse
def options_parse():
"""
Command line option parser
"""
parser = argparse.ArgumentParser()
# Options for model parameters setup (only change if model training was changed)
parser.add_argument('--num_filters', type=int, default=64,
help='Filter dimensions for DenseNet (all layers same). Default=64')
parser.add_argument('--num_classes_ax_cor', type=int, default=79,
help='Number of classes to predict in axial and coronal net, including background. Default=79')
parser.add_argument('--num_classes_sag', type=int, default=51,
help='Number of classes to predict in sagittal net, including background. Default=51')
parser.add_argument('--num_channels', type=int, default=7,
help='Number of input channels. Default=7 (thick slices)')
parser.add_argument('--kernel_height', type=int, default=5, help='Height of Kernel (Default 5)')
parser.add_argument('--kernel_width', type=int, default=5, help='Width of Kernel (Default 5)')
parser.add_argument('--stride', type=int, default=1, help="Stride during convolution (Default 1)")
parser.add_argument('--stride_pool', type=int, default=2, help="Stride during pooling (Default 2)")
parser.add_argument('--pool', type=int, default=2, help='Size of pooling filter (Default 2)')
sel_option = parser.parse_args()
return sel_option | 60cb3e0b74bada6c720bc0038aefc809a9d825f1 | 702,240 |
def jsonify_dict(d):
"""Turns python booleans into strings so hps dict can be written in json.
Creates a shallow-copied dictionary first, then accomplishes string
conversion.
Args:
d: hyperparameter dictionary
Returns: hyperparameter dictionary with bool's as strings
"""
d2 = d.copy() # shallow copy is fine by assumption of d being shallow
def jsonify_bool(boolean_value):
if boolean_value:
return "true"
else:
return "false"
for key in d2.keys():
if isinstance(d2[key], bool):
d2[key] = jsonify_bool(d2[key])
return d2 | afbf5819fc4fda444076562b02deb22f8146f123 | 702,241 |
def concat(funcname, args):
"""Return args spliced by sql concat operator."""
return " || ".join(args) | baf8d1a9e128d9c490744a93a90ae00b3072dc24 | 702,242 |
def generate_fe_entry(entry, name):
"""add function
"""
java_output = ""
java_output += "\"" + name + "\""
java_output += ", \"" + entry["symbol"] + "\""
if entry["user_visible"]:
java_output += ", true"
else:
java_output += ", false"
if 'prepare' in entry:
java_output += ', "%s"' % entry["prepare"]
else:
java_output += ', null'
if 'close' in entry:
java_output += ', "%s"' % entry["close"]
else:
java_output += ', null'
java_output += ", Function.NullableMode." + entry["nullable_mode"]
java_output += ", PrimitiveType." + entry["ret_type"]
# Check the last entry for varargs indicator.
if entry["args"] and entry["args"][-1] == "...":
entry["args"].pop()
java_output += ", true"
else:
java_output += ", false"
for arg in entry["args"]:
java_output += ", PrimitiveType." + arg
return java_output | edb2ca8b4624873a57de4f2cbaf4159e2b58f19b | 702,244 |
from pathlib import Path
def _load_requirements(requirements_file, folder="requirements"):
"""Load requirements from a file."""
requirements = []
with open(Path(folder) / Path(requirements_file), "r") as f:
for line in f:
line = line.strip()
if line and not line.startswith("#"):
requirements.append(line)
return requirements | e9d56a025986f9a2899b3d070033abcdeec21956 | 702,245 |
def get_emails(notification_rec):
"""
Get list of emails for users listed in the specified notification
"""
# Use a set to drop duplicate addresses while preserving order.
seen = set()
ret = []
for recipient in notification_rec.recipients.all():
if recipient.email not in seen:
seen.add(recipient.email)
ret.append(recipient.email)
return ret | 9c01b1e5615cf3a35fbda0c4d92a1e092cfc3d59 | 702,246 |
import six
def text_type(string, encoding='utf-8'):
"""
Given text, or bytes as input, return text in both python 2/3
This is needed because the arguments to six.binary_type and six.text_type change based on
if you are passing it text or bytes, and if you simply pass bytes to
six.text_type without an encoding you will get output like: ``six.text_type(b'hello-world')``
which is not desirable.
"""
if isinstance(string, six.text_type):
return six.text_type(string)
else:
return six.text_type(string, encoding) | 5b962c348769ccb1029cd0d41fc23ddb6942d37d | 702,247 |
def quote_value(value: str) -> str:
"""
Ensures values with ";" are quoted.
>>> quote_value("foo")
'foo'
>>> quote_value("foo;bar")
'"foo;bar"'
"""
if value.find(";") != -1:
return f'"{value}"'
return value | e6bb23a17d554742115582feb90ba621ddd7fc66 | 702,248 |
def estimation_formula_bg(growth, eps):
"""
The stock price estimation formula suggested by Benjamin Graham
According to "The Intelligent Investor"
"""
return (2*growth+8.5)*eps | b675c3ec8cd82d600473d2646cc759bc8f3279be | 702,249 |
def make_quality_step(env, run_time, route_to, transit_time=1, **kwargs):
"""
"""
return {
'location': env['quality_bench'],
'worker': env['qual_inspector'],
'manned': True,
'setup_time': 0,
'run_time': run_time,
'teardown_time': 0,
'transit_time': transit_time,
'route_to': route_to
} | 3f4ac237f4d0d0cd9245d6e615495563a5152a23 | 702,250 |
def reverse_sorting_order(str_name):
"""Return False if str_name ends with one of the err_strings suffixes. Otherwise, return True.
Negation was introduced as the function is used to determine the order of the sorting depending on scoring
function name: if scoring ends with "_error" or "_loss", it means that lower score is better. If it doesn't,
then it means that higher score is better. As default sorting is ascending, reverse=True needs to be explicitly
provided for the sorted function so that sortable object (e.g. list) is sorted in a descending fashion.
Args:
str_name (str): string to be evaluated
Returns:
bool: False if str_name ends with one of the strings defined inside the function; True otherwise.
"""
err_strings = ("_error", "_loss")
return not str_name.endswith(err_strings) | 481b78912262fd086121b6281d3f53c13f3571ff | 702,251 |
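A usage sketch with assumed scorer names, e.g. to drive sorted(..., reverse=...):
print(reverse_sorting_order('accuracy'))            # True
print(reverse_sorting_order('neg_log_loss'))        # False
print(reverse_sorting_order('mean_squared_error'))  # False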
def getUserInput():
"""
Purpose: to validate user Input
Parameters: None
Returns: x - validated user input
"""
x = input("Recursion Depth: ")
try:
x = int(x)
except ValueError:
x = getUserInput()
return x | 87a67cf12e1a45e16068bf7c64e53b33e3cdbdeb | 702,252 |
import subprocess
import os
def run_pd_rpc(cmd_or_code, no_print=False):
"""
This function invokes run_pd_rpc.py tool. It has a single string argument
cmd_or_code that works as follows:
If it is a string:
* if the string starts with os.sep, then it is a filename
* otherwise it is a piece of code (passed via "--eval"
Else it is a list/tuple and it is passed "as-is"
Note: do not attempt to run the tool in the interactive mode!
"""
path = os.path.join(os.environ['HOME'], "tools", "run_pd_rpc.py")
command = [path]
if isinstance(cmd_or_code, str):
if cmd_or_code.startswith(os.sep):
command.extend(["--no-wait", cmd_or_code])
else:
command.extend(["--no-wait", "--eval", cmd_or_code])
else:
command.extend(cmd_or_code)
result = subprocess.check_output(command).decode("utf-8")[:-1]
if not no_print:
print(result)
return result | 7dafdcfdd640595b8c4ef2815205c9fb9928ee51 | 702,253 |
import numpy as np
def extrapolate_accel_data_testing(filename):
"""Extrapolate data from a txt file
data will have format:
x1,y1,z1;
x2,y2,z2;
..
xn,yn,zn;
:param filename: file to read from
:return: datax, datay, dataz vectors (np.array)
"""
x = []
y = []
z = []
with open(filename, 'r') as f:
alllines = f.readlines()
for line in alllines:
components = line.split(',')
sep = ';'
cleaned_z = components[2].split(sep,1)[0]
components[2] = cleaned_z
x.append(float(components[0]))
y.append(float(components[1]))
z.append(float(components[2]))
return np.array(x), np.array(y), np.array(z) | 53d6f44450285306b41eb8e4969a32d4d1c3ceb7 | 702,254 |
def model_repr(models, key):
"""Get a model representation."""
m = [(char, proba)
for char, proba in models[key].items() if proba > 0.0001]
m = sorted(m, key=lambda n: n[1], reverse=True)
s = ""
for char, prob in m:
s += u"{}={:4.2f}% ".format(char, prob * 100)
return s | a4b658b8a7fd7529e14f452e426ca35447991042 | 702,255 |
import glob
def findNewFiles():
"""
Find all of the files that have been downloaded, and then determine all of
those that have not been decrypted. Return a list of those that need to be
decrypted now.
"""
allFiles = glob.glob('*.gpg')
oldFiles = glob.glob('*.gz')
retFiles = []
for f in allFiles:
if f[:-4] not in oldFiles:
retFiles.append(f)
return retFiles | 0d9052856e1cf8ba605026f1a74e259b06e5f342 | 702,256 |
from pathlib import Path
def walk(_path: str) -> tuple:
"""
:param _path:
:return: (resolved path, list of dirs, list of files)
"""
dirs = []
files = []
path = Path(_path)
for i in path.iterdir():
if i.is_file():
files.append(str(i))
if i.is_dir():
dirs.append(str(i))
return path.resolve(), dirs, files | 06c8a7daa310da8df3d52a05e3f950d9c1c5ef37 | 702,257 |
def idx2token(idx, reverse_vocab):
"""
Look up the token for a given index.
:param idx: index
:param reverse_vocab: reverse lookup table, @see chatbot.build_vocab
:return: the token
"""
return reverse_vocab[idx] | 4ce26e6a6a103133ffe0212d01a4c52a8a23479d | 702,258 |
def message_from_lax(data):
"""
format a message from a Lax response data
"""
return data.get("message") if data.get("message") else "(empty message)" | 81ba7399bc0e3e86ee1967988a17fd7f3524d8ab | 702,259 |
import pwd
import os
def get_username():
"""get the current users user name"""
return pwd.getpwuid(os.geteuid()).pw_name | 0748a32ba9925250f1cae6ff454d79ef3808c59a | 702,260 |
def part1(_input):
"""
part 1
"""
dif1 = 0
dif3 = 0
for idx, num in enumerate(_input):
try:
nxt = abs(_input[idx+1] - num)
if nxt == 1:
dif1 += 1
if nxt == 3:
dif3 += 1
except IndexError:
break
# the device's built-in adapter always adds one more difference of 3
dif3 += 1
return dif1 * dif3 | 5bd2e4edffe2cef9dc30d51025d54a7831675788 | 702,261 |
import itertools
def get_n_bits_combinations(num_bits: int) -> list:
"""
Function returning list containing all combinations of n bits.
Given num_bits binary bits, each bit has value 0 or 1,
there are in total 2**n_bits combinations.
:param num_bits: int, number of combinations to evaluate
:return: a list of length 2**n_bits,
return[i] is the binary representation of the decimal integer.
:Example:
>>> from deepreg.model.layer_util import get_n_bits_combinations
>>> get_n_bits_combinations(3)
[[0, 0, 0], # 0
[0, 0, 1], # 1
[0, 1, 0], # 2
[0, 1, 1], # 3
[1, 0, 0], # 4
[1, 0, 1], # 5
[1, 1, 0], # 6
[1, 1, 1]] # 7
"""
assert num_bits >= 1
return [list(i) for i in itertools.product([0, 1], repeat=num_bits)] | 6813f76f856a639688d6b80ddce0e605707f8d1f | 702,262 |
def counting_sort(nums):
"""
counting sort algorithm's complexity is:
time = O(n + k)
space = O(n + k)
where k = max(nums)
Apply this algorithm when O(k) <= O(n), and when all numbers in nums are >= 0
"""
# get frequency list
num_max = max(nums)
freqs = [0] * (num_max + 1)
for num in nums:
freqs[num] += 1
# get sumfreqs
sumfreqs = [0] * (num_max + 1)
for i in range(1, len(sumfreqs)):
sumfreqs[i] = freqs[i] + sumfreqs[i-1]
# get the sorted list
ans = [0] * len(nums)
for num in nums[::-1]:
ans[sumfreqs[num] - 1] = num
sumfreqs[num] -= 1
return ans | 7bfdd8db0f1b177d4e6d3749ef10b997c0c07f81 | 702,263 |
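A quick check on an assumed list of small non-negative integers:
print(counting_sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]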
def make_dict(data_for_dict):
""" takes all text from nubbe list and makes a dictionary.
data_for_dict: nubbe list created with parse_file()
"""
column_name_list = data_for_dict[0]
db_list = data_for_dict[1:]
# build one list of values per column, keyed by the column name
nubbe_dict = {name: [] for name in column_name_list}
for line in db_list:
for name, value in zip(column_name_list, line):
nubbe_dict[name].append(value)
return nubbe_dict | cb4725d0d23a33ba9e0b699032478dd1e54940b2 | 702,264 |
def get_centers(bins):
"""Return the center of the provided bins.
Example:
>>> get_centers(bins=np.array([0.0, 1.0, 2.0]))
array([0.5, 1.5])
"""
bins = bins.astype(float)
return (bins[:-1] + bins[1:]) / 2 | 4f5b3454e1ef718302c7e5ea204954d498ca9e10 | 702,265 |
def hex_16bit(value):
"""Converts 16bit value into bytearray.
args:
16bit value
returns:
bytearray of size 2
"""
if value > 0xffff or value < 0:
raise Exception('Sar file 16bit value %s out of range' % value)
return value.to_bytes(2, 'little') | 1c5aab076798b40459bf5afab73fd92e8dbb93a1 | 702,266 |
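A short check of the little-endian packing (assumed value):
print(hex_16bit(0x1234).hex())  # '3412' -- low byte first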
import os
def ensure_path( path, inc_file=False ):
"""
extension: ensures that the input path exists, creating directories as needed
input: string (path/to/file), Boolean (see notes)
output: None
notes: if inc_file is True the last element of path is assumed to be a file
this file is created if it does not exist and is unaltered otherwise
"""
path_list = path.split( os.path.sep )
#if path starts '/', do so for curpath
curpath = '' if path_list[0] != '' else os.path.sep
for folder in path_list[:-1]:
curpath = os.path.join( curpath, folder )
if not os.path.isdir( curpath ):
os.mkdir( curpath )
curpath = os.path.join( curpath, path_list[-1] )
if inc_file is False:
if not os.path.isdir( curpath ):
os.mkdir( curpath )
else:
if not os.path.isfile( curpath ):
with open( curpath, 'a' ):
pass
return None | 30868d42ade7acc952d83b2bd208b5e9489c92f7 | 702,267 |
import copy
import json
import hashlib
def generate_key(dict_data, daily=True):
"""generate key from a dictionary"""
cache_dict = copy.copy(dict_data)  # note: the 'daily' flag is currently unused
json_data = json.dumps(cache_dict)
return hashlib.md5(json_data.encode('utf-8')).hexdigest() | dee5f0519fccc89353be7fafe006a5280c535261 | 702,268 |
def count_attrib_correct(pred, label, idx):
"""
:param pred: tensor of predicted attribute vectors, one row per sample
:param label: tensor of ground-truth attribute vectors
:param idx: index of the attribute to compare
:return: number of samples whose attribute at `idx` matches
"""
assert pred.size(0) == label.size(0)
correct_num = 0
for one, two in zip(pred, label):
if one[idx] == two[idx]:
correct_num += 1
return correct_num | 5ad4b4191f99e379bfec14d2fd2fd954273e97f7 | 702,269 |
def transpose(matrix):
"""Transpose a list of lists.
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
[['a', 'd'], ['b', 'e'], ['c', 'f']]
>>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
[['a', 'd', 'g'], ['b', 'e', 'h']]
"""
result = zip(*matrix)
# Convert list of tuples to list of lists.
# map is faster than a list comprehension since it is being used with
# a built-in function as an argument.
result = list(map(list, result))  # list() is needed under Python 3, where map is lazy
return result | e96e7fbd074115a4291cc495c0251d1083f4592e | 702,270 |
import torch
def reduce(tensor: torch.Tensor, reduction: str) -> torch.Tensor:
"""Reduces the given tensor using a specific criterion.
Args:
tensor (torch.Tensor): input tensor
reduction (str): string with fixed values [elementwise_mean, none, sum]
Raises:
ValueError: when the reduction is not supported
Returns:
torch.Tensor: reduced tensor, or the tensor itself
"""
if reduction in ("elementwise_mean", "mean"):
return torch.mean(tensor)
if reduction == 'sum':
return torch.sum(tensor)
if reduction is None or reduction == 'none':
return tensor
raise ValueError('Reduction parameter unknown.') | a77edd7f9a8486a8fd604b9a35c2ecfe28d43c8c | 702,271 |
def safe_read_tensor_value(variable):
"""Reads variable value or raises an exception."""
value = variable.tensor_value
if value is None:
raise ValueError("".join((
"Attempted to read a TensorVariable in a context where it has no ",
"value. This commonly happens for one of two reasons:",
"",
" 1) You created a model in one transformed function and directly",
" accessed the model variables (e.g. via `model.variables` or"
" `model.w`) inside another transformed function.",
" 2) You are trying to read a model variable outside of a",
" transformed function.",
"",
"For (1) you can safely do this if you do not read the value of the",
"variable (e.g. you just use metadata like `v.shape` or `v.dtype`).",
"If you want to read the value of the variable then you must pass in",
"the value (e.g. pass the result of `f.init(..)`).",
"",
"For (2) to read variable values inspect the result of a transformed",
"function (e.g. look at the `params` dictionary returned from ",
"`f.init(..)`).")))
return value | a9242ed8913e16cf13eb4bf6dfc1625c3e93723b | 702,272 |
def load_cows(filename):
"""
Read the contents of the given file. Assumes the file contents contain
data in the form of comma-separated cow name, weight pairs, and return a
dictionary containing cow names as keys and corresponding weights as values.
Parameters:
filename - the name of the data file as a string
Returns:
a dictionary of cow name (string), weight (int) pairs
"""
# MY_CODE
cow_dict = {}
with open(filename, 'r') as f:
for line in f:
name, weight = line.split(',')
cow_dict[name] = int(weight)
return cow_dict | aa44df075a4aa8d44d37743b8a351ef57133d148 | 702,273 |
def wrap_hashlib(hasher, length=None):
"""
Wraps hashlib's functions, returning a function that returns the hex-digest of its input.
>>> from hashlib import sha1
>>> wrap_hashlib(sha1)(b'heyo')
'f8bb1031d6d82b30817a872b8a2ec31d5380cee5'
:param hasher: A function from :mod:`hashlib`
:return: Function
"""
args = []
if length is not None:
args = [length]
def _hasher(data):
return hasher(data).hexdigest(*args)
return _hasher | dbd07d4151a5c5c523fe75c3f29b72abfd15c3b8 | 702,274 |
import re
def isBlank(s):
""" Returns True if string contains only space characters."""
return bool(re.compile("^\s*$").match(s)) | 1e6f7f7cefa4fea3d5b7443d74a265a79c3db3d7 | 702,275 |
def __extract_tzd(m):
"""Return the Time Zone Designator as an offset in seconds from UTC."""
if not m:
return 0
tzd = m.group("tzd")
if not tzd:
return 0
if tzd == "Z":
return 0
hours = int(m.group("tzdhours"), 10)
minutes = m.group("tzdminutes")
if minutes:
minutes = int(minutes, 10)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == "+":
return -offset
return offset | 5e786cab67a2151df8ed8851dc19a6adbc365aea | 702,276 |
def print_errors_result(result, verbose=False):
"""
"""
spacers = 4
if verbose:
print(f"\nTotal Stats\n{spacers*' '}max. err.: {result[0]:6.3f};")
print(f"{(spacers-2)*' '}c-avg. err.: {result[1]:6.3f};")
print(f"{(spacers-2)*' '}t-avg. err.: {result[2]:6.3f}")
return None | 83fadbb6fe977c262a822d7217f0103cd7f9ecde | 702,277 |
def compute_heuristic_conn_4(init_pos, coord):
"""Returns Manhattan heuristic for distance from coord to init_pos
init_pos - coordinate of position of goal configuration
coord - coordinate of configuration for which heursitic is
being computed
Returns the heuristic distance to goal through a
Manhattan metric calculation.
"""
return sum(map(lambda x, y: abs(x - y), coord, init_pos)) | 873fcbad5ebadcb8d0f0009c6d3bb615146bab5a | 702,279 |
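A quick check of the Manhattan distance on assumed coordinates:
print(compute_heuristic_conn_4((0, 0), (3, 4)))  # 7 = |3-0| + |4-0|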
def cancel_and_stop_intent_handler(handler_input):
"""Single handler for Cancel and Stop Intent."""
speech_text = "OK Mate calm down."
attr = handler_input.attributes_manager.session_attributes
attr['readShows'] = False
attr['readMovies'] = False
attr['readBoth'] = False
attr['active_request'] = ''
attr['repeat'] = ''
attr['movie'] = {}
attr['show'] = {}
attr['readBoxOffice'] = False
handler_input.response_builder.speak(speech_text).set_should_end_session(True)
return handler_input.response_builder.response | c31bdbdb7810e0afd71e073c4bbc6f6486294782 | 702,280 |
def frequency(text, char):
""" Counts frequency of a character in a string. """
count = 0
for c in text:
if c == char:
count += 1
return count | 5a58161f6aed1f8ba88ed6490891b544b23449cd | 702,281 |
def get_latest_tle_from_restapi(number):
"""
:param number: get the latest TLE from Restful API
:return: a TLE
"""
return "see restful_client.py" | ac6c5e908bea386a12427ac07cd0249cf1b6f7e7 | 702,282 |
import time
def measure_command(func, kwargs):
""" Measures the execution time of a function
:param func: function
:param kwargs: dict
keyword arguments
:return: float, result
(elapsed time, result of the function)
"""
time_start = time.time()
r = func(**kwargs)
dt = time.time() - time_start
return dt, r | 33ca8627681b3f32d8d39fc088175a6a38d51097 | 702,283 |
import os
import zipfile
def in_pyz():
""" Determine if running in pyz archive """
pyz_file = os.path.abspath(os.path.dirname(__file__))
return zipfile.is_zipfile(pyz_file) | 0f9eae66abeec4f6f916cd037e16c743b0f3a77b | 702,285 |
import inspect
from functools import partial
def cli_handle_exception(exception_handler, exception_classes):
"""
deal with exception stack info, and then just throw the wrapper exception by cli
>>> def cli_handler1(ex):
... assert False, 'Need root permission'
>>> @cli_handle_exception(cli_handler1, ZeroDivisionError)
... def f():
... 1/0
>>> f()
AssertionError Traceback (most recent call last)
<ipython-input-9-c43e34e6d405> in <module>()
----> 1 f()
<ipython-input-7-1220a5dc628a> in newfunc(exception_chain, *args, **kwargs)
25 return result
26
---> 27 exception_handler(raised_exception)
28
29 return partial(newfunc, exception_chain)
<ipython-input-4-7313ecff13f5> in cli_handler1(ex)
1 def cli_handler1(ex):
----> 2 assert False, 'Need root permission'
AssertionError: Need root permission
"""
def wrapper(f):
nonlocal exception_classes
if inspect.isclass(exception_classes):
exception_classes = [exception_classes]
exception_chain = list(exception_classes)
exception_chain.reverse() # for recursive invocation
def newfunc(exception_chain, *args, **kwargs): # recursion
exception_class = exception_chain[0]
raised_exception = None
try:
if len(exception_chain) == 1:
result = f(*args, **kwargs)
else:
result = newfunc(exception_chain[1:], *args, **kwargs) # spread exception
except exception_class as ex:
raised_exception = ex # goto next exception_handler to clean old exception stack.
else:
return result
exception_handler(raised_exception)
return partial(newfunc, exception_chain)
return wrapper | 2cfa6a074de1c55142a4f8f2d71ed08a66ec726a | 702,286 |
def frequency_sort(items):
"""
Sort items by descending frequency, breaking ties by descending value
"""
if len(items) == len(set(items)):  # all items unique -> every frequency is 1
return items
elif len(set([items.count(x) for x in items])) == 1:
return sorted(items)
else:
return sorted(
sorted(items, reverse=True), key=lambda k: (items.count(k)), reverse=True
) | e782c137c5efececd46db6d9ac013790e943d8c8 | 702,287 |
def templatepartsmap(spec, t, partnames):
"""Create a mapping of {part: ref}"""
partsmap = {spec.ref: spec.ref} # initial ref must exist in t
if spec.mapfile:
partsmap.update((p, p) for p in partnames if p in t)
elif spec.ref:
for part in partnames:
ref = '%s:%s' % (spec.ref, part) # select config sub-section
if ref in t:
partsmap[part] = ref
return partsmap | 22ea9a0d474d832cf427568ee2e04adf1e00e0d4 | 702,288 |
import numpy
def non_nan_values_per_image(input_data):
"""Create boolean mask to index nans in original data,
assuming all samples in the dataset have nans at the same position"""
boolean_nan_idx = numpy.isnan(input_data[:, 0, 0])
count = numpy.count_nonzero(numpy.isnan(input_data[:, 0, 0]))
non_nan_values = int(len(input_data)) - int(count)
return non_nan_values, boolean_nan_idx | e963193bd4d5fcfcefb06b1e3687010ecf851745 | 702,289 |
def pozice_od_hrace():
"""Fuknce pozice od hrace se zeptá uživatele kam chce umístít svůj znak,
ověří, že se jedná o číslo a pozici vrátí"""
while True:
try:
cisloPolickaHrace = int(input("Na kolikáté místo v herním poli chceš umístit tvůj znak \"x\"? "))-1
except ValueError:
print("To není číslo")
else:
return cisloPolickaHrace | 4faede9c2e3357070eb0ab218e970a1788610dbb | 702,290 |
import os
def _fixpath(p):
"""Apply tilde expansion and absolutization to a path."""
return os.path.abspath(os.path.expanduser(p)) | 662eaea19625d11e0d88b486133ead928ee57c48 | 702,292 |
import math
def find_dice_medial(dice):
"""Средний бросок кости в формате навроде 1d6, 2d6, 1d12.
"""
dice_list = dice.split('d')
medial_number = math.floor(int(dice_list[0]) * (int(dice_list[1]) / 2))
return medial_number | 22af27cb4d3a06fe9b1c6af345f4deb4df46af23 | 702,293 |
def largest_number(seq_seq):
"""
Returns the largest number in the subsequences of the given
sequence of sequences. Returns None if there are NO numbers
in the subsequences.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]]
then this function returns 13.
As another example, if the given argument is:
([], [-1111111111111111], [])
then this function returns -1111111111111111.
As yet another example, if the given argument is:
([], [], [])
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
largest = None
for subsequence in seq_seq:
for number in subsequence:
if largest is None or number > largest:
largest = number
return largest | 5a2418e1f8ee0413e8306a04d3ee17a909b7b0c3 | 702,295 |
import argparse
import sys
import os
def _parse_args(cmdl):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--inbam",
"-i",
help="input BAM (/dev/stdin)",
type=argparse.FileType("r"),
default=sys.stdin,
)
parser.add_argument(
"--outbam",
"-o",
help="output BAM (/dev/null)",
type=argparse.FileType("w"),
default=os.devnull,
)
parser.add_argument(
"--stats",
"-s",
help="output stats (/dev/stderr)",
type=argparse.FileType("w"),
default=sys.stderr,
)
parser.add_argument("--rmdup", "-r", help="remove duplicates", action="store_true")
parser.add_argument(
"--aln_wiggle",
"-a",
help="bp alignment wiggle at ends (2)",
type=int,
default=2,
)
parser.add_argument(
"--len_wiggle",
"-l",
help="fraction read length wiggle (0.01)",
type=float,
default=0.01,
)
parser.add_argument(
"--dup_list",
"-d",
help="output duplicate read names",
type=argparse.FileType("w"),
)
return parser.parse_args(cmdl) | dd83c05c56041082df6794592216b88e77333440 | 702,296 |
def __unwrap_nonsense_request(request):
"""
Unwrap the given "estimate nonsense" request into a string.
Args:
request: A JSON-like dict describing an "estimate nonsense" request
(as described on https://clusterdocs.azurewebsites.net/)
Returns: A string that represents the sentence of which to estimate the offensiveness
"""
# request = json.loads(request)
sentence = request["sentence"]
return sentence | 793b5b352db1edd31e537e39bd7ac0c3f62e0fc0 | 702,297 |
def _clean(target_str: str, is_cellref: bool = False) -> str:
"""Rids a string of its most common problems: spacing, capitalisation,etc."""
try:
output_str = target_str.lstrip().rstrip()
except AttributeError:
raise AttributeError("Cannot clean value other than a string here.")
if is_cellref:
output_str = output_str.upper()
return output_str | 778658332059679356c399c7bb5b0c66383650d3 | 702,298 |
def get_values(record, tag):
"""Gets values that matches |tag| from |record|."""
keys = [key for key in record.keys() if key[0] == tag]
return [record[k] for k in sorted(keys)] | 7b75e300cbdb5c1840681c78af9adc4dc1f21838 | 702,299 |
from typing import List
def expected_value(values: List[float]) -> float:
"""Return the expected value of the input list
>>> expected_value([1, 2, 3])
2.0
"""
return sum(values) / len(values) | b856157d21bd8a82813bfb8ae39c4c5a1f3aef53 | 702,300 |
def add_layers_to_end_of_conn_mat(conn_mat, num_add_layers):
""" Adds layers with no edges and returns. """
new_num_layers = conn_mat.shape[0] + num_add_layers
conn_mat.resize((new_num_layers, new_num_layers))
return conn_mat | 4fb327f5b63b6c38ed0efa77d5247ae81113c7d3 | 702,301 |
import select
import sys
def is_input():
"""
Utility to check if there is input available.
Returns
-------
is_input: ``bool``
``True`` if there is data in sys.stdin
"""
return select.select([sys.stdin], [], [], 1) == ([sys.stdin], [], []) | 0d0194f72686bd839181a74eaa76d3baacbbd9ba | 702,302 |
def make_t0(df):
"""
Make "timepoint 0" data for each condition by copying the group of uninfected cells from the lowest
moi and timepoint, for each moi and strain,
Receive and return dataframe.
"""
# get the minimal timepoint and moi in the experiment
timepoints = sorted(df['timepoint'].unique())
mois = sorted(df['moi'].unique())
# copy all uninfected cells of the lowest moi and timepoint to a new dataframe t0_df
t0_df = df.loc[(df['infection'] == 0) & (df['timepoint'] == min(timepoints)) & (df['moi'] == min(mois))].copy(
deep=True)
# define them as timepoint 0
t0_df['timepoint'] = 0
# copy the "uninfected cells" dataframe for each condition, so it acts as the "timepoint 0" of each condition
for moi in mois:
t0_df['moi'] = moi
t0_df['condition'] = 'HA' + str(moi)
df = df.append(t0_df)
return df | 0b31f295f5053c211f296a5e865d14f6f32d4a5a | 702,303 |
import requests
def getAddressByAmp(lnglat):
"""
Reverse geocoding via the AMap (Gaode) API.
:param lnglat: "lng,lat" coordinate string
:return: dict with status, district, formatted address, source and adcode
"""
key = "efe4e9291a4a665ff691c55e3a3b871d"
url = "https://restapi.amap.com/v3/geocode/regeo?"
params = {
"key": key,
"location": lnglat
}
headers = {
"Content-type": "application/json",
'Upgrade': 'HTTP/1.1'
}
try:
# parse the JSON body directly rather than calling eval() on the raw text
ret = requests.get(url=url, headers=headers, params=params, timeout=1).json()
except requests.exceptions.ConnectionError as e:
raise e
except Exception as e:
raise e
infocode = ret['infocode']
if infocode == '10000':
info = ret["regeocode"]
district = info["addressComponent"]["district"]
adcode = info["addressComponent"]["adcode"]
formatted_address = info["formatted_address"]
data_from = "gaode"
else:
district = '[g] lookup failed, reason: ' + infocode
formatted_address = '[g] lookup failed, reason: ' + infocode
data_from = "gaode"
adcode = 0
content = {
'status': infocode,
"district": district,
"formatted_address": formatted_address,
"data_from": data_from,
"adcode": adcode
}
return content | c25d70d34b69890780a7514791919c2fb78c21b5 | 702,304 |
def color_name(data, bits):
"""Color names in #RRGGBB format, given the number of bits for each component."""
ret = ["#"]
for i in range(3):
ret.append("%02X" % (data[i] << (8 - bits[i])))
return ''.join(ret) | 623ba9759f3dec88c60db2b120cdf151008e55b6 | 702,307 |
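A short check using assumed RGB565 bit widths; each component is left-shifted to fill 8 bits:
print(color_name([31, 63, 31], [5, 6, 5]))  # #F8FCF8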
def is_valid_gadget(gadget, bad_chars):
"""Determine if a gadget is valid (i.e., contains no bad characters).
Args:
gadget (Gadget): A namedtuple-like object with `shellcode` and `asm`
fields.
bad_chars (bytearray): The bad characters not allowed to be present.
Returns:
bool: Whether the specified gadget is acceptable.
"""
gadget_chars = bytearray(gadget.shellcode)
for bc in bad_chars:
if bc in gadget_chars:
return False
return True | 00189e08120e377ec873aa4267f3240d72943966 | 702,308 |
def month(dt):
""" For a given datetime, return the matching first-day-of-month date. """
return dt.date().replace(day=1) | 480fcdfd7a69f95aa071e2061efc4740802d72d6 | 702,309 |
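A quick usage sketch (assumed datetime):
import datetime
print(month(datetime.datetime(2023, 7, 19, 15, 30)))  # 2023-07-01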
def count_frequency(df, col):
"""Count the number of occurence value in a column."""
df['Freq'] = df.groupby(col)[col].transform('count')
return df | 28f502d79bacaba474c6df8b642f8e3f7875d1f3 | 702,310 |