content (string, length 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def get_wikipedia_pages_by_list(titles_or_page_ids):
"""
Get Wikipedia pages using list of titles or page ids.
@param titles_or_page_ids: List of titles or page ids.
@return: List of pages.
>>> titles_or_page_ids = 'Aromatics_byggnad'
>>> pages = get_wikipedia_pages_by_list(titles_or_page_ids)
>>> pages[0]['pageid']
4868947
>>> titles_or_page_ids = ['Aromatics_byggnad']
>>> pages = get_wikipedia_pages_by_list(titles_or_page_ids)
>>> pages[0]['pageid']
4868947
>>> titles_or_page_ids = ['Dragontorpet Abrahamsberg', 'Farfadern']
>>> pages = get_wikipedia_pages_by_list(titles_or_page_ids)
>>> pages[0]['pageid']
3879445
>>> titles_or_page_ids = [1160607, 3879445]
>>> pages = get_wikipedia_pages_by_list(titles_or_page_ids)
>>> pages[0]['pageid']
3879445
"""
# Function for splitting a list into smaller lists, see
# http://stackoverflow.com/questions/752308/split-list-into-smaller-lists
split_list = lambda l, n=WIKIPEDIA_REQUEST_MAX_PAGES: [l[:]] if len(l) <= n else [l[i:i+n] for i in range(0, len(l), n)]
if isinstance(titles_or_page_ids, str):
titles_or_page_ids = [titles_or_page_ids]
titles_or_page_ids = split_list(titles_or_page_ids, WIKIPEDIA_REQUEST_MAX_PAGES)
pages = []
for values in titles_or_page_ids:
if all([isinstance(v, str) for v in values]):
results = get_wikipedia_page('titles', '|'.join(values))
else:
results = get_wikipedia_page('pageids', '|'.join(map(str, values)))
pages.extend(results['query']['pages'].values())
return pages
# TODO: What about 'continue'... | 4,100 |
def probit(s: pd.Series, error: str = "warn") -> pd.Series:
"""
Transforms the Series via the inverse CDF of the Normal distribution.
Each value in the series should be between 0 and 1. Use `error` to
control the behavior if any series entries are outside of (0, 1).
>>> import pandas as pd
>>> import janitor
>>> s = pd.Series([0.1, 0.5, 0.8], name="numbers")
>>> s.probit()
0 -1.281552
1 0.000000
2 0.841621
dtype: float64
:param s: Input Series.
:param error: Determines behavior when `s` is outside of `(0, 1)`.
If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then
a `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan`
is returned for the problematic entries; defaults to `'warn'`.
:raises RuntimeError: Raised when there are problematic values
in the Series and `error='raise'`.
:return: Transformed Series
"""
s = s.copy()
outside_support = (s <= 0) | (s >= 1)
if (outside_support).any():
msg = f"{outside_support.sum()} value(s) are outside of (0, 1)"
if error.lower() == "warn":
warnings.warn(msg, RuntimeWarning)
if error.lower() == "raise":
raise RuntimeError(msg)
else:
pass
s[outside_support] = np.nan
with np.errstate(all="ignore"):
out = pd.Series(norm.ppf(s), index=s.index)
return out | 4,101 |
def make_poem(token_nums, df, new_rowi):
"""
    Builds one poem row: returns a one-row DataFrame to be appended at the end of the main dataframe.
    Having a list in a df cell is apparently a pain, so the words are joined with "_".
"""
print(token_nums)
words = df.iloc[token_nums,0].to_list()
words_out = []
for word in words:
print(word)
if "_" in word:
j = word.rsplit("_")
print(j)
words_out = words_out + j
else:
words_out.append(word)
print(words_out)
tts.make_svg(words_out, str(new_rowi))
file_hash = tts.ipfs_upload(f"pics/{new_rowi}.svg")
tts.pin(file_hash, f"{new_rowi}.svg")
ser = pd.DataFrame({"text": "_".join(words_out), "numbers": new_rowi, "ipfs": file_hash, "metadata": f"metadata/meta-{new_rowi}.json",
"policy_id" :"<policyID>", "sold" : 1, "pic" : f"pics/{new_rowi}.svg"}, index = [new_rowi])
d = tts.build_ingredient_dict("<policyID>", words_out, new_rowi, f"{file_hash}")
with open(f"metadata/meta-{new_rowi}.json", 'w') as f:
json.dump(d, f)
return ser | 4,102 |
def merge_multilinestrings(network):
"""Try to merge all multilinestring geometries into linestring geometries.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
edges = network.edges.copy()
    edges['geometry'] = edges.geometry.apply(merge_multilinestring)
return Network(edges=edges,
nodes=network.nodes) | 4,103 |
def _read_yaml_definition(uarchdefs, path):
"""
    :param uarchdefs: list of already loaded micro-architecture definitions to extend
    :param path: directory containing the microarchitecture.yaml file to read
"""
uarchdef = read_yaml(os.path.join(path, "microarchitecture.yaml"), SCHEMA)
uarchdef["Path"] = path
uarchdefs.append(uarchdef)
_read_uarch_extensions(uarchdefs, path)
baseuarch = read_yaml(DEFAULT_UARCH, SCHEMA)
baseuarch["Path"] = DEFAULT_UARCH
uarchdefs.append(baseuarch)
complete_uarchdef = {}
uarchdefs.reverse()
for uarchdef in uarchdefs:
for key, val in uarchdef.items():
if not isinstance(val, dict):
complete_uarchdef[key] = uarchdef[key]
else:
override = val.get("Override", False)
if key not in complete_uarchdef:
complete_uarchdef[key] = {}
for key2 in val:
if key2 in ["YAML", "Modules", "Path"]:
if key2 not in complete_uarchdef[key]:
complete_uarchdef[key][key2] = []
if os.path.isabs(val[key2]):
if override:
complete_uarchdef[key][key2] = [val[key2]]
else:
complete_uarchdef[key][key2].append(val[key2])
else:
if override:
complete_uarchdef[key][key2] = [
os.path.join(
uarchdef["Path"], val[key2]
)
]
else:
complete_uarchdef[key][key2].append(
os.path.join(
uarchdef["Path"], val[key2]
)
)
elif key2 == "Module":
if val[key2].startswith("microprobe"):
val[key2] = os.path.join(
os.path.dirname(__file__), "..", "..", "..",
val[key2]
)
if os.path.isabs(val[key2]):
complete_uarchdef[key][key2] = val[key2]
else:
complete_uarchdef[key][key2] = os.path.join(
uarchdef["Path"], val[key2]
)
else:
complete_uarchdef[key][key2] = val[key2]
return complete_uarchdef | 4,104 |
def timestamp_old ():
""" store timestamp field """
timestamp = {}
timestamp['timestamp'] = False
try:
today = datetime.datetime.now()
# print('Timestamp: {:%Y-%m-%d %H:%M:%S}'.format(today))
timestamp['timestamp'] = "{:%Y-%m-%d %H:%M:%S}".format(today)
except Exception as e:
print ("Failure in getting time:", e)
return timestamp | 4,105 |
def download_model(model: str, saving_directory: str = None) -> str:
"""
Function that loads pretrained models from AWS.
:param model: Name of the model to be loaded.
:param saving_directory: RELATIVE path to the saving folder (must end with /).
Return:
- Path to model checkpoint.
"""
if saving_directory is None:
saving_directory = get_cache_folder()
if not saving_directory.endswith("/"):
saving_directory += "/"
if not os.path.exists(saving_directory):
os.makedirs(saving_directory)
if os.path.isdir(saving_directory + model):
logger.info(f"{model} is already in cache.")
if not model.endswith("/"):
model += "/"
elif model not in available_metrics.keys():
raise Exception(
f"{model} is not in the `availale_metrics` or is a valid checkpoint folder."
)
elif available_metrics[model].startswith("https://"):
download_file_maybe_extract(
available_metrics[model], directory=saving_directory
)
else:
raise Exception("Invalid model name!")
# CLEAN Cache
if os.path.exists(saving_directory + model + ".zip"):
os.remove(saving_directory + model + ".zip")
if os.path.exists(saving_directory + model + ".tar.gz"):
os.remove(saving_directory + model + ".tar.gz")
if os.path.exists(saving_directory + model + ".tar"):
os.remove(saving_directory + model + ".tar")
checkpoints_folder = saving_directory + model + "/checkpoints"
checkpoints = [
file for file in os.listdir(checkpoints_folder) if file.endswith(".ckpt")
]
checkpoint = checkpoints[-1]
checkpoint_path = checkpoints_folder + "/" + checkpoint
return checkpoint_path | 4,106 |
def get_series(currency_id: str, interval: str) -> pd.DataFrame:
""" Get the time series for the given currency_id. Timestamps and dates are given in UTC time. """
url = f"https://api.coincap.io/v2/assets/{currency_id}/history"
js = request_and_jsonize_calm(url, params={'interval': interval})
times, prices, dates = [], [], []
for measurement in js['data']:
timestamp_seconds = float(measurement['time']) // 1000
times.append(timestamp_seconds) # Timestamp is in milliseconds
prices.append(float(measurement['priceUsd']))
dates.append(datetime.fromtimestamp(timestamp_seconds))
df = pd.DataFrame(
{
'date': dates,
'time': times,
'price': prices
}
)
return df | 4,107 |
def write_interpolated_6hrly(data,z,file_name=None):
"""docstring for write_interpolated_6hrly"""
outputq_file="interpolated_{}.nc"
    if file_name is not None:
outputq_file="{}_"+file_name
for k in data.keys():
if k!="z":
mygis.write(outputq_file.format(k),data[k],varname=k) | 4,108 |
def formatTitle(title):
"""
The formatTitle function formats titles extracted from the scraped HTML code.
"""
title = html.unescape(title)
if(len(title) > 40):
return title[:40] + "..."
return title | 4,109 |
def isPalindrome(x):
"""
:type x: int
:rtype: bool
"""
def sub_judge(start, end, string):
if start >= end:
return True
if string[start] == string[end]:
return sub_judge(start + 1, end - 1, string)
else:
return False
return sub_judge(0, len(str(x)) - 1, str(x)) | 4,110 |
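# A minimal usage sketch for isPalindrome above: the helper recurses inward from both
# ends of str(x), so the leading '-' makes negative numbers non-palindromes.
assert isPalindrome(121)
assert not isPalindrome(-121)
assert not isPalindrome(10)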
def count_partitions(n, m):
"""Count the partitions of n using parts up to size m.
>>> count_partitions(6, 4)
9
>>> count_partitions(10, 10)
42
"""
if n == 0:
return 1
elif n < 0:
return 0
elif m == 0:
return 0
else:
with_m = count_partitions(n-m, m)
without_m = count_partitions(n, m-1)
return with_m + without_m | 4,111 |
def parse_pairs(string):
"""
    Converts a string whose data is written as:
    Key: Value
    into a dictionary where "Key" is the key and "Value" is the value. A line starting with a
    space (continuation text or " .") is appended to the previous value.
    :param string: string that contains data to convert
    :return: dictionary of parsed key/value pairs
    :raises IllegalFormatException: if a line matches none of the expected patterns
"""
pairs = {}
last_key = None
for line in string.split('\n'):
# If line is continuing of previous value - add it
if re.match('( [^\n]+| \\.)', line) is not None:
pairs[last_key] += '\n' + line
else:
# Regexp passes:
# Key: Value
# abc: DEF
# Won't pass:
# a adn dsj jsd dsi ads pf
match = re.match('([^:]+): ([^\n]+)', line)
if match is not None:
pairs.update({match.group(1): match.group(2)})
last_key = match.group(1)
            elif line.strip():  # non-blank line that matches neither accepted pattern
raise IllegalFormatException("Line\n%s\nDoesn't match patterns "
"\"([^:]+): ([^\\n]+) and \"( [^\\n]+| \\.)\"!" % line)
return pairs | 4,112 |
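# A minimal usage sketch for parse_pairs above (assumes the snippet's `re` import and
# IllegalFormatException are available); the sample text is illustrative only.
sample = "Name: Widget\nDescription: First line\n continued on a second line\nPrice: 10"
parsed = parse_pairs(sample)
# parsed == {'Name': 'Widget',
#            'Description': 'First line\n continued on a second line',
#            'Price': '10'}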
def check_yum_package(package_name, logger):
"""
check if a yum package is installed
:param package_name: name to be checked
:param logger: rs log obj
:return: boolean
"""
logger.trace("Checking if package '{}' is installed.", package_name)
command = "yum list installed {}".format(package_name)
try:
execute_in_bash(command, logger)
except:
logger.trace("Package '{}' is not installed.", package_name)
return False
logger.trace("Package '{}' is already installed.", package_name)
return True | 4,113 |
def get(filename, name):
"""
Read a given element from an SVG file
"""
root = etree.parse(filename).getroot()
return root.xpath("//*[@id='%s']" % name)[0].get("d") | 4,114 |
def softmaxCostAndGradient(predicted, target, outputVectors, dataset):
""" Softmax cost function for word2vec models
Implement the cost and gradients for one predicted word vector
and one target word vector as a building block for word2vec
models, assuming the softmax prediction function and cross
entropy loss.
Arguments:
predicted -- numpy ndarray, predicted word vector (\hat{v} in
the written component)
target -- integer, the index of the target word
outputVectors -- "output" vectors (as rows) for all tokens
dataset -- needed for negative sampling, unused here.
Return:
cost -- cross entropy cost for the softmax word prediction
gradPred -- the gradient with respect to the predicted word
vector
grad -- the gradient with respect to all the other word
vectors
We will not provide starter code for this function, but feel
free to reference the code you previously wrote for this
assignment!
"""
### YOUR CODE HERE
# print("+++++++++++++++++++++ softmaxCostAndGradient +++++++++++++++++++++++")
# print("The shape of predicted(v_c) is {}, which means each word is presented by {} dims.".format(predicted.shape, predicted.shape[0]))
# print("target(o)'s type is {}, and it's value is {},the u_o now is u_target.".format(type(target), target))
# print("The shape of outputVectors(u_w) is {}, which means we have {} words.".format(outputVectors.shape, outputVectors.shape[0]))
y_hat = softmax(np.matmul(outputVectors, predicted))
# print("y_hat is{}.".format(y_hat))
# print("Then we should minus 1 at the location at {}".format(target+1))
cost = -np.log(y_hat[target])
y_hat[target] = y_hat[target] - 1
dy = y_hat.copy()
# print("so we can get the dy:{}".format(y_hat))
# print("To get the gradPred, according to the wirte solution what we should know the shapes of dy{} and outputVectors{}".
# format(dy.shape, outputVectors.shape))
gradPred = np.matmul(dy.T, outputVectors)
# print("we can get the gradPred easily in shape{}".format(gradPred.shape))
# print("To get the grad, according to the wirte solution what we should know the shapes of dy{} and predicted{}".
# format(dy.shape, predicted.shape))
grad = np.outer(dy, predicted)
# print("we can get the grad easily in shape{}".format(grad.shape))
# print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
### END YOUR CODE
return cost, gradPred, grad | 4,115 |
def get_osf_meta_schemas():
"""Returns the current contents of all known schema files."""
schemas = [
ensure_schema_structure(from_json(json_filename))
for json_filename in OSF_META_SCHEMA_FILES
]
return schemas | 4,116 |
def test_stop_after_success():
"""
after resolving a cep the next provider groups should not be used
"""
working_provider, mock_cepaddress = create_mock_provider()
next_provider, _ = create_mock_provider()
cep_resolver = CEPResolver(providers=[{working_provider}, {next_provider}])
assert cep_resolver("11111000") == mock_cepaddress
next_provider.assert_not_called() | 4,117 |
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the configured Numato USB GPIO switch ports."""
if discovery_info is None:
return
api = hass.data[DOMAIN][DATA_API]
switches = []
devices = hass.data[DOMAIN][CONF_DEVICES]
for device in [d for d in devices if CONF_SWITCHES in d]:
device_id = device[CONF_ID]
platform = device[CONF_SWITCHES]
invert_logic = platform[CONF_INVERT_LOGIC]
ports = platform[CONF_PORTS]
for port, port_name in ports.items():
try:
api.setup_output(device_id, port)
api.write_output(device_id, port, 1 if invert_logic else 0)
except NumatoGpioError as err:
_LOGGER.error(
"Failed to initialize switch '%s' on Numato device %s port %s: %s",
port_name,
device_id,
port,
err,
)
continue
switches.append(
NumatoGpioSwitch(
port_name,
device_id,
port,
invert_logic,
api,
)
)
add_entities(switches, True) | 4,118 |
def update(request, bleep_id):
"""
Process a bleep form update
"""
    if request.method == 'POST':
        # bind the posted data to the existing bleep so it is updated in place
        b = Bleep.objects.get(pk=bleep_id)
        form = BleepForm(request.POST, instance=b)
        if form.is_valid():
            # Process, clean and save the data
            form.save()
            return HttpResponseRedirect('/bleeps/'+bleep_id)
else:
form = BleepForm() # Create an unbound form
return render_to_response('bleep/form.html', {
'form': form,
'content_form': CommentForm()}, context_instance=RequestContext(request)) | 4,119 |
def split_passports(file: Path) -> Iterator[Mapping]:
"""
Split a given passport file
:param file: passport file to process
:return: generator yielding a list of fields
"""
passport = {}
for line in open(file):
if line == '\n':
yield passport
passport = {}
continue
fields = line.strip().split(' ')
passport.update({k:v for k, v in [f.split(':') for f in fields]})
yield passport | 4,120 |
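# A minimal usage sketch for split_passports above: records are separated by blank lines
# and each "key:value" field becomes a dictionary entry. The sample data is illustrative only.
import tempfile
from pathlib import Path

raw = "ecl:gry pid:860033327\nbyr:1937 iyr:2017\n\nhcl:#cfa07d byr:1929\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as handle:
    handle.write(raw)
records = list(split_passports(Path(handle.name)))
# records[0] == {'ecl': 'gry', 'pid': '860033327', 'byr': '1937', 'iyr': '2017'}
# records[1] == {'hcl': '#cfa07d', 'byr': '1929'}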
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
An email.errors.HeaderParseError may be raised when certain decoding error
occurs (e.g. a base64 decoding exception).
"""
header = str(header)
if not ecre.search(header):
return [(header, None)]
else:
decoded = []
dec = ''
for line in header.splitlines():
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
if decoded and decoded[-1][1] is None:
decoded[-1] = (
decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [ s.lower() for s in parts[0:2] ]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
paderr = len(encoded) % 4
if paderr:
encoded += '==='[:4 - paderr]
try:
dec = email.base64mime.decode(encoded)
except binascii.Error:
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (
decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded | 4,121 |
def user_from_identity():
"""Returns the User model object of the current jwt identity"""
username = get_jwt_identity()
return User.query.filter(User.username == username).scalar() | 4,122 |
def test_write_output_file():
""" Test writing an output file """
npdf = 40
nbins = 21
pz_pdf = np.random.uniform(size=(npdf, nbins))
zgrid = np.linspace(0, 4, nbins)
zmode = zgrid[np.argmax(pz_pdf, axis=1)]
data_dict = dict(zmode=zmode, pz_pdf=pz_pdf)
group, outf = io.initializeHdf5Write(test_outfile, 'data', photoz_mode=((npdf,), 'f4'), photoz_pdf=((npdf, nbins), 'f4'))
io.writeDictToHdf5Chunk(group, data_dict, 0, npdf, zmode='photoz_mode', pz_pdf='photoz_pdf')
io.finalizeHdf5Write(outf, 'md', zgrid=zgrid)
os.unlink(test_outfile) | 4,123 |
async def test_check_segment_or_target(
data_type, defined, missing, used, sequence_id, dbi
):
"""
Test that issues with `segment` or `target` fields in sequence editing requests are
detected.
"""
await dbi.otus.insert_one({"_id": "foo", "schema": [{"name": "RNA1"}]})
await dbi.references.insert_one(
{"_id": "bar", "data_type": data_type, "targets": [{"name": "CPN60"}]}
)
await dbi.sequences.insert_one(
{
"_id": "boo",
"otu_id": "foo",
"isolate_id": "baz",
"target": "CPN60" if used else "ITS2",
}
)
data = dict()
if data_type == "barcode":
data["target"] = "CPN60" if defined else "ITS2"
else:
data["segment"] = "RNA1" if defined else "RNA2"
if missing:
data = dict()
message = await check_sequence_segment_or_target(
dbi, "foo", "baz", sequence_id, "bar", data
)
# The only case where an error message should be returned for a genome-type
# reference.
if data_type == "genome" and not missing and not defined:
assert message == "Segment RNA2 is not defined for the parent OTU"
return
if data_type == "barcode":
if sequence_id is None and missing:
assert message == "The 'target' field is required for barcode references"
return
if not missing and not defined:
assert message == "Target ITS2 is not defined for the parent reference"
return
if sequence_id != "boo" and not missing and used and data_type == "barcode":
assert message == "Target CPN60 is already used in isolate baz"
return
assert message is None | 4,124 |
def pdf2img(file_path):
"""
    Transform the PDF files into images.
    :param file_path: the directory containing the raw PDF/image files
:return:
"""
formats = ['.pdf', '.PDF'] ## do not consider the situation like '.pDF', '.Pdf', etc.
formats_recognize = ['.pdf', '.PDF', '.jpg', '.png']
if not os.path.exists(file_path + 'img'):
os.makedirs(file_path + 'img')
pathDir = os.listdir(file_path)
for allDir in pathDir:
if allDir[len(allDir) - 4: len(allDir)] in formats:
            ## the default dpi is 200; increasing it makes processing dramatically slower.
convert_from_path(os.path.join(file_path, allDir), output_folder=file_path + 'img', fmt='.png', dpi=200,
output_file=allDir.split('.', 1)[0])
elif allDir[len(allDir) - 4: len(allDir)] in formats_recognize:
shutil.copyfile(os.path.join(file_path, allDir), os.path.join(file_path + 'img', allDir))
else:
print('The {0} is not image or PDF file.'.format(allDir)) | 4,125 |
def combine(first: Set[T], second: Set[T]) -> Set[T]:
"""Combine two sets of tuples, prioritising the second."""
result = second.copy()
for pf in first:
include = True
for pr in result:
if pf[0] == pr[0]:
include = False
break
if pf[1] == pr[1]:
include = False
break
if include:
result.add(pf)
return result | 4,126 |
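# A minimal usage sketch for combine above: a tuple from `first` is only kept when neither
# its first nor its second element already appears in a tuple from `second`.
first = {("a", 1), ("b", 2), ("c", 3)}
second = {("a", 9), ("x", 2)}
# ("a", 1) clashes on "a", ("b", 2) clashes on 2, so only ("c", 3) is carried over.
assert combine(first, second) == {("a", 9), ("x", 2), ("c", 3)}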
def minsize<VAL1>(event, context):
"""
AutoScalingGroup起動台数調整
"""
""" Create Connection """
try:
client = boto3.client('autoscaling', region_name = '<Region>')
except:
print('Connection Error')
return 1
""" Update AutoScalingGroup """
try:
client.update_auto_scaling_group(AutoScalingGroupName = '<AutoScalingGroup>', MinSize = <VAL1>, DesiredCapacity = <VAL1>)
except:
print('Update AutoScalingGroup Error')
return 1
return 0 | 4,127 |
def thin(image, n_iter=None):
"""
Perform morphological thinning of a binary image
Parameters
----------
image : binary (M, N) ndarray
The image to be thinned.
n_iter : int, number of iterations, optional
Regardless of the value of this parameter, the thinned image
is returned immediately if an iteration produces no change.
If this parameter is specified it thus sets an upper bound on
the number of iterations performed.
Returns
-------
out : ndarray of bools
Thinned image.
See also
--------
skeletonize
Notes
-----
This algorithm [1]_ works by making multiple passes over the image,
removing pixels matching a set of criteria designed to thin
connected regions while preserving eight-connected components and
2 x 2 squares [2]_. In each of the two sub-iterations the algorithm
correlates the intermediate skeleton image with a neighborhood mask,
then looks up each neighborhood in a lookup table indicating whether
the central pixel should be deleted in that sub-iteration.
References
----------
.. [1] Z. Guo and R. W. Hall, "Parallel thinning with
two-subiteration algorithms," Comm. ACM, vol. 32, no. 3,
pp. 359-373, 1989.
.. [2] Lam, L., Seong-Whan Lee, and Ching Y. Suen, "Thinning
Methodologies-A Comprehensive Survey," IEEE Transactions on
Pattern Analysis and Machine Intelligence, Vol 14, No. 9,
September 1992, p. 879
Examples
--------
>>> square = np.zeros((7, 7), dtype=np.uint8)
>>> square[1:-1, 2:-2] = 1
>>> square[0,1] = 1
>>> square
array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> skel = bwmorph_thin(square)
>>> skel.astype(np.uint8)
array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
return _bwmorph_luts(image, THIN_LUTS, n_iter=n_iter) | 4,128 |
def test_return_quotes():
"""test return an obj with more than one quote"""
obj = quotes.get_quotes()
assert len(obj) > 0 | 4,129 |
def rings(xgr):
""" rings in the graph (minimal basis)
"""
xgrs = [bond_induced_subgraph(xgr, bnd_keys)
for bnd_keys in rings_bond_keys(xgr)]
return tuple(sorted(xgrs, key=frozen)) | 4,130 |
def test_pion_decay_kelner(particle_dists):
"""
test PionDecayKelner06
"""
from ..radiative import PionDecayKelner06 as PionDecay
ECPL,PL,BPL = particle_dists
for pdist in [ECPL,PL,BPL]:
pdist.amplitude = 1*(1/u.TeV)
lum_ref = [5.54225481494e-13,
1.21723084093e-12,
7.35927471e-14]
energy = np.logspace(9, 13, 20) * u.eV
pp = PionDecay(ECPL)
Wp = pp.Wp.to('erg').value
lpp = trapz_loglog(pp.spectrum(energy) * energy, energy).to('erg/s')
assert(lpp.unit == u.erg / u.s)
assert_allclose(lpp.value, lum_ref[0]) | 4,131 |
def prior_search(binary, left_fit, right_fit, margin=50):
"""
searches within the margin of previous left and right fit indices
Parameters:
binary: np.ndarray, binary image from the video
left_fit: list, left line curve fitting coefficients
right_fit: list, right line curve fitting coefficients
margin: int, margin to search lane for
Returns:
left_fitx: list, left line x indices
right_fitx: list, right line x indices
ploty: y indices for curve fitting
left_fit: list, left line curve fitting coefficients
right_fit: list, right line curve fitting coefficients
"""
nonzero = binary.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
left_lane_indices = (
nonzerox
> (
left_fit[0] * (nonzeroy ** 2)
+ left_fit[1] * nonzeroy
+ left_fit[2]
- margin
)
) & (
nonzerox
< (
left_fit[0] * (nonzeroy ** 2)
+ left_fit[1] * nonzeroy
+ left_fit[2]
+ margin
)
)
right_lane_indices = (
nonzerox
> (
right_fit[0] * (nonzeroy ** 2)
+ right_fit[1] * nonzeroy
+ right_fit[2]
- margin
)
) & (
nonzerox
< (
right_fit[0] * (nonzeroy ** 2)
+ right_fit[1] * nonzeroy
+ right_fit[2]
+ margin
)
)
leftx = nonzerox[left_lane_indices]
lefty = nonzeroy[left_lane_indices]
rightx = nonzerox[right_lane_indices]
righty = nonzeroy[right_lane_indices]
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
ploty = np.linspace(0, binary.shape[0] - 1, binary.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
return left_fitx, right_fitx, ploty, left_fit, right_fit | 4,132 |
def _parallel_iter(par, iterator):
"""
Parallelize a partial function and return results in a list.
:param par: Partial function.
:param iterator: Iterable object.
:rtype: list
:return: List of results.
"""
pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1)
output = []
for thread_output in tqdm.tqdm(pool.imap_unordered(par, iterator)):
if thread_output:
output.extend(thread_output)
pool.close()
pool.join()
return output | 4,133 |
def test_cpp_info_merge_with_components():
"""If we try to merge a cpp info with another one and some of them have components, assert"""
cppinfo = NewCppInfo()
cppinfo.components["foo"].cxxflags = ["var"]
other = NewCppInfo()
other.components["foo2"].cxxflags = ["var2"]
with pytest.raises(ConanException) as exc:
cppinfo.merge(other)
assert "Cannot aggregate two cppinfo objects with components" in str(exc.value) | 4,134 |
def reassign_labels(class_img, cluster_centers, k=3):
"""Reassigns mask labels of t series
based on magnitude of the cluster centers.
This assumes land will always be less than thin
cloud which will always be less than thick cloud,
in HOT units"""
idx = np.argsort(cluster_centers.sum(axis=1))
lut = np.zeros_like(idx)
lut[idx] = np.arange(k)
return lut[class_img] | 4,135 |
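# A minimal usage sketch for reassign_labels above (assumes numpy as np): the cluster with the
# largest centre sum ends up as label 2 (thick cloud) and the smallest as label 0 (land).
import numpy as np
class_img = np.array([[0, 1], [2, 0]])
cluster_centers = np.array([[5.0, 5.0],    # largest sum -> relabelled 2
                            [1.0, 1.0],    # middle sum  -> stays 1
                            [0.1, 0.1]])   # smallest sum -> relabelled 0
remapped = reassign_labels(class_img, cluster_centers)
# remapped == [[2, 1], [0, 2]]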
def parse_args():
"""parse args with argparse
:returns: args
"""
parser = argparse.ArgumentParser(description="Daily Reddit Wallpaper")
parser.add_argument("-s", "--subreddit", type=str, default=config["subreddit"],
help="Example: art, getmotivated, wallpapers, ...")
parser.add_argument("-t", "--time", type=str, default=config["time"],
help="Example: new, hour, day, week, month, year")
parser.add_argument("-n", "--nsfw", action='store_true', default=config["nsfw"], help="Enables NSFW tagged posts.")
parser.add_argument("-d", "--display", type=int, default=config["display"],
help="Desktop display number on OS X (0: all displays, 1: main display, etc")
parser.add_argument("-o", "--output", type=str, default=config["output"],
help="Set the outputfolder in the home directory to save the Wallpapers to.")
args = parser.parse_args()
return args | 4,136 |
def gridarray(a, b):
"""
Given two arrays create an array of all possible pairs, a 2d grid.
E.g. a = [1, 2], b = [2, 4, 5], gridarray(a,b) = [[1,2], [1,4],
[1,5], [2,2], [2,4], [2,5]]. May be used repeatedly for increasing
dimensionality.
    DEPRECATED: Use A, B = np.meshgrid(a, b).
    Note that meshgrid works with arbitrary dimensions too.
"""
    if a is None:
        return b  # Trivial cases
    if b is None:
        return a
adim, bdim = 1, 1
if a.ndim > 1:
adim = a.shape[1]
if b.ndim > 1:
bdim = b.shape[1]
ab = np.zeros((a.shape[0] * b.shape[0], adim + bdim), dtype=a.dtype)
count = 0
for aa in a:
for bb in b:
ab[count, 0:adim] = aa
ab[count, adim:] = bb
count = count + 1
return ab | 4,137 |
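# A minimal usage sketch for gridarray above (assumes numpy as np), reproducing the example
# from its docstring; new code should prefer np.meshgrid as the docstring notes.
import numpy as np
a = np.array([1, 2])
b = np.array([2, 4, 5])
pairs = gridarray(a, b)
# pairs == [[1, 2], [1, 4], [1, 5], [2, 2], [2, 4], [2, 5]]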
def spooler_get_task(path: str) -> Optional[dict]:
"""Returns a spooler task information.
:param path: The relative or absolute path to the task to read.
""" | 4,138 |
def has_alphanum(s):
"""
Return True if s has at least one alphanumeric character in any language.
See https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
"""
for c in s:
category = unicodedata.category(c)[0]
if category == 'L' or category == 'N':
return True
return False | 4,139 |
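# A minimal usage sketch for has_alphanum above (relies on the snippet's unicodedata import):
# letters and digits from any script count, punctuation and whitespace alone do not.
assert has_alphanum("...abc...")
assert has_alphanum("日本語")      # CJK characters are Unicode category 'Lo'
assert not has_alphanum("!?- \t")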
def src_path_join(*kwargs):
"""
    returns the path to the file whose directory components are provided in kwargs,
    similar to `os.path.join`
:param kwargs:
:return:
"""
return os.path.join(get_src_dir(), *kwargs) | 4,140 |
def get_values_heatmap(entity, measurement, case_id, categorical_filter, categorical, numerical_filter_name, from1,
to1, measurement_filter, date, r):
""" Get numerical values from numerical table from database
get_values use in heatmap, clustering
r: connection with database
Returns
-------
df: DataFrame with columns Name_ID,entity1,entity2,...
"""
entity_fin = "$$" + "$$,$$".join(entity) + "$$"
if not categorical_filter and not case_id and not numerical_filter_name:
sql = """SELECT "Name_ID","measurement","Key",AVG(f."Value") as "Value"
FROM examination_numerical,
unnest("Value") as f("Value")
WHERE "Key" IN ({0}) and "measurement" in ('{1}') and "Date" Between '{2}' and '{3}'
Group by "Name_ID","measurement","Key" """.format(entity_fin, measurement, date[0], date[1])
else:
df = filtering(case_id, categorical_filter, categorical, numerical_filter_name, from1, to1, measurement_filter)
sql = """SELECT en."Name_ID","measurement","Key",AVG(f."Value") as "Value"
FROM examination_numerical as en
right join ({4}) as df
on en."Name_ID" = df."Name_ID" ,
unnest("Value") as f("Value")
WHERE "Key" IN ({0}) and "measurement" in ('{1}') and "Date" Between '{2}' and '{3}'
Group by en."Name_ID","measurement","Key" """.format(entity_fin, measurement, date[0], date[1],df)
try:
df = pd.read_sql(sql, r)
df = df.pivot_table(index=["Name_ID"], columns="Key", values="Value", aggfunc=np.mean).reset_index()
if df.empty or len(df) == 0:
return df, "The entity wasn't measured"
else:
return df, None
except Exception:
return None, "Problem with load data from database" | 4,141 |
async def unwrap_pull_requests(prs_df: pd.DataFrame,
precomputed_done_facts: PullRequestFactsMap,
precomputed_ambiguous_done_facts: Dict[str, List[int]],
with_jira: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
bots: Set[str],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
resolve_rebased: bool = True,
repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[List[MinedPullRequest],
PRDataFrames,
PullRequestFactsMap,
Dict[str, ReleaseMatch],
Optional[asyncio.Task]]:
"""
Fetch all the missing information about PRs in a dataframe.
:param prs_df: dataframe with PullRequest-s.
:param precomputed_done_facts: Preloaded precomputed facts of done PRs (explicit).
:param precomputed_ambiguous_done_facts: Preloaded precomputed facts of done PRs (implicit).
:param with_jira: Value indicating whether to load the mapped JIRA issues.
:param branches: Branches of the relevant repositories.
:param default_branches: Default branches of the relevant repositories.
:param release_settings: Account's release settings.
:param account: State DB account ID.
:param meta_ids: GitHub account IDs.
:param mdb: Metadata DB.
:param pdb: Precomputed DB.
:param cache: Optional memcached client.
:return: Everything that's necessary for PullRequestListMiner.
"""
if prs_df.empty:
async def noop():
return {}
return (
[],
PRDataFrames(*(pd.DataFrame() for _ in dataclass_fields(PRDataFrames))),
{},
{},
asyncio.create_task(noop(), name="noop"),
)
if repositories is None:
repositories = logical_settings.all_logical_repos()
if resolve_rebased:
dags = await fetch_precomputed_commit_history_dags(
prs_df[PullRequest.repository_full_name.name].unique(), account, pdb, cache)
dags = await fetch_repository_commits_no_branch_dates(
dags, branches, BRANCH_FETCH_COMMITS_COLUMNS, True, account, meta_ids, mdb, pdb, cache)
prs_df = await PullRequestMiner.mark_dead_prs(
prs_df, branches, dags, meta_ids, mdb, PullRequest)
facts, ambiguous = precomputed_done_facts, precomputed_ambiguous_done_facts
PullRequestMiner.adjust_pr_closed_merged_timestamps(prs_df)
now = datetime.now(timezone.utc)
if rel_time_from := prs_df[PullRequest.merged_at.name].nonemin():
milestone_prs = prs_df[[PullRequest.merge_commit_sha.name,
PullRequest.merge_commit_id.name,
PullRequest.merged_at.name,
PullRequest.repository_full_name.name]]
milestone_prs.columns = [
Release.sha.name, Release.commit_id.name, Release.published_at.name,
Release.repository_full_name.name,
]
milestone_releases = dummy_releases_df().append(milestone_prs.reset_index(drop=True))
milestone_releases = milestone_releases.take(np.where(
milestone_releases[Release.sha.name].notnull())[0])
releases, matched_bys = await ReleaseLoader.load_releases(
prs_df[PullRequest.repository_full_name.name].unique(), branches, default_branches,
rel_time_from, now, release_settings, logical_settings, prefixer,
account, meta_ids, mdb, pdb, rdb, cache)
add_pdb_misses(pdb, "load_precomputed_done_facts_reponums/ambiguous",
remove_ambiguous_prs(facts, ambiguous, matched_bys))
tasks = [
load_commit_dags(
releases.append(milestone_releases), account, meta_ids, mdb, pdb, cache),
# not nonemax() here! we want NaT-s inside load_merged_unreleased_pull_request_facts
MergedPRFactsLoader.load_merged_unreleased_pull_request_facts(
prs_df, releases[Release.published_at.name].max(), LabelFilter.empty(),
matched_bys, default_branches, release_settings, prefixer, account, pdb),
]
dags, unreleased = await gather(*tasks)
else:
releases, matched_bys, unreleased = dummy_releases_df(), {}, {}
dags = await fetch_precomputed_commit_history_dags(
prs_df[PullRequest.repository_full_name.name].unique(), account, pdb, cache)
for k, v in unreleased.items():
if k not in facts:
facts[k] = v
dfs, _, _ = await PullRequestMiner.mine_by_ids(
prs_df, unreleased, repositories, now, releases, matched_bys, branches, default_branches,
dags, release_settings, logical_settings, prefixer, account, meta_ids,
mdb, pdb, rdb, cache, with_jira=with_jira)
deployment_names = dfs.deployments.index.get_level_values(1).unique()
deployments_task = asyncio.create_task(_load_deployments(
deployment_names, facts, logical_settings, prefixer,
account, meta_ids, mdb, pdb, rdb, cache),
name=f"load_included_deployments({len(deployment_names)})")
dfs.prs = split_logical_repositories(dfs.prs, dfs.labels, repositories, logical_settings)
prs = await list_with_yield(PullRequestMiner(dfs), "PullRequestMiner.__iter__")
filtered_prs = []
with sentry_sdk.start_span(op="PullRequestFactsMiner.__call__",
description=str(len(prs))):
facts_miner = PullRequestFactsMiner(bots)
pdb_misses = 0
for pr in prs:
node_id, repo = \
pr.pr[PullRequest.node_id.name], pr.pr[PullRequest.repository_full_name.name]
if (node_id, repo) not in facts:
try:
facts[(node_id, repo)] = facts_miner(pr)
except ImpossiblePullRequest:
continue
finally:
pdb_misses += 1
filtered_prs.append(pr)
set_pdb_hits(pdb, "fetch_pull_requests/facts", len(filtered_prs) - pdb_misses)
set_pdb_misses(pdb, "fetch_pull_requests/facts", pdb_misses)
if deployments_task is not None:
await deployments_task
deployments_task = deployments_task.result()
return filtered_prs, dfs, facts, matched_bys, deployments_task | 4,142 |
def load_images(image_files, resize=True):
"""Load images from files and optionally resize it."""
images = []
for image_file in image_files:
with file_io.FileIO(image_file, 'r') as ff:
images.append(ff.read())
if resize is False:
return images
# To resize, run a tf session so we can reuse 'decode_and_resize()'
# which is used in prediction graph. This makes sure we don't lose
# any quality in prediction, while decreasing the size of the images
# submitted to the model over network.
image_str_tensor = tf.placeholder(tf.string, shape=[None])
image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
feed_dict = collections.defaultdict(list)
feed_dict[image_str_tensor.name] = images
with tf.Session() as sess:
images_resized = sess.run(image, feed_dict=feed_dict)
return images_resized | 4,143 |
def schemaGraph (ds, ns, ontology_uri=None):
"""
schemaGraph (datasource, namespace, [ontology_uri,])
Return an RDF graph filled with axioms describing the datasource.
@param ds: the DataSource whose schema has to be converted
@param ns: the namespace uri of the created classes and properties
@param ontology_uri if not given, the namespace uri is used
@see: L{cross.datasource}
"""
# naming scheme:
# t-tablename : table class
# c-tablename.columnname : column property
# _ic-tablename.columnname : inverse column property
# _vc-tablename.columnname.simple_val : column-value instance
# dc-tablename.columnname : column-data property
# nc-tablename.columnname : null-column class
# i-tablename.indexname : index property
# _ii-tablename.indexname : inverse index property
# _vi-tablename.indexname.tuple_val : index-value instance
# di-tablename.indexname : index-data property
# ni-tablename.indexname : null-index class
# f-tablename.foreignkeyname : foreign-key property
# _vf-tablename.foreignkeyname.tuple_val : foreign-key-value instance
# df-tablename.foreignkeyname : foreign-key property
# nf-tablename.foreignkeyname : null-foreign-key class
rdf = Graph()
rdf.bind ('xsd', XSD)
rdf.bind ('owl', OWL)
if ontology_uri is None:
ontology_uri = ns
if ontology_uri[-1] in ['#', '/']:
ontology_uri = ontology_uri[:-1]
ontology_uri = URIRef (ontology_uri)
rdf.add ((ontology_uri, RDF.type, OWL.Ontology))
rdf.add ((RDF.value, RDF.type, OWL.DatatypeProperty))
for t in ds.tables:
t_uri = URIRef ("%st-%s" % (ns, t.uri_name))
_manage_table (t_uri, t, ns, rdf)
for c in t.columns:
_manage_column (t_uri, c, ns, rdf)
for i in t.unique_indexes:
if len (i) > 1:
_manage_unique_index (t_uri, i, ns, rdf)
for f in t.foreign_keys:
_manage_foreignkey (t_uri, f, ns, rdf)
return rdf | 4,144 |
def delete_log_dir():
"""Delete current test logs"""
delete_path(LOG_DIR) | 4,145 |
def ugerest():
"""
    Test UGE RestAPI Connectivity and Accessibility
"""
print("Test UGE RestAPI Availability... ", end='', flush=True)
ip_list = config.getUGE_Addr()
port_list = config.getUGE_Port()
try:
for inx, ip in enumerate(ip_list):
ugerest_url = "http://" + ip + ":" + port_list[inx]
response = urllib.request.urlopen(ugerest_url + "/jobs")
if response:
data = json.loads(response.read().decode(response.info().get_param('charset') or 'utf-8'))
if 'errorCode' in data:
print(f"{FAILED}\n- [{data['errorCode']}]: {data['errorMessage']}")
else:
print(f"{FAILED}\n- No Response from {ugerest_url}")
print(f"{OK}")
except Exception as exp:
print(f"{FAILED}\n- {exp}") | 4,146 |
def write_results(char_vars_output, guppy, output):
"""
    :param guppy: label (e.g. the guppy basecaller version) prepended to the count column header
:param char_vars_output: Output of char_vars() i.e a dict where nt are keys & counter obj are values for each nt
:param output: name of csv file to be produced
:return:
"""
with open(output, 'w') as out:
writer = csv.writer(out)
writer.writerow(['Nuc', 'Homopolymer Length', f'{guppy}count'])
for nt in char_vars_output.keys():
# char_vars_output will be dict obj with key == nt & value == Counter obj
# print(char_vars_output[nt])
for length in char_vars_output[nt]: # Homopolymer length in Counter obj
writer.writerow([nt, length, char_vars_output[nt][length]]) | 4,147 |
def test_show(capsys):
"""Do we get the expected code for some snippet module?
"""
show(sieve_eratosthenes)
captured = capsys.readouterr()
assert captured.out.startswith("def sieve_of_eratosthenes(n):") | 4,148 |
def is_remote(path):
"""Determine whether a file is in a remote location (which can be handled) based on prefix of connection string."""
for token in ["s3://", "http://", "https://"]: # add
if path.startswith(token):
return True
return False | 4,149 |
def saliency_map(output, input, name="saliency_map"):
"""
Produce a saliency map as described in the paper:
`Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
<https://arxiv.org/abs/1312.6034>`_.
The saliency map is the gradient of the max element in output w.r.t input.
Returns:
tf.Tensor: the saliency map. Has the same shape as input.
"""
max_outp = tf.reduce_max(output, 1)
    saliency_op = tf.gradients(max_outp, input)[0]
saliency_op = tf.identity(saliency_op, name=name)
return saliency_op | 4,150 |
def pretty_tree(*, program: str = None, file: str = None) -> str:
"""Get a pretty-printed string of the parsed AST of the QASM input.
The input will be taken either verbatim from the string ``program``, or read
from the file with name ``file``. Use exactly one of the possible input
arguments, passed by keyword.
Args:
program: a string containing the QASM to be parsed.
file: a string of the filename containing the QASM to be parsed.
Returns:
a pretty-printed version of the parsed AST of the given program.
Raises:
ValueError: no input is given, or too many inputs are given.
Qasm3ParserError: the input was not parseable as valid QASM 3.
"""
if program is not None and file is not None:
raise ValueError("Must supply only one of 'program' and 'file'.")
if program is not None:
input_stream = antlr4.InputStream(program)
elif file is not None:
input_stream = antlr4.FileStream(file, encoding="utf-8")
else:
raise ValueError("One of 'program' and 'file' must be supplied.")
# ANTLR errors (lexing and parsing) are sent to stderr, which we redirect
# to the variable `err`.
with io.StringIO() as err, contextlib.redirect_stderr(err):
lexer = qasm3Lexer(input_stream)
token_stream = antlr4.CommonTokenStream(lexer)
parser = qasm3Parser(token_stream)
tree = _pretty_tree_inner(parser.program(), parser.ruleNames, 0)
error = err.getvalue()
if error:
raise Qasm3ParserError(f"Parse tree build failed. Error:\n{error}")
return tree | 4,151 |
def get_models(datasets):
"""It obtains the models used into the experiments"""
dataframe = pd.read_csv('../results/' + datasets[0] + '/results.csv', sep=';')
models = dataframe['MODEL'].unique()
return models.tolist() | 4,152 |
def classify(infile, out, modeldir, n_classes, configpath):
"""Classify fasta sequences or TRrecords data to identify viral sequences
Classify fasta files or TFrecords containing contigs to identify viral
contigs. The classifier takes contigs and extracts features then makes
predictions. It returns a text file of prediction probabilities for each
model class. This file can be used to filter out viral contigs for analysis.
Args:
        infile (str): the path to a fasta file of contigs (2 kb or longer is
            recommended) or TFrecords to be classified.
        out (str): a file to which contig model predictions are written
modeldir (str): a model directory containing a trained model to use
for classification.
n_classes (int): the number of classes in the model (default 4)
configpath (str): path to yaml config files
Returns:
None
Todo:
Convert the model type to Tensorflow SavedModel format and TF serving
API once the Tensorflow API supports using SavedModels with Python 3.
"""
logging.info("Beginning Tensorflow classification")
with open(configpath, "r") as cf:
config = yaml.safe_load(cf)
    if infile.endswith(("tfrecord", "TFrecord")):
        logging.info("Classifying data from TFRecord file.")
        getfeat = False
    elif infile.endswith(("fasta", "fa", "fna")):
        logging.info("Classifying data from fasta file.")
        getfeat = True
    else:
        # the original try/except never fired because no exception was raised for an
        # unsupported suffix; reject it explicitly instead
        logging.error("Files with that suffix are not supported. Please "
                      "use .fasta, .fna, or .fa files or .tfrecord files created by "
                      "`vica get_features`")
        raise SystemExit(1)
if getfeat:
dtemp = tempfile.mkdtemp()
tfrecfile = os.path.join(dtemp, "data.tfrecord")
logging.info("Extracting Features from the sequence data. For more \
        control of options use `vica get_features`")
vica.get_features.run(infile=infile,
output=tfrecfile,
label=0,
minhashlocal=None,
configpath=configpath)
else:
tfrecfile=infile
kmerdim, kmer, codon, minhash = _featureshape(config["khmer_features"]["ksize"])
input_fn = functools.partial(base_input_fn,
codonlength=config["train_eval"]["codonlength"],
minhashlength=config["train_eval"]["minhashlength"],
kmerdim=kmerdim,
shuffle=False,
shuffle_buffer_size=0,
batch=config["train_eval"]["eval_batch_size"],
epochs=1,
filenames=tfrecfile)
if config["train_eval"]["model"] == "DNN":
estimator = mk_dnn_estimator(modeldir=modeldir,
n_classes=int(n_classes),
kmer=kmer,
codon=codon)
preds = estimator.predict(input_fn=input_fn)
print(preds)
elif config["train_eval"]["model"] == "DNNLogistic":
estimator = mk_dnnlogistic_estimator(modeldir=modeldir,
n_classes=int(n_classes),
minhash=minhash,
kmer=kmer,
codon=codon)
preds = estimator.predict(input_fn=input_fn)
idlist = _ids_from_tfrecords(tfrecfile)
with open(out, "w") as outfile:
csv_writer_instance = csv.writer(outfile, lineterminator='\n')
header = ["ID", "Class", "Class_id"] + ["Prob_class_" + str(i) for i in range(int(n_classes))]
csv_writer_instance.writerow(header)
for recid, rec in zip(idlist, preds):
plist = rec['probabilities']
pliststr = [str(x) for x in plist]
ll = [recid, rec['classes'][0].decode("utf-8"), str(rec['class_ids'][0])]
ll.extend(pliststr)
csv_writer_instance.writerow(ll)
if getfeat:
shutil.rmtree(dtemp) | 4,153 |
def Prepare(benchmark_spec):
"""Install Java, apache ant, authenticate vms
Set up the client machine, backend machine, and frontend
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
frontend = benchmark_spec.vm_groups['frontend'][0]
backend = benchmark_spec.vm_groups['backend'][0]
client = benchmark_spec.vm_groups['client'][0]
vms = benchmark_spec.vms
vm_util.RunThreaded(PreparePrivateKey, vms)
vm_util.RunThreaded(_PrepareVms, vms)
frontend.RemoteCommand('cd %s && '
'wget parsa.epfl.ch/cloudsuite/software/perfkit/'
'web_serving/webservingfiles.tgz && '
'tar xzf webservingfiles.tgz' % BASE_DIR)
_SetupClient(benchmark_spec)
frontend.RemoteCommand('scp -r -o StrictHostKeyChecking=no %s:%s %s' %
(client.ip_address, FABAN_HOME, BASE_DIR))
backend.RemoteCommand('scp -r -o StrictHostKeyChecking=no %s:%s %s' %
(client.ip_address, FABAN_HOME, BASE_DIR))
setup_functions = [_SetupBackend, _SetupFrontend]
vm_util.RunThreaded(lambda f: f(benchmark_spec), setup_functions) | 4,154 |
def _gr_xmin_ ( graph ) :
"""Get x-min for the graph
>>> xmin = graph.xmin()
"""
#
_size = len ( graph )
    if 0 == _size : return 0
#
x_ = ROOT.Double(0)
v_ = ROOT.Double(0)
graph.GetPoint ( 0 , x_ , v_ )
#
return x_ | 4,155 |
def test_http_request_invalid_schema_error(mocker_api_token, mock_base_http_request, demisto_version, client):
"""
When http request return invalid schema exception then appropriate error message should match.
"""
# Configure
mock_base_http_request.side_effect = InvalidSchema
mocker_api_token.return_value = API_TOKEN, 14400
demisto_version.return_value = {"version": "6.0.2"}
# Execute
with pytest.raises(ValueError) as e:
client.http_request('GET', MOCK_TEST_URL_SUFFIX, headers={})
# Assert
assert str(e.value) == 'Invalid API URL. Supplied schema is invalid, supports http(s).' | 4,156 |
def test_add_incoming_connection():
"""
Tests the add_incoming_connection function
:return: Test passes if all assertions are true. Tests do not pass if otherwise.
"""
center = Coordinates(4, 4)
radius = 10
i = Intersection(center, radius, 15)
empty_connections = i.get_connections()
assert not empty_connections
start1 = Coordinates(1,2)
end1 = Coordinates(9, 10)
len1 = 10
in_ln1 = 3
out_ln1 = 4
ang1 = math.pi/2
start2 = Coordinates(5, 6)
end2 = Coordinates(12, 14)
len2 = 15
in_ln2 = 5
out_ln2 = 1
ang2 = math.pi/4
start3 = Coordinates(7, 8)
end3 = Coordinates(10, 12)
len3 = 20
in_ln3 = 25
out_ln3 = 27
ang3 = 3 * math.pi / 2
r1 = Road(start1, end1, len1, out_ln1, in_ln1, ang1, 20, 'Test')
r2 = Road(start2, end2, len2, out_ln2, in_ln2, ang2, 25, 'Test')
r3 = Road(start3, end3, len3, out_ln3, in_ln3, ang3, 30, 'Test')
add_incoming_connection(i, r1)
non_empty = i.get_connections()
assert non_empty
assert non_empty[0].get_length() == 10
assert non_empty[0].get_angle() == ang1
assert non_empty[0].get_in_lanes() == in_ln1
assert non_empty[0].get_out_lanes() == out_ln1
add_incoming_connection(i, r3)
assert len(i.get_connections()) == 2
assert non_empty[0].get_length() == 10
assert non_empty[0].get_angle() == ang1
assert non_empty[0].get_in_lanes() == in_ln1
assert non_empty[0].get_out_lanes() == out_ln1
assert non_empty[1].get_length() == len3
assert non_empty[1].get_angle() == ang3
assert non_empty[1].get_in_lanes() == in_ln3
assert non_empty[1].get_out_lanes() == out_ln3
add_connection(i, math.pi, 20, 21, 22, 40, 'Test')
assert len(i.get_connections()) == 3
assert non_empty[0].get_length() == 10
assert non_empty[0].get_angle() == ang1
assert non_empty[0].get_in_lanes() == in_ln1
assert non_empty[0].get_out_lanes() == out_ln1
assert non_empty[1].get_length() == len3
assert non_empty[1].get_angle() == ang3
assert non_empty[1].get_in_lanes() == in_ln3
assert non_empty[1].get_out_lanes() == out_ln3
assert non_empty[2].get_length() == 20
assert non_empty[2].get_angle() == math.pi
assert non_empty[2].get_in_lanes() == 21
assert non_empty[2].get_out_lanes() == 22
add_incoming_connection(i, r2)
assert len(i.get_connections()) == 4
assert non_empty[0].get_length() == 10
assert non_empty[0].get_angle() == ang1
assert non_empty[0].get_in_lanes() == in_ln1
assert non_empty[0].get_out_lanes() == out_ln1
assert non_empty[1].get_length() == len3
assert non_empty[1].get_angle() == ang3
assert non_empty[1].get_in_lanes() == in_ln3
assert non_empty[1].get_out_lanes() == out_ln3
assert non_empty[2].get_length() == 20
assert non_empty[2].get_angle() == math.pi
assert non_empty[2].get_in_lanes() == 21
assert non_empty[2].get_out_lanes() == 22
assert non_empty[3].get_length() == 15
assert non_empty[3].get_angle() == ang2
assert non_empty[3].get_in_lanes() == in_ln2
assert non_empty[3].get_out_lanes() == out_ln2 | 4,157 |
def eliminate(values):
"""
Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values | 4,158 |
def route_distance(route):
"""
returns the distance traveled for a given tour
route - sequence of nodes traveled, does not include
start node at the end of the route
"""
dist = 0
prev = route[-1]
for node in route:
dist += node.euclidean_dist(prev)
prev = node
return dist | 4,159 |
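# A minimal usage sketch for route_distance above, using a stand-in node class (the real node
# type is not shown in the snippet); the tour is closed because `prev` starts at route[-1].
import math

class _Node:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def euclidean_dist(self, other):
        return math.hypot(self.x - other.x, self.y - other.y)

unit_square = [_Node(0, 0), _Node(0, 1), _Node(1, 1), _Node(1, 0)]
assert route_distance(unit_square) == 4.0  # perimeter of the unit square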
def adjust_matrix(matrix):
"""
Sorting matrix cols.
matrix: can be a numpy 2d-array or pytorch 2d-Tensor
Return
------
adjusted pytorch 2d-tensor
"""
if isinstance(matrix, np.ndarray):
tmp = torch.from_numpy(matrix).clone() # ?
else:
tmp = matrix.clone()
tmp /= tmp[:, 0].view([-1, 1])
tmp = sk.col_normalize(tmp, torch.ones(3, dtype=torch.float64))
if torch.sum(torch.log(tmp[:, 1])) > torch.sum(torch.log(tmp[:, 2])):
# return torch.from_numpy(matrix)
return 2
return 1
# ref = matrix[:, 1].copy()
# matrix[:, 1] = matrix[:, 2]
# matrix[:, 2] = ref
# return torch.from_numpy(matrix) | 4,160 |
def get_student_graph(pool, student, student_friends, friends_students, need_spinglass=False):
"""
    Build the user's social graph.
    :param pool: process pool (multiprocessing library)
    :param student: user identifier
    :param student_friends: list of the user's friends
    :param friends_students: list of the user's friends attending the same educational institution
    :param need_spinglass: True if the combination of the multilevel and spinglass clustering algorithms should be used
    :return: the user's social graph (NetworkX library)
"""
graph = nx.Graph()
for u, fs in pool.imap_unordered(get_friends, student_friends):
graph.add_edges_from((u, f) for f in fs & student_friends)
cluster = get_friends_students_cluster(graph, friends_students, need_spinglass)
graph = graph.subgraph(cluster)
for u, fs in pool.imap_unordered(get_friends, graph.nodes()):
graph.add_edges_from((u, f) for f in fs - student_friends - {student})
redundant_nodes = {node for node, degree in nx.degree(graph).items() if degree <= 2}
graph.remove_nodes_from(redundant_nodes)
foafs = set(graph.nodes()) - student_friends
for u, fs in pool.imap_unordered(get_friends, foafs):
graph.add_edges_from((u, f) for f in fs & foafs)
return graph | 4,161 |
def wdirectory(path):
"""
    Change the working directory to the given data path.
___
path: string, data path in the system
"""
return os.chdir(path) | 4,162 |
def ile_robil_czy_mial_dobe(dzien, zp, grafik):
"""Czy miał dobę danego dnia?"""
godzin = 0
for wpis in Wpis.objects.filter(user=zp.user, grafik=grafik, dzien=dzien):
godzin += wpis.pion.ile_godzin(dzien)
return (godzin, godzin == 24) | 4,163 |
def load_teacher():
"""
load ready-to-go teacher from "https://towardsdatascience.com/advanced-dqns-playing-pac-man-with-deep-reinforcement-learning-3ffbd99e0814"
:return: a trained teacher model trained with double dueling dqn with prioritized ER
"""
dqn = DQNPacman(input_size=dense_config.input_size, output_size=dense_config.output_size,
model_path=dense_config.model_path, scope=dense_config.scope,
epsilon_stop=dense_config.final_epsilon, epsilon=dense_config.initial_epsilon)
dqn.load_model()
return dqn | 4,164 |
async def test_service_call(
hass, habitica_entry, common_requests, capture_api_call_success
):
"""Test integration setup, service call and unload."""
assert await hass.config_entries.async_setup(habitica_entry.entry_id)
await hass.async_block_till_done()
assert hass.services.has_service(DOMAIN, SERVICE_API_CALL)
assert len(capture_api_call_success) == 0
TEST_SERVICE_DATA = {
ATTR_NAME: "test_user",
ATTR_PATH: ["tasks", "user", "post"],
ATTR_ARGS: TEST_API_CALL_ARGS,
}
assert await hass.services.async_call(
DOMAIN, SERVICE_API_CALL, TEST_SERVICE_DATA, blocking=True
)
assert len(capture_api_call_success) == 1
captured_data = capture_api_call_success[0].data
captured_data[ATTR_ARGS] = captured_data[ATTR_DATA]
del captured_data[ATTR_DATA]
assert captured_data == TEST_SERVICE_DATA
assert await hass.config_entries.async_unload(habitica_entry.entry_id)
assert not hass.services.has_service(DOMAIN, SERVICE_API_CALL) | 4,165 |
def exception_logged(result_output: str, exc: Exception) -> bool:
"""Small utility to search click result output for a specific excpetion .
Args:
result_output: The click result output string to search.
exc: The exception to search for.
Returns:
bool: Whether or not the exception was found
"""
seen_lines: List[dict] = []
for line in result_output.splitlines():
parsed_line = json.loads(line)
seen_lines.append(parsed_line)
for line in seen_lines:
if line.get("exc_info"):
if repr(exc) in line.get("exc_info"):
return True
return False | 4,166 |
def hello(world):
"""Hello, You!"""
return "Hello, {}!".format(world) | 4,167 |
def set_window_user_pointer(window, pointer):
"""
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
"""
data = (False, pointer)
if not isinstance(pointer, ctypes.c_void_p):
data = (True, pointer)
# Create a void pointer for the python object
pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
_window_user_data_repository[window_addr] = data
_glfw.glfwSetWindowUserPointer(window, pointer) | 4,168 |
def uniform_transition_matrix(p=0.01, N=24):
"""Computes uniform transition matrix
Notebook: C5/C5S3_ChordRec_HMM.ipynb
Args:
p (float): Self transition probability (Default value = 0.01)
N (int): Column and row dimension (Default value = 24)
Returns:
A (np.ndarray): Output transition matrix
"""
off_diag_entries = (1-p) / (N-1) # rows should sum up to 1
A = off_diag_entries * np.ones([N, N])
np.fill_diagonal(A, p)
return A | 4,169 |
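# A minimal usage sketch for uniform_transition_matrix above (assumes numpy as np): each row
# keeps probability p on the diagonal and spreads the rest uniformly, so rows sum to 1.
import numpy as np
A = uniform_transition_matrix(p=0.5, N=4)
# A[0] == [0.5, 1/6, 1/6, 1/6]
assert np.allclose(A.sum(axis=1), 1.0)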
def __is_connected__(g):
"""
    Checks if the directed acyclic graph is connected.
:return: A boolean indicating if the graph is connected.
"""
u = __convert_to_undirected_graph__(g)
return nx.is_connected(u) | 4,170 |
def get_segments(tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
    if len(tokens) > max_seq_length:
raise IndexError("Token length more than max seq length!")
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return np.array(segments + [0] * (max_seq_length - len(tokens))) | 4,171 |
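# Worked example (hypothetical BERT-style token list): the segment id flips to 1
# after the first "[SEP]", and the result is zero-padded to max_seq_length.
tokens = ["[CLS]", "how", "are", "you", "[SEP]", "fine", "[SEP]"]
segments = get_segments(tokens, max_seq_length=10)
assert segments.tolist() == [0, 0, 0, 0, 0, 1, 1, 0, 0, 0]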
def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
""" Copied from torch.nn._functions.rnn and modified """
if linear_func is None:
linear_func = F.linear
if input.is_cuda and linear_func is F.linear and fusedBackend is not None:
gi = linear_func(input, w_ih)
gh = linear_func(hidden, w_hh)
state = fusedBackend.GRUFused.apply
return state(gi, gh, hidden) if b_ih is None else state(gi, gh, hidden, b_ih, b_hh)
gi = linear_func(input, w_ih, b_ih)
gh = linear_func(hidden, w_hh, b_hh)
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy | 4,172 |
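# Minimal CPU usage sketch (assumes torch / torch.nn.functional as F are
# importable, as in the cell above); weight shapes follow the standard GRU
# layout of (3 * hidden_size, in_features).
batch, input_size, hidden_size = 4, 10, 20
x = torch.randn(batch, input_size)
h = torch.randn(batch, hidden_size)
w_ih = torch.randn(3 * hidden_size, input_size)
w_hh = torch.randn(3 * hidden_size, hidden_size)
h_next = GRUCell(x, h, w_ih, w_hh)
assert h_next.shape == (batch, hidden_size)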
def flush_socket(socks, lim=0):
"""remove the data present on the socket"""
input_socks = [socks]
cnt = 0
while True:
i_socks = select.select(input_socks, input_socks, input_socks, 0.0)[0]
if len(i_socks) == 0:
break
for sock in i_socks:
sock.recv(1024)
if lim > 0:
cnt += 1
if cnt >= lim:
#avoid infinite loop due to loss of connection
raise Exception("flush_socket: maximum number of iterations reached") | 4,173 |
def ensure_fov(env):
"""
Ensures that the field of view is consistent with robot's orientation
"""
yaw_radian = convert_angle(quaternion_to_euler(env.robot.get_orientation()))
yaw_degree = degrees(yaw_radian)
print('yaw: ', yaw_degree)
p.resetDebugVisualizerCamera(cameraDistance=cdist, cameraYaw=yaw_degree-90, cameraPitch=cpith, cameraTargetPosition=ctarget) | 4,174 |
def test_requisites_require_no_state_module(state, state_tree):
"""
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
"""
sls_contents = """
# Complex require/require_in graph
#
# Relative order of C>E is given by the definition order
#
# D (1) <--+
# |
# B (2) ---+ <-+ <-+ <-+
# | | |
# C (3) <--+ --|---|---+
# | | |
# E (4) ---|---|---+ <-+
# | | |
# A (5) ---+ --+ ------+
#
A:
cmd.run:
- name: echo A fifth
- require:
- C
B:
cmd.run:
- name: echo B second
- require_in:
- A
- C
C:
cmd.run:
- name: echo C third
D:
cmd.run:
- name: echo D first
- require_in:
- B
E:
cmd.run:
- name: echo E fourth
- require:
- B
- require_in:
- A
# will fail with "The following requisites were not found"
G:
cmd.run:
- name: echo G
- require:
- Z
# will fail with "The following requisites were not found"
H:
cmd.run:
- name: echo H
- require:
- Z
"""
expected_result = {
"cmd_|-A_|-echo A fifth_|-run": {
"__run_num__": 4,
"comment": 'Command "echo A fifth" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo B second" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo C third" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo D first" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E fourth_|-run": {
"__run_num__": 3,
"comment": 'Command "echo E fourth" run',
"result": True,
"changes": True,
},
"cmd_|-G_|-echo G_|-run": {
"__run_num__": 5,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " id: Z\n",
"result": False,
"changes": False,
},
"cmd_|-H_|-echo H_|-run": {
"__run_num__": 6,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " id: Z\n",
"result": False,
"changes": False,
},
}
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
result = normalize_ret(ret.raw)
assert result == expected_result | 4,175 |
def _toggle_debug_mode() -> bool:
"""Set debug to true or false.
Can be used for debugging purposes such that exceptions are raised (including the stack trace)
instead of suppressed.
Note: the debug status is always printed when executing this method.
Returns:
Boolean indicating the status of the DEBUG global.
"""
if glob.DEBUG:
glob.DEBUG = False
print("Debugging turned off.")
else:
glob.DEBUG = True
print("Debugging turned on.")
return glob.DEBUG | 4,176 |
def GetAppBasename():
"""Returns the friendly basename of this application."""
return os.path.basename(sys.argv[0]) | 4,177 |
def fetch_and_publish_results(run_once=False):
"""
Harvest data and deploy cards
"""
require('settings', provided_by=['production', 'staging'])
try:
with settings(warn_only=True):
main(run_once)
except KeyboardInterrupt:
sys.exit(0) | 4,178 |
def test_get_signal_external_language_poisonous():
"""
Test that getting the signal for a population with external
language gives a specific signal for edible mushrooms
"""
sim = Simulation(4, 5, 6, 7, "External")
angle = 0
mush = 0b1111100000
population = []
viewer = False
signal = sim.get_signal(angle, mush, population, viewer)
assert signal == [1, 0, 0] | 4,179 |
def connect(dbtype: str, **kwargs) -> subprocess.Popen:
""" Creates a connection to the database server """
# create subprocess
process = subprocess.Popen('/bin/bash', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10)
# connect process to database server
stderr_out = 'errtemp'
cmd = parse_connection_request(dbtype, stderr=stderr_out, **kwargs)
# debug
#print(cmd)
process.stdin.write(bytes(cmd, 'utf-8')) # type: ignore
# get stderr from errtemp file
error_msg = _get_stderr(stderr_out)
#print(error_msg)
if error_msg:
process.communicate()
raise ConnectionRefusedError(error_msg)
return process | 4,180 |
def min_requirements(args=None):
"""
Prints a pip install command to install the minimal supported versions of a
requirement file.
Uses requirements_production.txt by default.
"""
    # NOTE: relies on pip's pre-10.0 internal API (pip.req / pip.download),
    # which newer pip releases moved under pip._internal.
    import pip
def get_lowest_versions(requirements_file):
for line in pip.req.parse_requirements(requirements_file, session=pip.download.PipSession()):
yield '%s==%s' % (line.req.key, line.req.specs[0][1])
print('pip install %s' % ' '.join(get_lowest_versions(args.requirements))) | 4,181 |
def client(mock_settings) -> StructurizrClient:
"""Provide a client instance with the mock settings."""
return StructurizrClient(settings=mock_settings) | 4,182 |
def mean_iou(y_true, y_pred, **kwargs):
"""
Compute mean Intersection over Union of two segmentation masks, via Keras.
    Calls seg_metrics(y_true, y_pred, metric_name='iou'); see there for allowed kwargs.
"""
return seg_metrics(y_true, y_pred, metric_name='iou', drop_last = False, **kwargs) | 4,183 |
def reset_config():
"""create a new config.ini file in the user home dir/.pyhdx folder"""
with open(config_file_path, 'w') as target:
version_string = '; pyhdx configuration file ' + __version__ + '\n\n'
target.write(version_string)
with open(current_dir / 'config.ini') as source:
for line in source:
target.write(line)
#shutil.copy(current_dir / 'config.ini', config_file_path) | 4,184 |
async def bluetext(bt_e):
"""Believe me, you will find this useful."""
if bt_e.is_group:
await bt_e.edit(
"/CORES_PRECISO_CLICAR\n"
"/VOCE_E_UM_ANIMAL_ESTUPIDO_QUE_E_ATRAIDO_A_CORES\n"
"/CLIQUE_AQUI"
) | 4,185 |
def _write_blast_summary(seqs, path_sum):
"""Write a summary blast results
"""
# Summarize results
with open(path_sum, 'w') as f_o:
header = ['Contig ID',
'Sample ID',
'Contig Length',
'Aligned Length',
'Aligned coverage of Contig',
'Ref Seq ID',
'Aligned Start at Ref',
'Ref Strand',
'Is HIV?',
'Primer?',
                  'Primer Seq',  # NOTE: the data rows written below do not include primer fields
'Large Deletion?',
'Internal Inversion?']
print('\t'.join(header), file=f_o)
sseqid = seqs.ref_id
for qid in seqs.qids:
sample_id = qid.split('_')[0]
aln_len, qlen, sstart, strand = seqs.info[qid]['blast']
is_hiv = seqs.call[qid]['is_hiv']
deletion = seqs.call[qid]['deletion']
inversion = seqs.call[qid]['inversion']
line = '{}\t{}\t{}\t{}\t{:.2f}\t{}\t{}\t{}\t{}\t{}\t{}'
line = line.format(qid, sample_id, qlen, aln_len, aln_len/qlen,
sseqid, sstart, strand, is_hiv, deletion,
inversion)
print(line, file=f_o) | 4,186 |
def pages_to_csv(filename, pages, path=""):
"""Writes a csv file with information about the webpages.
Raises PermissionError if the file is already open.
"""
filename = add_extension(filename, ".csv")
if not path:
filename = os.path.join(os.pardir, filename)
else:
filename = path + filename
with open(filename, 'w', newline='') as myfile:
wr = csv.writer(myfile)
for page in pages:
wr.writerow([str(page)]) | 4,187 |
def get_python_function(target_kwargs_function,func_name,func_spec):
"""Convert a argparse spec into a python function
    This function provides a python function with a signature indicated by the ``func_spec`` dictionary,
    following the conventions of the :f:mod:`yaml_argparse` module.
The :py:func:`futile.Utils.function_signature_regenerator` function is used for the conversion
Args:
target_kwargs_function (func): the keyword arguments function we want to give the signature to.
func_name (str): Name of the function, usually the key of the dictionary whose ``func_spec`` is the value
func_spec (dict) : dictionary of the function specifications to be provided to the
:py:func:`futile.Utils.function_signature_regenerator` function.
Returns:
        func: the generated function with signature given by the arguments of ``func_spec``
defaulting to their default value.
Todo:
Create the docstring of the generated function by also including the docstring of the arguments
"""
from copy import deepcopy
from futile.Utils import function_signature_regenerator as fsr
fspec=deepcopy(func_spec)
docstring=fspec.pop("help")
if "shorthelp" in fspec: fspec.pop("shorthelp")
key_args = {key: val["default"] for (key, val) in fspec["args"].items()}
return fsr(target_kwargs_function, fun_name=func_name,
fun_docstring=docstring,**key_args) | 4,188 |
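# Illustrative (hypothetical) spec showing the shape ``func_spec`` is expected
# to have: a "help" string plus an "args" mapping of name -> {"default": ...}.
# The target function name used in the comment below is made up for the example.
example_spec = {
    "help": "Run a calculation with the given parameters.",
    "args": {"nrepeat": {"default": 1}, "verbose": {"default": False}},
}
# get_python_function(my_kwargs_runner, "run", example_spec) would then return
# a function with signature run(nrepeat=1, verbose=False) that forwards its
# keyword arguments to my_kwargs_runner.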
def ydbdr2rgb(ydbdr, *, channel_axis=-1):
"""YDbDr to RGB color space conversion.
Parameters
----------
ydbdr : (..., 3, ...) array_like
The image in YDbDr format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `ydbdr` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This is the color space commonly used by video codecs, also called the
reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
return _convert(rgb_from_ydbdr, ydbdr, name='ydbdr2rgb') | 4,189 |
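# Round-trip sketch (assumes numpy as np and the companion rgb2ydbdr converter
# from the same module, as in skimage.color): YDbDr is a linear, invertible
# transform, so RGB -> YDbDr -> RGB should reproduce the input up to
# floating-point error.
rgb = np.random.rand(16, 16, 3)
assert np.allclose(ydbdr2rgb(rgb2ydbdr(rgb)), rgb)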
def _migrate_activity_log(logs, **kwargs):
"""For migrate_activity_log.py script."""
for log in logs:
action = _action_map(log.action)
# Create thread.
thread, tc = CommunicationThread.objects.safer_get_or_create(
addon=log.arguments[0], version=log.arguments[1])
# Filter notes.
note_params = {
'thread': thread,
'note_type': action,
'author': log.user,
'body': log.details.get('comments', ''),
}
notes = CommunicationNote.objects.filter(created=log.created,
**note_params)
if notes.exists():
# Note already exists, move on.
continue
# Create note.
note = CommunicationNote.objects.create(
# Developers should not see escalate/reviewer comments.
read_permission_developer=action not in (cmb.ESCALATION,
cmb.REVIEWER_COMMENT),
**note_params)
note.update(created=log.created)
# Attachments.
if note.attachments.exists():
# Already migrated. Continue.
continue
# Create attachments.
for attachment in log.activitylogattachment_set.all():
note_attachment = note.attachments.create(
filepath=attachment.filepath, mimetype=attachment.mimetype,
description=attachment.description)
note_attachment.update(created=attachment.created) | 4,190 |
def execute(
connection_info: NodeConnectionInfo,
block_id: typing.Union[None, bytes, str, int] = None
) -> dict:
"""Returns current auction system contract information.
:param connection_info: Information required to connect to a node.
:param block_id: Identifier of a finalised block.
:returns: Current auction system contract information.
"""
# Get latest.
    # TODO: verify, as a null block_id should return the latest auction info anyway.
    if block_id is None:
block: dict = get_block(connection_info)
block_id: str = block["hash"]
# Get by hash - bytes | hex.
if isinstance(block_id, (bytes, str)):
response = rpc_client.request(
connection_info.address_rpc,
constants.RPC_STATE_GET_AUCTION_INFO,
block_identifier={
"Hash": block_id.hex() if isinstance(block_id, bytes) else block_id
}
)
# Get by height.
elif isinstance(block_id, int):
response = rpc_client.request(
connection_info.address_rpc,
constants.RPC_STATE_GET_AUCTION_INFO,
block_identifier={
"Height": block_id
}
)
return response.data.result | 4,191 |
def main():
"""Parse command-line args, pass into process"""
cmd_parser = create_cmd_parser()
kwargs = vars(cmd_parser.parse_args())
process(**kwargs) | 4,192 |
def subscriber(pipeline):
"""Joystick subscriber thread
"""
cmd = {}
while True:
# clear command
cmd.clear()
# collect gamepad events (blocking)
events = inputs.get_gamepad()
# parse all events
for event in events:
            if event.ev_type != "Sync":
                # print("{} {} {}".format(event.ev_type, event.code, event.state))
                val_norm = event.state / 32768.0
                # use equality, not identity, when comparing against string literals
                if event.code == "ABS_Y":
                    cmd['abs_y'] = val_norm
                if event.code == "ABS_X":
                    cmd['abs_x'] = val_norm
                if event.code == "ABS_RY":
                    cmd['abs_ry'] = val_norm
if cmd:
# print(cmd)
pipeline.set_message(cmd) | 4,193 |
def direction_to_point(pos1: IntVector2D, pos2: IntVector2D) -> Grid4TransitionsEnum:
"""
Returns the closest direction orientation of position 2 relative to position 1
:param pos1: position we are interested in
:param pos2: position we want to know it is facing
:return: direction NESW as int N:0 E:1 S:2 W:3
"""
diff_vec = np.array((pos1[0] - pos2[0], pos1[1] - pos2[1]))
axis = np.argmax(np.power(diff_vec, 2))
direction = np.sign(diff_vec[axis])
if axis == 0:
if direction > 0:
return Grid4TransitionsEnum.NORTH
else:
return Grid4TransitionsEnum.SOUTH
else:
if direction > 0:
return Grid4TransitionsEnum.WEST
else:
return Grid4TransitionsEnum.EAST | 4,194 |
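# Quick checks of the orientation convention (row index grows to the SOUTH,
# column index grows to the EAST); Grid4TransitionsEnum is the same enum the
# function above already relies on.
assert direction_to_point((5, 5), (2, 5)) == Grid4TransitionsEnum.NORTH
assert direction_to_point((5, 5), (9, 5)) == Grid4TransitionsEnum.SOUTH
assert direction_to_point((5, 5), (5, 9)) == Grid4TransitionsEnum.EAST
assert direction_to_point((5, 5), (5, 1)) == Grid4TransitionsEnum.WEST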
def test_prime_data_ranking() -> None:
"""Test prime data ranking."""
pass | 4,195 |
def handle_nullboolean(field, request_get):
"""Build a list of chips for NullBooleanField field."""
value = yesno(
field.value(),
pgettext_lazy('Possible values of boolean filter', 'yes,no,all'))
return [{
'content': CHIPS_PATTERN % (field.label, value),
'link': get_cancel_url(request_get, field.name)}] | 4,196 |
def rob(nums):
"""
:type nums: List[int]
:rtype: int
"""
    if not nums:
return 0
elif len(nums) == 1:
return nums[0]
runningTotal = [-1, -1]
runningTotal[0] = nums[0]
runningTotal[1] = max(nums[0], nums[1])
for i in range(2, len(nums)):
runningTotal.append(max([nums[i] + runningTotal[i - 2],
runningTotal[i - 1]]))
return runningTotal[-1] | 4,197 |
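# Sanity checks for the recurrence
# runningTotal[i] = max(nums[i] + runningTotal[i-2], runningTotal[i-1]):
assert rob([]) == 0
assert rob([5]) == 5
assert rob([1, 2]) == 2
assert rob([2, 7, 9, 3, 1]) == 12  # pick 2 + 9 + 1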
def test_rest_plugins(mock_post, mock_get):
"""
API: REST Based Plugins()
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# Define how many characters exist per line
row = 80
# Some variables we use to control the data we work with
body_len = 1024
title_len = 1024
# Create a large body and title with random data
body = ''.join(choice(str_alpha + str_num + ' ') for _ in range(body_len))
body = '\r\n'.join([body[i: i + row] for i in range(0, len(body), row)])
# Create our title using random data
title = ''.join(choice(str_alpha + str_num) for _ in range(title_len))
# iterate over our dictionary and test it out
for (url, meta) in TEST_URLS:
# Our expected instance
instance = meta.get('instance', None)
# Our expected server objects
self = meta.get('self', None)
# Our expected Query response (True, False, or exception type)
response = meta.get('response', True)
# Our expected Notify response (True or False)
notify_response = meta.get('notify_response', response)
# Our expected Notify Attachment response (True or False)
attach_response = meta.get('attach_response', notify_response)
# Our expected privacy url
        # Don't set this if you don't need to check its value
privacy_url = meta.get('privacy_url')
# Our regular expression
url_matches = meta.get('url_matches')
# Test attachments
        # Don't set this if you don't need to check its value
check_attachments = meta.get('check_attachments', True)
        # Allow us to force the server response code to be something other than
        # the defaults
requests_response_code = meta.get(
'requests_response_code',
requests.codes.ok if response else requests.codes.not_found,
)
        # Allow us to force the server response text to be something other than
        # the defaults
requests_response_text = meta.get('requests_response_text')
if not isinstance(requests_response_text, six.string_types):
# Convert to string
requests_response_text = dumps(requests_response_text)
# Allow notification type override, otherwise default to INFO
notify_type = meta.get('notify_type', NotifyType.INFO)
# Whether or not we should include an image with our request; unless
# otherwise specified, we assume that images are to be included
include_image = meta.get('include_image', True)
if include_image:
# a default asset
asset = AppriseAsset()
else:
# Disable images
asset = AppriseAsset(image_path_mask=False, image_url_mask=False)
asset.image_url_logo = None
test_requests_exceptions = meta.get(
'test_requests_exceptions', False)
# A request
robj = mock.Mock()
robj.content = u''
mock_get.return_value = robj
mock_post.return_value = robj
if test_requests_exceptions is False:
# Handle our default response
mock_post.return_value.status_code = requests_response_code
mock_get.return_value.status_code = requests_response_code
# Handle our default text response
mock_get.return_value.content = requests_response_text
mock_post.return_value.content = requests_response_text
mock_get.return_value.text = requests_response_text
mock_post.return_value.text = requests_response_text
# Ensure there is no side effect set
mock_post.side_effect = None
mock_get.side_effect = None
else:
            # Handle exception testing; first we turn the boolean flag into
            # a list of exceptions
test_requests_exceptions = REQUEST_EXCEPTIONS
try:
obj = Apprise.instantiate(
url, asset=asset, suppress_exceptions=False)
if obj is None:
if instance is not None:
# We're done (assuming this is what we were expecting)
print("{} didn't instantiate itself "
"(we expected it to be a {})".format(url, instance))
assert False
continue
if instance is None:
# Expected None but didn't get it
print('%s instantiated %s (but expected None)' % (
url, str(obj)))
assert False
assert isinstance(obj, instance) is True
if isinstance(obj, plugins.NotifyBase):
# We loaded okay; now lets make sure we can reverse this url
assert isinstance(obj.url(), six.string_types) is True
# Test url() with privacy=True
assert isinstance(
obj.url(privacy=True), six.string_types) is True
# Some Simple Invalid Instance Testing
assert instance.parse_url(None) is None
assert instance.parse_url(object) is None
assert instance.parse_url(42) is None
if privacy_url:
# Assess that our privacy url is as expected
assert obj.url(privacy=True).startswith(privacy_url)
if url_matches:
# Assess that our URL matches a set regex
assert re.search(url_matches, obj.url())
# Instantiate the exact same object again using the URL from
# the one that was already created properly
obj_cmp = Apprise.instantiate(obj.url())
# Our object should be the same instance as what we had
# originally expected above.
if not isinstance(obj_cmp, plugins.NotifyBase):
# Assert messages are hard to trace back with the way
# these tests work. Just printing before throwing our
# assertion failure makes things easier to debug later on
print('TEST FAIL: {} regenerated as {}'.format(
url, obj.url()))
assert False
# Tidy our object
del obj_cmp
if self:
# Iterate over our expected entries inside of our object
for key, val in self.items():
# Test that our object has the desired key
                    assert hasattr(obj, key) is True
                    assert getattr(obj, key) == val
#
# Stage 1: with title defined
#
try:
if test_requests_exceptions is False:
# Disable throttling
obj.request_rate_per_sec = 0
# check that we're as expected
assert obj.notify(
body=body, title=title,
notify_type=notify_type) == notify_response
# check that this doesn't change using different overflow
# methods
assert obj.notify(
body=body, title=title,
notify_type=notify_type,
overflow=OverflowMode.UPSTREAM) == notify_response
assert obj.notify(
body=body, title=title,
notify_type=notify_type,
overflow=OverflowMode.TRUNCATE) == notify_response
assert obj.notify(
body=body, title=title,
notify_type=notify_type,
overflow=OverflowMode.SPLIT) == notify_response
#
                    # Handle variations of the Asset Object missing fields
#
# First make a backup
app_id = asset.app_id
app_desc = asset.app_desc
# now clear records
asset.app_id = None
asset.app_desc = None
# Notify should still work
assert obj.notify(
body=body, title=title,
notify_type=notify_type) == notify_response
# App ID only
asset.app_id = app_id
asset.app_desc = None
# Notify should still work
assert obj.notify(
body=body, title=title,
notify_type=notify_type) == notify_response
# App Desc only
asset.app_id = None
asset.app_desc = app_desc
# Notify should still work
assert obj.notify(
body=body, title=title,
notify_type=notify_type) == notify_response
# Restore
asset.app_id = app_id
asset.app_desc = app_desc
if check_attachments:
# Test single attachment support; even if the service
# doesn't support attachments, it should still
# gracefully ignore the data
attach = os.path.join(TEST_VAR_DIR, 'apprise-test.gif')
assert obj.notify(
body=body, title=title,
notify_type=notify_type,
attach=attach) == attach_response
# Same results should apply to a list of attachments
attach = AppriseAttachment((
os.path.join(TEST_VAR_DIR, 'apprise-test.gif'),
os.path.join(TEST_VAR_DIR, 'apprise-test.png'),
os.path.join(TEST_VAR_DIR, 'apprise-test.jpeg'),
))
assert obj.notify(
body=body, title=title,
notify_type=notify_type,
attach=attach) == attach_response
else:
# Disable throttling
obj.request_rate_per_sec = 0
for _exception in REQUEST_EXCEPTIONS:
mock_post.side_effect = _exception
mock_get.side_effect = _exception
try:
assert obj.notify(
body=body, title=title,
notify_type=NotifyType.INFO) is False
except AssertionError:
# Don't mess with these entries
raise
except Exception:
# We can't handle this exception type
raise
except AssertionError:
# Don't mess with these entries
raise
except Exception as e:
# Check that we were expecting this exception to happen
try:
if not isinstance(e, response):
raise e
except TypeError:
print('%s Unhandled response %s' % (url, type(e)))
raise e
#
# Stage 2: without title defined
#
try:
if test_requests_exceptions is False:
# check that we're as expected
assert obj.notify(body='body', notify_type=notify_type) \
== notify_response
else:
for _exception in REQUEST_EXCEPTIONS:
mock_post.side_effect = _exception
mock_get.side_effect = _exception
try:
assert obj.notify(
body=body,
notify_type=NotifyType.INFO) is False
except AssertionError:
# Don't mess with these entries
raise
except Exception:
# We can't handle this exception type
raise
except AssertionError:
# Don't mess with these entries
raise
except Exception as e:
# Check that we were expecting this exception to happen
if not isinstance(e, response):
raise e
            # Tidy our object and allow any defined destructors to be executed.
del obj
except AssertionError:
# Don't mess with these entries
print('%s AssertionError' % url)
raise
except Exception as e:
# Handle our exception
if instance is None:
print('%s %s' % (url, str(e)))
raise e
if not isinstance(e, instance):
print('%s %s' % (url, str(e)))
raise e | 4,198 |
def _transform_collaborators(collaborators: Dict, repo_url: str, transformed_collaborators: Dict) -> None:
"""
Performs data adjustments for outside collaborators in a GitHub repo.
Output data shape = [{permission, repo_url, url (the user's URL), login, name}, ...]
:param collaborators: See cartography.tests.data.github.repos for data shape.
:param repo_url: The URL of the GitHub repo.
:param transformed_collaborators: Output dict. Data shape =
{'ADMIN': [{ user }, ...], 'MAINTAIN': [{ user }, ...], 'READ': [ ... ], 'TRIAGE': [ ... ], 'WRITE': [ ... ]}
:return: Nothing.
"""
# `collaborators` is sometimes None
if collaborators:
for idx, user in enumerate(collaborators['nodes']):
user_permission = collaborators['edges'][idx]['permission']
user['repo_url'] = repo_url
transformed_collaborators[user_permission].append(user) | 4,199 |
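# Tiny worked example with made-up data, mirroring the shapes documented above:
collaborators = {
    "nodes": [{"login": "alice", "name": "Alice", "url": "https://github.com/alice"}],
    "edges": [{"permission": "ADMIN"}],
}
transformed = {"ADMIN": [], "MAINTAIN": [], "READ": [], "TRIAGE": [], "WRITE": []}
_transform_collaborators(collaborators, "https://github.com/org/repo", transformed)
assert transformed["ADMIN"][0]["repo_url"] == "https://github.com/org/repo"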