content | sha1 | id
---|---|---|
def remove_element(objects, remove):
    """Remove the object whose ``_id`` matches the given id.
    Args:
        objects (list): List of objects, each carrying an ``_id`` attribute.
        remove: Sequence whose first element is the id of the object to remove.
    Returns:
        list: The same list with the matching element removed.
    """
    # Iterate over a copy: removing from a list while iterating over it skips elements
    for obj in list(objects):
        if obj._id == remove[0]:
            objects.remove(obj)
    return objects | 65a9fe296a6d8369127003c33f58022ededfdcba | 705,350 |
from typing import Union, Mapping, Iterable, Tuple, Any
def update_and_return_dict(
dict_to_update: dict, update_values: Union[Mapping, Iterable[Tuple[Any, Any]]]
) -> dict:
"""Update a dictionary and return the ref to the dictionary that was updated.
Args:
dict_to_update (dict): the dict to update
update_values (Union[Mapping, Iterable[Tuple[Any, Any]]]): the values to update the dict
with
Returns:
dict: the dict that was just updated.
"""
dict_to_update.update(update_values)
return dict_to_update | 8622f96a9d183c8ce5c7f260e97a4cb4420aecc7 | 705,351 |
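# Example: the returned reference is the very dict that was passed in,
# which allows chaining on an in-place update:
settings = {"debug": False}
assert update_and_return_dict(settings, {"debug": True}) is settings
assert settings["debug"] is True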
def get_unity_snapshotschedule_parameters():
"""This method provide parameters required for the ansible snapshot
schedule module on Unity"""
return dict(
name=dict(type='str'),
id=dict(type='str'),
type=dict(type='str', choices=['every_n_hours', 'every_day',
'every_n_days', 'every_week',
'every_month']),
interval=dict(type='int'),
hours_of_day=dict(type='list', elements='int'),
day_interval=dict(type='int'),
days_of_week=dict(type='list', elements='str',
choices=['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY',
'THURSDAY', 'FRIDAY', 'SATURDAY']),
day_of_month=dict(type='int'),
hour=dict(type='int'),
minute=dict(type='int'),
desired_retention=dict(type='int'),
retention_unit=dict(type='str', choices=['hours', 'days'],
default='hours'),
auto_delete=dict(type='bool'),
state=dict(required=True, type='str', choices=['present', 'absent'])
) | a25cb6c62a0a69f2586135677802309e033d86bc | 705,352 |
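# Sketch of how such a parameter dict is typically fed to Ansible's
# AnsibleModule; the check-mode flag is an illustrative choice, not from
# the source:
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=get_unity_snapshotschedule_parameters(),
    supports_check_mode=True,
)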
import functools
import sys
def keyboard_interrupt(func):
    """Decorator for methods that exits cleanly when a KeyboardInterrupt is raised."""
    @functools.wraps(func)  # preserve the wrapped method's name and docstring
    def wrap(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except KeyboardInterrupt:
            self.close()  # this will close the visualizer if necessary
            sys.exit(0)
    return wrap | 1914924986c278bb919274b746ce13fb718268e8 | 705,353 |
def cutoff_depth(d: int):
"""A cutoff function that searches to depth d."""
return lambda game, state, depth: depth > d | af7396a92f1cd234263e8448a6d1d22b56f4a12c | 705,354 |
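# Example: the returned predicate tells a depth-limited search when to stop
# expanding (the game/state arguments are unused by this cutoff):
cutoff = cutoff_depth(4)
assert cutoff(None, None, 5) is True
assert cutoff(None, None, 4) is False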
def rgb_to_hex(red, green, blue):
"""Return color as #rrggbb for the given RGB color values."""
return '#%02x%02x%02x' % (int(red), int(green), int(blue)) | 7523bcb4b7a033655c9f5059fcf8d0ed656502c8 | 705,356 |
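# Example: values are truncated to ints and rendered as two hex digits each:
assert rgb_to_hex(255, 0, 128) == '#ff0080'
assert rgb_to_hex(17.9, 0, 0) == '#110000'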
def _is_swiftmodule(path):
"""Predicate to identify Swift modules/interfaces."""
return path.endswith((".swiftmodule", ".swiftinterface")) | 085fa4f8735ce371927f606239d51c44bcca5acb | 705,357 |
def get_desc_dist(descriptors1, descriptors2):
""" Given two lists of descriptors compute the descriptor distance
between each pair of feature. """
#desc_dists = 2 - 2 * (descriptors1 @ descriptors2.transpose())
desc_sims = - descriptors1 @ descriptors2.transpose()
# desc_sims = desc_sims.astype('float64')
# # Weight the descriptor distances
# desc_sims = np.exp(desc_sims)
# desc_sims /= np.sum(desc_sims, axis=1, keepdims=True)
# desc_sims = 1 - desc_sims*desc_sims
#desc_dist = np.linalg.norm(descriptors1[:, None] - descriptors2[None], axis=2)
#desc_dist = 2 - 2 * descriptors1 @ descriptors2.transpose()
return desc_sims | 2baea3bfa01b77765ec3ce95fd9a6be742783420 | 705,359 |
import os
def get_mem_usage():
"""returns percentage and vsz mem usage of this script"""
pid = os.getpid()
psout = os.popen( "ps -p %s u"%pid ).read()
parsed_psout = psout.split("\n")[1].split()
return float(parsed_psout[3]), int( parsed_psout[4] ) | 9d0060f435a1fb0d77a31ce946d7e46ffb7b4762 | 705,360 |
import os
import requests
def download_file(url, local_folder=None):
"""Downloads file pointed to by `url`.
If `local_folder` is not supplied, downloads to the current folder.
"""
filename = os.path.basename(url)
if local_folder:
filename = os.path.join(local_folder, filename)
# Download the file
print("Downloading: " + url)
response = requests.get(url, stream=True)
if response.status_code != 200:
raise Exception("download file failed with status code: %d, fetching url '%s'" % (response.status_code, url))
    # Stream the file to disk in chunks so large downloads do not need to fit in memory
    with open(filename, "wb") as handle:
        for chunk in response.iter_content(chunk_size=8192):
            handle.write(chunk)
return filename | 2229239b4c54c9ef7858b3013cb78d00e0ea2ae0 | 705,361 |
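# Usage sketch (the URL and folder are placeholders, not from the source):
# path = download_file("https://example.com/data.zip", local_folder="downloads")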
import csv
def simple_file_scan(reader, bucket_name, region_name, file_name):
""" Does an initial scan of the file, figuring out the file row count and which rows are too long/short
Args:
reader: the csv reader
bucket_name: the bucket to pull from
region_name: the region to pull from
file_name: name of the file to pull
Returns:
file_row_count: the number of lines in the file
short_rows: a list of row numbers that have too few fields
            long_rows: a list of row numbers that have too many fields
"""
# Count file rows: throws a File Level Error for non-UTF8 characters
# Also getting short and long rows for formatting errors and pandas processing
temp_file = open(reader.get_filename(region_name, bucket_name, file_name), encoding='utf-8')
file_row_count = 0
header_length = 0
short_rows = []
long_rows = []
# Getting the delimiter
header_line = temp_file.readline()
delimiter = '|' if header_line.count('|') > header_line.count(',') else ','
temp_file.seek(0)
for line in csv.reader(temp_file, delimiter=delimiter):
if line:
file_row_count += 1
line_length = len(line)
# Setting the expected length for the file
if header_length == 0:
header_length = line_length
# All lines that are shorter than they should be
elif line_length < header_length:
short_rows.append(file_row_count)
# All lines that are longer than they should be
elif line_length > header_length:
long_rows.append(file_row_count)
try:
temp_file.close()
except AttributeError:
# File does not exist, and so does not need to be closed
pass
return file_row_count, short_rows, long_rows | ccd1aad870124a9b48f05bbe0d7fe510ae36bc33 | 705,363 |
import os
import sqlite3
def get_users_name(path):
"""
登録されているユーザ情報の回収
Parameters
----------
path : str
homeディレクトリまでのパス
Returns
-------
name_dict : dict
登録ユーザ情報の辞書
"""
path_db = os.path.join(path, 'data', 'list.db')
name_list = []
with sqlite3.connect(path_db) as conn:
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('select * from miyano')
for row in cur:
d = (row['number'], row['name'])
name_list.append(d)
cur.close()
name_dict = dict(name_list)
return name_dict | 4a71b52a4dfa1e40eab62134795944b43a774a73 | 705,364 |
def get_rest_parameter_state(parameter_parsing_states):
"""
    Gets the rest parameter's parsing state from the given states if there is any.
    Parameters
    ----------
    parameter_parsing_states : `list` of ``ParameterParsingStateBase``
The created parameter parser state instances.
Returns
-------
parameter_parsing_state : ``ParameterParsingState``, `None`
"""
for parameter_parsing_state in parameter_parsing_states:
if parameter_parsing_state.content_parser_parameter.is_rest:
return parameter_parsing_state | e90d1ee848af7666a72d9d0d4fb74e3fedf496fa | 705,365 |
import io
def read_all_files(filenames):
    """Read all files into a single StringIO buffer."""
    contents = []
    for name in filenames:
        with open(name) as f:  # close each file promptly instead of leaking handles
            contents.append(f.read())
    return io.StringIO('\n'.join(contents)) | efb2e3e8f35b2def5f1861ecf06d6e4135797ccf | 705,366 |
def path_inside_dir(path, directory):
"""
Returns True if the specified @path is inside @directory,
performing component-wide comparison. Otherwise returns False.
"""
return ((directory == "" and path != "")
or path.rstrip("/").startswith(directory.rstrip("/") + "/")) | 30ad431f9115addd2041e4b6c9c1c8c563b93fe9 | 705,367 |
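# Examples: the comparison is component-wise, so a sibling directory whose
# name merely shares a prefix does not count as "inside":
assert path_inside_dir("photos/2021/a.jpg", "photos") is True
assert path_inside_dir("photos2/a.jpg", "photos") is False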
def butterworth_type_filter(frequency, highcut_frequency, order=2):
"""
    Butterworth-type low pass filter
    Parameters
    ----------
    frequency: ndarray
        frequencies at which the filter is evaluated
    highcut_frequency: float
        high-cut frequency for the low pass filter
    order: int
        The order of the butterworth filter
    Returns
    -------
    h: ndarray
        Filter values (`h`) at the input frequencies.
    """
    h = 1.0 / (1 + 1j * (frequency / highcut_frequency)) ** order
    # Apply an additional fixed first-order pole at 300 kHz
    highcut_frequency = 300 * 1e3
    h *= 1.0 / (1 + 1j * (frequency / highcut_frequency)) ** 1
    return h | f8ff570d209560d65b4ccc9fdfd2d26ec8a12d35 | 705,368 |
def coro1():
"""定义一个简单的基于生成器的协程作为子生成器"""
word = yield 'hello'
yield word
return word # 注意这里协程可以返回值了,返回的值会被塞到 StopIteration value 属性 作为 yield from 表达式的返回值 | 1bfcfb150748c002638d2c6536299025864ac1f6 | 705,369 |
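# A small driver showing the `yield from` behaviour described above:
def _driver():
    result = yield from coro1()
    print('returned:', result)

g = _driver()
print(next(g))          # hello
print(g.send('world'))  # world
try:
    next(g)             # prints "returned: world", then the driver finishes
except StopIteration:
    pass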
def add_to_master_list(single_list, master_list):
"""This function appends items in a list to the master list.
:param single_list: List of dictionaries from the paginated query
:type single_list: list
:param master_list: Master list of dictionaries containing group information
:type master_list: list
:returns: The master list with the appended data
"""
    master_list.extend(single_list)
return master_list | 4b4e122e334624626c7db4f09278b44b8b141504 | 705,370 |
import os
import re
def get_sdkconfig_value(sdkconfig_file, key):
"""
Return the value of given key from sdkconfig_file.
If sdkconfig_file does not exist or the option is not present, returns None.
"""
assert key.startswith('CONFIG_')
if not os.path.exists(sdkconfig_file):
return None
# keep track of the last seen value for the given key
value = None
# if the value is quoted, this excludes the quotes from the value
pattern = re.compile(r"^{}=\"?([^\"]*)\"?$".format(key))
with open(sdkconfig_file, 'r') as f:
for line in f:
match = re.match(pattern, line)
if match:
value = match.group(1)
return value | d8f11dec3406d5fc166883d99bc3f42ca4eb6483 | 705,372 |
def unmatched(match):
"""Return unmatched part of re.Match object."""
start, end = match.span(0)
return match.string[:start] + match.string[end:] | 6d34396c2d3c957d55dbef16c2673bb7f571205c | 705,373 |
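# Example: everything outside the matched span is kept:
import re
m = re.search(r'\d+', 'abc123def')
assert unmatched(m) == 'abcdef'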
def cubicgw(ipparams, width, etc = []):
"""
This function fits the variation in Gaussian-measured PRF half-widths using a 2D cubic.
Parameters
----------
x1: linear coefficient in x
x2: quadratic coefficient in x
x3: cubic coefficient in x
y1: linear coefficient in y
y2: quadratic coefficient in y
y3: cubic coefficient in y
    c : constant
    s0: reference width subtracted from sx and sy before evaluating the polynomial
Returns
-------
returns the flux values for the intra-pixel model
Revisions
---------
2018-11-16 Kevin Stevenson, STScI
[email protected]
Original version
"""
    x1, x2, x3, y1, y2, y3, c, s0 = ipparams[:8]
sy, sx = width
return x1*(sx-s0) + x2*(sx-s0)**2 + x3*(sx-s0)**3 + y1*(sy-s0) + y2*(sy-s0)**2 + y3*(sy-s0)**3 + c | 334be9d8dc8baaddf122243e4f19d681efc707cf | 705,374 |
def get_columns_by_type(df, req_type):
"""
    Get the column names of a data frame that have the requested dtype.
    Parameters:
        df : data frame
        req_type : dtype name to look up, e.g. 'int64', 'float64' or 'object'
    Returns:
        The column names (a pandas Index) with that dtype, or None if no column matches
"""
g = df.columns.to_series().groupby(df.dtypes).groups
type_dict = {k.name: v for k, v in g.items()}
return type_dict.get(req_type) | aeedea92fbfb720ca6e7a9cd9920827a6ad8c6b0 | 705,376 |
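# Example with a small frame; the key is the dtype's name, e.g. 'int64':
import pandas as pd
df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': ['x', 'y']})
print(get_columns_by_type(df, 'int64'))   # Index(['a'], dtype='object')
print(get_columns_by_type(df, 'object'))  # Index(['c'], dtype='object')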
def get_total(lines):
"""
This function takes in a list of lines and returns
a single float value that is the total of a particular
variable for a given year and tech.
Parameters:
-----------
lines : list
This is a list of datalines that we want to total.
Returns:
--------
total : float
This is the sum total from the data lines.
"""
total = 0.0
for line in lines:
data_sep = line.split()
total += float(data_sep[0])
return total | 284f8061f3659999ae7e4df104c86d0077b384da | 705,377 |
def box(t, t_start, t_stop):
"""Box-shape (Theta-function)
The shape is 0 before `t_start` and after `t_stop` and 1 elsewhere.
Args:
t (float): Time point or time grid
t_start (float): First value of `t` for which the box has value 1
t_stop (float): Last value of `t` for which the box has value 1
Note:
You may use :class:`numpy.vectorize`, :func:`functools.partial`, or
:func:`qutip_callback`, cf. :func:`flattop`.
"""
if t < t_start:
return 0.0
if t > t_stop:
return 0.0
return 1.0 | 8f4f0e57323f38c9cfa57b1661c597b756e8c4e7 | 705,378 |
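# Evaluating on a time grid via numpy.vectorize, as suggested in the Note:
import numpy as np
box_vec = np.vectorize(box)
t = np.linspace(0, 10, 11)
print(box_vec(t, 2.0, 5.0))  # 1.0 for 2 <= t <= 5, else 0.0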
import json
import time
def sfn_result(session, arn, wait=10):
"""Get the results of a StepFunction execution
Args:
session (Session): Boto3 session
arn (string): ARN of the execution to get the results of
wait (int): Seconds to wait between polling
Returns:
dict|None: Dict of Json data or
None if there was an error getting the failure output
"""
client = session.client('stepfunctions')
while True:
        resp = client.describe_execution(executionArn=arn)
if resp['status'] != 'RUNNING':
if 'output' in resp:
return json.loads(resp['output'])
else:
                resp = client.get_execution_history(executionArn=arn,
                                                    reverseOrder=True)
event = resp['events'][0]
for key in ['Failed', 'Aborted', 'TimedOut']:
key = 'execution{}EventDetails'.format(key)
if key in event:
return event[key]
return None
else:
time.sleep(wait) | ba8a80e81aa5929360d5c9f63fb7dff5ebaf91f3 | 705,379 |
def RoleAdmin():
"""超级管理员"""
return 1 | 78a4fce55fa0fb331c0274c23213ae72afe7184f | 705,381 |
def run_program(intcodes):
"""run intcodes, which are stored as a dict of step: intcode pairs"""
pc = 0
last = len(intcodes) - 1
while pc <= last:
if intcodes[pc] == 1:
# add
if pc + 3 > last:
raise Exception("out of opcodes")
arg1 = intcodes[pc + 1]
arg2 = intcodes[pc + 2]
dest = intcodes[pc + 3]
intcodes[dest] = intcodes[arg1] + intcodes[arg2]
pc += 4
elif intcodes[pc] == 2:
# multiply
if pc + 3 > last:
raise Exception("out of opcodes")
arg1 = intcodes[pc + 1]
arg2 = intcodes[pc + 2]
dest = intcodes[pc + 3]
intcodes[dest] = intcodes[arg1] * intcodes[arg2]
pc += 4
elif intcodes[pc] == 99:
# end program
return intcodes
else:
# invalid
raise Exception("invalid opcode: {}".format(intcodes[pc]))
# should never reach this point (only if end is reached before program
# stop instruction)
raise Exception("ran out of intcodes before program stop reached") | e87343483abddffd9508be6da7814abcbcd59a79 | 705,382 |
def to_numpy(tensor):
"""Convert 3-D torch tensor to a 3-D numpy array.
Args:
tensor: Tensor to be converted.
"""
return tensor.transpose(0, 1).transpose(1, 2).clone().numpy() | 034e016caccdf18e8e33e476673884e2354e21c7 | 705,383 |
def decorator(IterativeReconAlg, name=None, docstring=None):
"""
Calls run_main_iter when parameters are given to it.
:param IterativeReconAlg: obj, class
instance of IterativeReconAlg
:param name: str
for name of func
:param docstring: str
other documentation that may need to be included from external source.
:return: func
Examples
--------
>>> import tigre
>>> from tigre.demos.Test_data.data_loader import load_head_phantom
    >>> geo = tigre.geometry_default(high_quality=False)
    >>> src = load_head_phantom(number_of_voxels=geo.nVoxel)
    >>> angles = np.linspace(0,2*np.pi,100)
    >>> proj = Ax(src,geo,angles)
>>> iterativereconalg = decorator(IterativeReconAlg)
>>> output = iterativereconalg(proj,geo,angles, niter=50)
"""
def iterativereconalg(proj, geo, angles, niter, **kwargs):
alg = IterativeReconAlg(proj, geo, angles, niter, **kwargs)
if name is not None:
alg.name = name
alg.run_main_iter()
if alg.computel2:
return alg.getres(), alg.geterrors()
else:
return alg.getres()
if docstring is not None:
setattr(
iterativereconalg,
'__doc__',
docstring +
IterativeReconAlg.__doc__)
else:
setattr(iterativereconalg, '__doc__', IterativeReconAlg.__doc__)
if name is not None:
setattr(iterativereconalg, '__name__', name)
return iterativereconalg | 0c7224ea3d58c367d8b7519f7f8ba4d68c00076e | 705,384 |
import time
def wait_for_result(polling_function, polling_config):
"""
wait_for_result will periodically run `polling_function`
using the parameters described in `polling_config` and return the
output of the polling function.
    Args:
        polling_function (Callable[[], (bool, Any)]): The function being
            polled. The function takes no arguments and must return a
            status which indicates if the function was successful or
            not, as well as some return value.
        polling_config (PollingConfig): The parameters to use to poll
            the db.
    Returns:
        Any: The output of the polling function, if it is successful,
            None otherwise.
"""
if polling_config.polling_interval == 0:
iterations = 1
else:
iterations = int(polling_config.timeout // polling_config.polling_interval) + 1
for _ in range(iterations):
(status, result) = polling_function()
if status:
return result
time.sleep(polling_config.polling_interval)
if polling_config.strict:
assert False
return None | 663f23b3134dabcf3cc3c2f72db33d09ca480555 | 705,386 |
def file_version_summary(list_of_files):
"""
Given the result of list_file_versions, returns a list
of all file versions, with "+" for upload and "-" for
hide, looking like this:
['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']
"""
return [('+ ' if (f['action'] == 'upload') else '- ') + f['fileName'] for f in list_of_files] | 8ca8e75c3395ea13c6db54149b12e62f07aefc13 | 705,387 |
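# Example with two file versions:
files = [{'action': 'upload', 'fileName': 'photos/a.jpg'},
         {'action': 'hide', 'fileName': 'photos/b.jpg'}]
assert file_version_summary(files) == ['+ photos/a.jpg', '- photos/b.jpg']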
def make_players(data, what_to_replace_null_data_with):
"""
1. feature selection
2. replacing null values
:param data:
:param what_to_replace_null_data_with: accepted values: "1", "mean", "median"
:return: players
"""
players = data[["Overall", "Potential", "Position", "Skill Moves", "Crossing", "Finishing",
"HeadingAccuracy", "ShortPassing", "Volleys",
"Dribbling", "Curve", "FKAccuracy", "LongPassing", "BallControl",
"Acceleration", "SprintSpeed", "Agility", "Reactions",
"Balance", "ShotPower", "Jumping", "Stamina", "Strength",
"LongShots", "Aggression", "Interceptions", "Positioning",
"Vision", "Penalties", "Composure", "Marking",
"StandingTackle", "SlidingTackle",
"GKDiving", "GKHandling", "GKKicking", "GKPositioning",
"GKReflexes"]]
for col in players:
if col != "Position":
if what_to_replace_null_data_with == "1":
players[col].fillna(1, inplace=True)
elif what_to_replace_null_data_with == "mean":
players[col].fillna(players[col].mean(), inplace=True)
elif what_to_replace_null_data_with == "median":
players[col].fillna(players[col].median(), inplace=True)
else:
raise ValueError("Invalid value for second parameter")
# drop 60 NA positions from dataframe
players = players.dropna()
return players | 081e563f475e7e05caf3761954646b8a35ec8e54 | 705,388 |
def pwm_to_duty_cycle(pulsewidth_micros, pwm_params):
"""Converts a pwm signal (measured in microseconds)
to a corresponding duty cycle on the gpio pwm pin
Parameters
----------
pulsewidth_micros : float
Width of the pwm signal in microseconds
pwm_params : PWMParams
PWMParams object
Returns
-------
    int
PWM duty cycle corresponding to the pulse width
"""
return int(pulsewidth_micros / 1e6 * pwm_params.freq * pwm_params.range) | e627b84bf7e01f3d4dcb98ec94271cd34249fb23 | 705,389 |
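# Worked example with a hypothetical PWMParams stand-in (the real class only
# needs `freq` and `range` attributes):
class _PWMParams:
    freq = 50      # Hz
    range = 4096   # full-scale duty-cycle range

assert pwm_to_duty_cycle(1500, _PWMParams()) == 307  # 1.5 ms pulse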
def decode(codes, alphabet):
""" Converts one-hot encodings to string
Parameters
----------
code : torch.Tensor
One-hot encodings.
alphabet : Alphabet
Matches one-hot encodings to letters.
    Returns
    -------
    str
        The decoded string.
"""
s = list(map(lambda x: alphabet[int(x)], codes))
return ''.join(s) | 79ff69034293a8fb7d005ec89c98ae5e7535e487 | 705,390 |
import re
def getNormform_space(synonym):
"""
"""
return re.sub("[^a-z0-9]", " ", synonym.lower()) | 5e03a89ca25cb5b4ae9a76ef9fb44c213a043cbd | 705,391 |
import re
def rename_leaves_taxids(tree):
"""
Rename the leaf nodes with just the NCBI taxonomy ID if we have it
:param tree: the tree to rename
:return: the tree with renamed leaves
"""
for n in tree.get_leaves():
m = re.search(r'\[(\d+)\]', n.name)
if m:
n.name = m.groups()[0]
return tree | 26b55177b1e9372ff58f3a79ab703c639661551c | 705,392 |
from typing import Dict
def remove_none_dict(input_dict: Dict) -> Dict:
"""
removes all none values from a dict
:param input_dict: any dictionary in the world is OK
:return: same dictionary but without None values
"""
return {key: value for key, value in input_dict.items() if value is not None} | 3f91d653a680f0f9d842ab44cbbb9ea4142c12ab | 705,393 |
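# Example: only None is dropped; falsy values like 0 or '' are kept:
assert remove_none_dict({'a': 1, 'b': None, 'c': 0}) == {'a': 1, 'c': 0}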
import os
def is_pro():
"""Check if working in PRO"""
return os.environ.get("VTASKS_ENV", "False") == "True" | e193f5d6e4c24d57a2903fcb5714d5f7a8473fcb | 705,394 |
import os
def pick_projects(directory):
"""
Finds all subdirectories in directory containing a .json file
:param directory: string containing directory of subdirectories to search
    :return: list of project files found under the given directory (the largest .json per subdirectory)
"""
ext = '.json'
subs = [x[0] for x in os.walk(directory)]
projects = []
for sub in subs:
files = []
for f in os.listdir(sub):
if f.endswith(ext):
files.append(f)
if len(files) > 0:
sizes = [os.stat(os.path.join(sub, pick)).st_size for pick in files]
max_size = max(sizes)
index = sizes.index(max_size)
projects.append(os.path.join(sub, files[index]))
return projects | 577668e5f7729bb3fcfa39398b7fade9475fefbe | 705,395 |
def inject_where(builder):
"""
helper function to append to the query the generated where clause
:param builder: the current builder
:return:
"""
query = builder.v.query
if callable(query):
return builder
lower = query.lower()
where = lower.find(' where ')
before = -1
for before_q in [' group by', ' order by', ' limit', ' offset']:
before = lower.find(before_q)
if before >= 0:
break
if where >= 0:
if before < 0:
builder.append(' {and_where}')
else:
builder.query('{} {{and_where}} {}'.format(query[:before], query[before:]))
builder.replace('and_where')
query = builder.v.query
builder.query('{} {{joins}} {}'.format(query[:where], query[where:]))
builder.replace('joins')
else:
if before < 0:
builder.append('{joins} {where}')
else:
builder.query('{} {{joins}} {{where}} {}'.format(query[:before], query[before:]))
builder.replace('where')
builder.replace('joins')
return builder | 78682f5c3712ffcb9e96a8c7c624d6f0177884ec | 705,396 |
def main():
"""
The main function to execute upon call.
Returns
-------
int
returns integer 0 for safe executions.
"""
print("Program to find the character from an input ASCII value.")
ascii_val = int(input("Enter ASCII value to find character: "))
print("\nASCII {asci} in character is \"{char}\""
.format(asci=ascii_val, char=chr(ascii_val)))
return 0 | 45bec0eb658cc17005b97e6fa812c806f5b77440 | 705,397 |
def requires_ids_or_filenames(method):
"""
A decorator for spectrum library methods that require either a list of Ids or a list of filenames.
:param method:
A method belonging to a sub-class of SpectrumLibrary.
"""
def wrapper(model, *args, **kwargs):
have_ids = ("ids" in kwargs) and (kwargs["ids"] is not None)
have_filenames = ("filenames" in kwargs) and (kwargs["filenames"] is not None)
assert have_ids or have_filenames, "Must supply a list of Ids or a list of filenames"
assert not (have_ids and have_filenames), "Must supply either a list of Ids or a list of filenames, not both."
# If a single Id is supplied, rather than a list of Ids, turn it into a one-entry tuple
if have_ids and not isinstance(kwargs["ids"], (list, tuple)):
kwargs["ids"] = (kwargs["ids"],)
# If a single filename is supplied, turn it into a one-entry tuple
if have_filenames and not isinstance(kwargs["filenames"], (list, tuple)):
kwargs["filenames"] = (kwargs["filenames"],)
return method(model, *args, **kwargs)
return wrapper | df4cb705f11567e8e5a23da730aead8e7c90f378 | 705,398 |
import io
import tokenize
def remove_comments_and_docstrings(source):
"""
Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass
"""
io_obj = io.StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
# Note regarding NEWLINE vs NL: The tokenize module
# differentiates between newlines that start a new statement
                    # and newlines inside of operators such as parens, brackets,
                    # and curly braces. Newlines inside of operators are
                    # NL and newlines that end a logical line are NEWLINE.
# Catch whole-module docstrings:
if start_col > 0:
# Unlabelled indentation means we're inside an operator
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out | ffd185fc2517342e9eb0e596c431838009befde5 | 705,399 |
def prettify_name_tuple(tup):
""" Processes the intersect tuples from the steam API. """
    # keep only the part before the first underscore of each name
    return ", ".join(name.split("_")[0] for name in tup) | 68d9e7170f02cf4a5de434806e7abcd99e5a77e7 | 705,400 |
import sys
def open_file(file_name):
""" Opens a comma separated CSV file
Parameters
----------
file_name: string
The path to the CSV file.
Returns:
--------
Output: the opened file
"""
    # Check for file-not-found and permission errors while opening the file
    # once, with the correct encoding
    try:
        f = open(file_name, 'r', encoding="ISO-8859-1")
    except FileNotFoundError:
        print("Couldn't find file " + file_name)
        sys.exit(3)
    except PermissionError:
        print("Couldn't access file " + file_name)
        sys.exit(4)
return(f) | 21e3abe90fbfb169568ef051fa3f130cc7f1315a | 705,401 |
def parse_data_name(line):
"""
Parses the name of a data item line, which will be used as an attribute name
"""
first = line.index("<") + 1
last = line.rindex(">")
return line[first:last] | 53a9c7e89f5fa5f47dad6bfc211d3de713c15c67 | 705,402 |
def api_error(api, error):
"""format error message for api error, if error is present"""
if error is not None:
return "calling: %s: got %s" % (api, error)
return None | a9269a93d51e3203646886a893998ffec6488c95 | 705,403 |
def build_template(ranges, template, build_date, use_proxy=False, redir_target=""):
"""
Input: output of process_<provider>_ranges(), output of get_template()
Output: Rendered template string ready to write to disk
"""
return template.render(
ranges=ranges["ranges"],
header_comments=ranges["header_comments"],
build_date=build_date,
use_proxy=use_proxy,
redir_target=redir_target,
) | ec10897cb6f92e2b927f4ef84511a7deab8cd37d | 705,405 |
import textwrap
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip()) | 8d392daede103cb2a871b94d415c705fa51d7cef | 705,406 |
def returns_normally(expr):
"""For use inside `test[]` and its sisters.
Assert that `expr` runs to completion without raising or signaling.
Usage::
test[returns_normally(myfunc())]
"""
# The magic is, `test[]` lifts its expr into a lambda. When the test runs,
# our arg gets evaluated first, and then its value is passed to us.
#
# To make the test succeed whenever `unpythonic.syntax.testingtools._observe`
# didn't catch an unexpected signal or exception in `expr`, we just ignore
# our arg, and:
return True | 469447c704247f46a0cebf1b582c08d36f53383f | 705,407 |
import re
def finder(input, collection, fuzzy=False, accessor=lambda x: x):
"""
Args:
input (str): A partial string which is typically entered by a user.
collection (iterable): A collection of strings which will be filtered
based on the `input`.
        fuzzy (bool): perform a fuzzy search (default=False)
        accessor (callable): maps each item of `collection` to the string
            that is searched (defaults to the identity function)
Returns:
suggestions (generator): A generator object that produces a list of
suggestions narrowed down from `collection` using the `input`.
"""
suggestions = []
input = str(input) if not isinstance(input, str) else input
pat = input
if fuzzy:
pat = ".*?".join(map(re.escape, input))
regex = re.compile(pat, re.IGNORECASE)
for item in collection:
r = regex.search(accessor(item))
if r:
suggestions.append((len(r.group()), r.start(), accessor(item), item))
return (z[-1] for z in sorted(suggestions)) | 1bbe22f6b38f447f20071bd810cfee6e4e491f5f | 705,408 |
def str2int(video_path):
"""
    argparse returns a string although webcam sources use int (0, 1 ...)
Cast to int if needed
"""
try:
return int(video_path)
except ValueError:
return video_path | 2d4714ec53304fb6cafabd5255a838b478780f8a | 705,409 |
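# Typical argparse usage (the --video flag is illustrative, not from the source):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--video', default='0')
args = parser.parse_args(['--video', '0'])
assert str2int(args.video) == 0           # webcam index as int
assert str2int('clip.mp4') == 'clip.mp4'  # file path stays a string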
def describe_inheritance_rule(rule):
"""
Given a dictionary representing a koji inheritance rule (i.e., one of the
elements of getInheritanceData()'s result), return a tuple of strings to be
appended to a module's stdout_lines array conforming to the output of
koji's taginfo CLI command, e.g.:
0 .... a-parent-tag
10 M... another-parent-tag
maxdepth: 1
100 .F.. yet-another-parent-tag
package filter: ^prefix-
"""
# koji_cli/commands.py near the end of anon_handle_taginfo()
flags = '%s%s%s%s' % (
'M' if rule['maxdepth'] not in ('', None) else '.',
'F' if rule['pkg_filter'] not in ('', None) else '.',
'I' if rule['intransitive'] else '.',
'N' if rule['noconfig'] else '.',
)
result = ["%4d %s %s" % (rule['priority'], flags, rule['name'])]
if rule['maxdepth'] not in ('', None):
result.append(" maxdepth: %d" % rule['maxdepth'])
if rule['pkg_filter'] not in ('', None):
result.append(" package filter: %s" % rule['pkg_filter'])
return tuple(result) | 32eae010365d8fd5b253f23acf8104932773c7c1 | 705,410 |
def find_colour(rgb):
"""Compare given rgb triplet to predefined colours to find the closest one"""
# this cannot normally happen to an image that is processed automatically, since colours
# are rbg by default, but it can happen if the function is called with invalid values
if rgb[0] < 0 or rgb[0] > 255 or rgb[1] < 0 or rgb[1] > 255 or rgb[2] < 0 or rgb[2] > 255:
return "part of the rgb triplet was invalid"
# dictionary of predefined colours
colours = {
(255, 0, 0): "red",
(255, 100, 100): "red",
(200, 100, 100): "red",
(150, 0, 0): "red",
(150, 50, 50): "red",
(50, 0, 0): "red",
(0, 255, 0): "green",
(100, 255, 100): "green",
(100, 200, 100): "green",
(0, 150, 0): "green",
(50, 150, 50): "green",
(0, 50, 0): "green",
(0, 0, 255): "blue",
(100, 100, 255): "blue",
(100, 100, 200): "blue",
(0, 0, 150): "blue",
(50, 50, 150): "blue",
(0, 0, 50): "blue",
(255, 255, 0): "yellow",
(255, 255, 100): "yellow",
(200, 200, 100): "yellow",
(150, 150, 0): "yellow",
(150, 150, 50): "yellow",
(50, 50, 0): "yellow",
(247, 248, 232): "yellow", # light yellow colour used on most of the map
(233, 231, 182): "yellow", # darker yellow used in some places
(255, 0, 255): "magenta",
(255, 100, 255): "magenta",
(200, 100, 200): "magenta",
(150, 0, 150): "magenta",
(150, 50, 150): "magenta",
(50, 0, 50): "magenta",
(0, 255, 255): "teal",
(100, 255, 255): "teal",
(100, 200, 200): "teal",
(0, 150, 150): "teal",
(50, 150, 150): "teal",
(0, 50, 50): "teal",
(232, 248, 248): "teal", # light blue-ish colour used for water in some places
(255, 255, 255): "white",
(0, 0, 0): "black"
}
# calculate euclidean distance to all of the predefined colours
# pick the closest one
# note: 30000 was arbitrarily chosen as a threshold for a "close enough" colour
# i.e. if a distance is greater than that it cannot reasonably be considered closest,
# even if it is the smallest distance, though it should be quite unlikely to happen,
# due to the number of predefined colours
min_dist = 30000
nearest_colour = ""
for colour in colours:
# euclidean distance
dist = pow((colour[0] - rgb[0]), 2) + pow((colour[1] - rgb[1]), 2) + pow(
(colour[2] - rgb[2]), 2)
if dist < min_dist:
min_dist = dist
nearest_colour = colours[colour]
# colour is considered gray if the r g b values are all within 10 of each other
gray = 1
    differences = [abs(rgb[0] - rgb[1]), abs(rgb[1] - rgb[2]), abs(rgb[2] - rgb[0])]
for diff in differences:
if diff > 10:
gray = 0
if gray == 1 and rgb[0] != 0 and rgb[1] != 0 and rgb[2] != 0\
and rgb[0] != 255 and rgb[1] != 255 and rgb[2] != 255:
return "gray"
return nearest_colour | f2c6e2b7daa7fd45411376cfc64409486c18e252 | 705,411 |
def log_sum_exp(input, dim=None, keepdim=False):
"""Numerically stable LogSumExp.
Args:
input (Tensor)
dim (int): Dimension along with the sum is performed
keepdim (bool): Whether to retain the last dimension on summing
Returns:
Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
"""
# For a 1-D array x (any array along a single dimension),
# log sum exp(x) = s + log sum exp(x - s)
# with s = max(x) being a common choice.
if dim is None:
input = input.view(-1)
dim = 0
max_val = input.max(dim=dim, keepdim=True)[0]
output = max_val + (input - max_val).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
output = output.squeeze(dim)
return output | c9c867d9d81191922a56716dab128ea71821a638 | 705,412 |
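# Numerical check against the naive formula on a small tensor:
import torch
x = torch.tensor([[1.0, 2.0, 3.0]])
naive = torch.log(torch.exp(x).sum(dim=1))
assert torch.allclose(log_sum_exp(x, dim=1), naive)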
def make_screen_hicolor(screen):
"""returns a screen to pass to MainLoop init
with 256 colors.
"""
screen.set_terminal_properties(256)
screen.reset_default_terminal_palette()
return screen | 1fa4ee36825ca9672af58332001463e5b804d171 | 705,413 |
def cached_object_arg_test(x):
"""takes a MyTestClass instance and returns a string"""
return str(x) | 5880976f0c74dc588b1b4e93cee349fee06473ee | 705,414 |
def adriatic_name(p, i, j, a):
""" Return the name for given parameters of Adriatic indices"""
#(j)
name1 = {1:'Randic type ',\
2:'sum ',\
3:'inverse sum ', \
4:'misbalance ', \
5:'inverse misbalance ', \
6:'min-max ', \
7:'max-min ', \
8:'symmetric division '}
# (i,a)
name2 = {(1, 0.5):'lor',\
(1,1):'lo', \
(1,2):'los', \
(2,-1):'in', \
(2, -0.5):'ir', \
(2, 0.5):'ro', \
(2,1):'', \
(2,2):'s', \
(3, 0.5):'ha', \
(3,2):'two'}
#(p)
name3 = {0: 'deg', 1: 'di'}
return (name1[j] + name2[(i, a)] + name3[p]) | d08ed926d80aa19326ab4548288a0b9cb02737e4 | 705,415 |
def gcd(number1: int, number2: int) -> int:
"""Counts a greatest common divisor of two numbers.
:param number1: a first number
:param number2: a second number
:return: greatest common divisor"""
number_pair = (min(abs(number1), abs(number2)), max(abs(number1), abs(number2)))
while number_pair[0] > 0:
number_pair = (number_pair[1] % number_pair[0], number_pair[0])
return number_pair[1] | 9f22c315cc23e2bbf954f06d416a2c44f95ddbb7 | 705,416 |
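# Examples (signs are ignored via abs()):
assert gcd(12, 18) == 6
assert gcd(-4, 6) == 2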
def get_overlapping_timestamps(timestamps: list, starttime: int, endtime: int):
"""
Find the timestamps in the provided list of timestamps that fall between starttime/endtime. Return these timestamps
as a list. First timestamp in the list is always the nearest to the starttime without going over.
Parameters
----------
timestamps
list of timestamps we want to pull from, to get the timestamps between starttime and endtime
starttime
integer utc timestamp in seconds
endtime
integer utc timestamp in seconds
Returns
-------
list
list of timestamps that are within the starttime/endtime range
"""
final_timestamps = []
# we require a starting time stamp that is either less than the given starttime or no greater than
# the given starttime by 60 seconds
buffer = 60
starting_timestamp = None
for tstmp in timestamps: # first pass, find the nearest timestamp (to starttime) without going over the starttime
if tstmp < starttime + buffer:
if not starting_timestamp:
starting_timestamp = tstmp
elif (tstmp > starting_timestamp) and (tstmp <= starttime):
starting_timestamp = tstmp
if starting_timestamp is None:
# raise ValueError('VesselFile: Found no overlapping timestamps for range {} -> {}, within the available timestamps: {}'.format(starttime, endtime, timestamps))
return final_timestamps
starttime = starting_timestamp
final_timestamps.append(str(starttime))
for tstmp in timestamps: # second pass, append all timestamps that are between the starting timestamp and endtime
if (tstmp > starttime) and (tstmp <= endtime):
final_timestamps.append(str(tstmp))
return final_timestamps | 0ad6836d43d670f811b436e34887e159462c9ec1 | 705,417 |
import os
def get_field_h5files(sdir, prefix_dirs="ph"):
"""Return names of field h5 files in a directory
Parameters
----------
sdir: str
Path to the search directory
prefix_dirs: str
If no matching files are found in sdir, search
subdirectories whose name starts with this string.
Returns
-------
files: list of str
Paths to the found h5 files
Notes
-----
If DIR does not contain any h5 fields, then returns all h5 fields
in subdirectories that start with `prefix`.
This method ignores h5 files of the eps structure, i.e. h5 files
starting with "eps" are ignored.
"""
sdir = os.path.realpath(sdir)
files = os.listdir(sdir)
ffil = []
for f in files:
if f.endswith(".h5") and not f.startswith("eps"):
ffil.append(os.path.join(sdir, f))
ffil.sort()
if len(ffil):
return ffil
else:
# go through subdirs
for df in files:
if (df.startswith(prefix_dirs) and
os.path.isdir(os.path.join(sdir, df))):
df = os.path.join(sdir, df)
sfiles = os.listdir(df)
for f in sfiles:
if f.endswith(".h5") and not f.startswith("eps"):
ffil.append(os.path.join(df, f))
ffil.sort()
return ffil | 4940a3ea642e477481ff926a7d5638d6af6e0120 | 705,418 |
def is_sequence(arg):
"""Check if an object is iterable (you can loop over it) and not a string."""
return not hasattr(arg, "strip") and hasattr(arg, "__iter__") | 466154b8ef9d19b53744187d44dc6cd172a70f62 | 705,419 |
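# Examples: lists and tuples qualify, strings are excluded on purpose:
assert is_sequence([1, 2, 3]) is True
assert is_sequence("abc") is False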
def fix_stddev_function_name(self, compiler, connection):
"""
Fix function names to 'STDEV' or 'STDEVP' as used by mssql
"""
function = 'STDEV'
if self.function == 'STDDEV_POP':
function = 'STDEVP'
return self.as_sql(compiler, connection, function=function) | b1fa48801fb397590ad5fb249d928906e7c21c8a | 705,420 |
def process_hub_timeout(bit):
"""Return the HUB timeout."""
if bit == '1':
return '5 Seconds'
return '2 Seconds' | 64f11056a341d64d670e2e8c918a796c954854a1 | 705,422 |
def d_d_theta_inv(y, alpha):
"""
xi'(y) = 1/theta''(xi(y)) > 0
= alpha / (1 - |y|)^2
Nikolova et al 2014, table 1, theta_2 and eq 5.
"""
assert -1 < y < 1 and alpha > 0
denom = 1 - abs(y)
return alpha / (denom*denom) | 8ed796a46021f64ca3e24972abcf70bd6f64976d | 705,424 |
def center_crop(img, crop_height, crop_width):
""" Crop the central part of an image.
Args:
img (ndarray): image to be cropped.
crop_height (int): height of the crop.
crop_width (int): width of the crop.
Return:
(ndarray): the cropped image.
"""
def get_center_crop_coords(height, width, crop_height, crop_width):
y1 = (height - crop_height) // 2
y2 = y1 + crop_height
x1 = (width - crop_width) // 2
x2 = x1 + crop_width
return x1, y1, x2, y2
height, width = img.shape[:2]
x1, y1, x2, y2 = get_center_crop_coords(
height, width, crop_height, crop_width)
return img[y1:y2, x1:x2, ...] | 6e5fafee8c34632b61d047e9eb66e9b08b5d0203 | 705,425 |
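# Example: cropping the central 2x2 block of a 6x6 image:
import numpy as np
img = np.arange(36).reshape(6, 6)
print(center_crop(img, 2, 2))  # [[14 15] [20 21]]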
import os
def cmk_arn_value(variable_name):
"""Retrieve target CMK ARN from environment variable."""
arn = os.environ.get(variable_name, None)
if arn is None:
raise ValueError(
'Environment variable "{}" must be set to a valid KMS CMK ARN for examples to run'.format(
variable_name
)
)
if arn.startswith("arn:") and ":alias/" not in arn:
return arn
raise ValueError("KMS CMK ARN provided for examples must be a key not an alias") | d235ad95c050bd68d8428d6c0d737ad394a5d1ea | 705,426 |
def approximated_atmo_spectrum(energy):
"""Gives an approximated atmospheric neutrino spectrum.
Can be used for comparing expected true energy distribution to recorded
energy proxy distributions. It is normalised such that the weight for an
energy of 1 is equal to 1. (It is agnostic to energy units)
:param energy: True neutrino energy (in some consistent unit)
:return: Spectrum weight for that energy
"""
return energy**-3.7 | 86768b5ba0bd31ef19a89dfe27af6c793492daa7 | 705,427 |
def obj_size_avg_residual(coeffs, avg_size, class_id):
"""
    :param coeffs: object sizes
    :param avg_size: array indexed by class id that stores the mean size of each category
    :param class_id: nyu class id.
:return: size residual ground truth normalized by the average size
"""
size_residual = (coeffs - avg_size[class_id]) / avg_size[class_id]
return size_residual | 8d44d4ebf273baf460195ec7c6ade58c8d057025 | 705,428 |
def _get_chromosome_dirs(input_directory):
"""Collect chromosome directories"""
dirs = []
for d in input_directory.iterdir():
if not d.is_dir():
continue
# Just in case user re-runs and
# does not delete output files
elif d.name == 'logs':
continue
elif d.name == 'p_distance_output':
continue
else:
dirs.append(d)
return dirs | 5047c0c158f11794e312643dbf7d307b381ba59f | 705,429 |
def format_imports(import_statements):
"""
-----
examples:
@need
from fastest.constants import TestBodies
@end
@let
import_input = TestBodies.TEST_STACK_IMPORTS_INPUT
output = TestBodies.TEST_STACK_IMPORTS_OUTPUT
@end
1) format_imports(import_input) -> output
-----
:param import_statements: list
:return: list
"""
return [
'{}\n'.format(import_statement.strip())
for import_statement in import_statements
if len(import_statement) > 0
] | 91514d19da4a4dab8c832e6fc2d3c6cbe7cca04a | 705,430 |
import mpmath
def sf(k, r, p):
"""
Survival function of the negative binomial distribution.
Parameters
    ----------
    k : int
        Number of successes (the function returns P(X > k)).
r : int
Number of failures until the experiment is stopped.
p : float
Probability of success.
"""
with mpmath.extradps(5):
k = mpmath.mpf(k)
r = mpmath.mpf(r)
p = mpmath.mpf(p)
return mpmath.betainc(k + 1, r, 0, p, regularized=True) | d836e2cd762c5fa2be11d83aae31ba5d8589b3f0 | 705,431 |
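# Worked example: P(X > 5) with r = 3 and p = 0.5 equals 37/256:
print(sf(5, 3, 0.5))  # 0.14453125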
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring) | 891e3a828b496467c201ad14e0540abf235d7e64 | 705,432 |
def extract_signals(data, fs, segmentation_times):
"""
    Given the set of segmentation times, extract the corresponding signals from the raw trace.
Args:
data : Numpy
The input seismic data containing both, start and end times of the seismic data.
fs : float
The sampling frequency.
segmentation_times : list
A list containing the segmentation of the file
Returns:
        tuple
            A list of the extracted signals and a list of their durations in seconds.
"""
signals = []
durations = []
for m in segmentation_times:
segmented = data[int(m[0] * fs): int(m[1] * fs)]
signals.append(segmented)
durations.append(segmented.shape[0]/float(fs))
return signals, durations | 81ff3d0b343dbba218eb5d2d988b8ca20d1a7209 | 705,433 |
import math
def cos_d(x: float) -> float:
    """
    This function takes input in radians and returns the
    computed derivative of cos, which is -sin.
    """
return -math.sin(x) | e8a0ba95d6a53d8c88bfa867dd423e23eb782911 | 705,434 |
def access(path, mode):
"""Use the real uid/gid to test for access to path.
:type path: bytes | unicode
:type mode: int
:rtype: bool
"""
return False | c0baab44d63bf354e4da9e5d0048d8b05c1f8040 | 705,435 |
def follow_card(card, deck_size, shuffles, shuffler):
"""Follow position of the card in deck of deck_size during shuffles."""
position = card
for shuffle, parameter in shuffles:
shuffling = shuffler(shuffle)
position = shuffling(deck_size, position, parameter)
return position | 10774bd899afde0d64cbf800bc3dad1d86543022 | 705,437 |
import tempfile
def get_secure_directory():
"""get a temporary secure sub directory"""
temp_dir = tempfile.mkdtemp(suffix='',prefix='')
return temp_dir | 08fb9587a2d17778e9a733312b08b504d6aff7bd | 705,438 |
def area(shape):
"""Multimethod dispatch key"""
return shape.get('type') | 0561d97ad21afdda160bd3063948e884a5c02945 | 705,439 |
import torch
def besseli(X, order=0, Nk=64):
""" Approximates the modified Bessel function of the first kind,
of either order zero or one.
OBS: Inputing float32 can lead to numerical issues.
Args:
X (torch.tensor): Input (N, 1).
order (int, optional): 0 or 1, defaults to 0.
        Nk (int, optional): Terms in summation, higher number, better approximation.
            Defaults to 64.
Returns:
I (torch.tensor): Modified Bessel function of the first kind (N, 1).
See also:
https://mathworld.wolfram.com/ModifiedBesselFunctionoftheFirstKind.html
"""
device = X.device
dtype = X.dtype
if len(X.shape) == 1:
X = X[:, None]
N = X.shape[0]
else:
N = 1
# Compute factorial term
X = X.repeat(1, Nk)
K = torch.arange(0, Nk, dtype=dtype, device=device)
K = K.repeat(N, 1)
K_factorial = (K + 1).lgamma().exp()
if order == 0:
# ..0th order
i = torch.sum((0.25 * X ** 2) ** K / (K_factorial ** 2), dim=1, dtype=torch.float64)
else:
# ..1st order
i = torch.sum(
0.5 * X * ((0.25 * X ** 2) ** K /
(K_factorial * torch.exp(torch.lgamma(K + 2)))), dim=1, dtype=torch.float64)
return i | 5233398b240244f13af595088077b8e43d2a4b2f | 705,440 |
import random
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
line, text = '', ''
# Iterating 200 time to pick up random keys and values
for count in range(0, 200):
key = random.choice(list(mimic_dict.keys()))
val = mimic_dict.get(key)
line += ('{} {} '.format(key, random.choice(val)))
# print 70 columns per line
if len(line) > 70:
text += line + '\n'
line = ''
print(text)
return True | 61dea92175feff7cb3e3744460ccf692cfc18ca7 | 705,441 |
def check_grid_side(ctx, param, value: int) -> int:
"""
check the size of the grid
:type value: int
"""
if value < 5:
raise ValueError("all sides of grid must be at least 5")
return value | 9e1403ca90c8f0716e10248b418ca59fe501c0c4 | 705,442 |
def min_rank(series, ascending=True):
"""
Equivalent to `series.rank(method='min', ascending=ascending)`.
Args:
series: column to rank.
Kwargs:
ascending (bool): whether to rank in ascending order (default is `True`).
"""
ranks = series.rank(method="min", ascending=ascending)
return ranks | a772618570517a324a202d4803983240cb54396b | 705,443 |
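# Example: ties share the minimum rank, leaving a gap afterwards:
import pandas as pd
s = pd.Series([10, 20, 20, 30])
assert min_rank(s).tolist() == [1.0, 2.0, 2.0, 4.0]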
def algorithms():
"""Get a list of the names of the available stemming algorithms.
The only algorithm currently supported is the "english", or porter2,
algorithm.
"""
return ['english'] | d09bef4090fbca1729a25784d7befdb8a436bfa6 | 705,444 |
import yaml
def load_yaml(filepath):
"""Import YAML config file."""
with open(filepath, "r") as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc) | e1ec81bf36d293788303e4b3379e45ecdfb38dc0 | 705,445 |
import json
import logging
def parse_json(json_path):
"""
parser JSON
Args:
json_path: input json file path
Returns:
json_dict: parser json dict result
"""
try:
with open(json_path) as json_file:
json_dict = json.load(json_file)
except Exception:
logging.error("json file load error !")
else:
return json_dict | 4bb9b14d3a751451dd2a75da9b60a355934ffa65 | 705,446 |
def _get_ex_msg(obj):
""" Get exception message """
return obj.value.message if hasattr(obj, 'value') else obj.message | be7be0657afab2fe1daba174c441d18f12e78355 | 705,447 |
def _validate_isofactor(isofactor, signed):
""" [Docstring]
"""
if isofactor[0] == 0.0:
return (False, "Error: 'isovalue' cannot be zero")
if isofactor[1] <= 1.0:
return (False, "Error: 'factor' must be greater than one")
if not signed and isofactor[0] < 0:
return (False, "Error: Negative 'isovalue' in absolute "
"thresholding mode")
return (True, "") | 7b4a4faf3671fdee364cdae41e178f8e6a0453b8 | 705,448 |
def count_possibilities(dic):
"""
Counts how many unique names can be created from the
combinations of each lists contained in the passed dictionary.
"""
total = 1
for key, value in dic.items():
total *= len(value)
return total | 856eee9bac0ddf3dbc7b714bb26fe6d4f003ef95 | 705,449 |
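# Example: 2 first names x 3 last names = 6 unique combinations:
parts = {'first': ['Ann', 'Bob'], 'last': ['Lee', 'Kim', 'Ray']}
assert count_possibilities(parts) == 6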
def get_dgs(align_dg_dict):
"""
Function that creates inverse dictionary of align_dg_dict
align_dg_dict: dict. Dictionary of alignments and clustering DG assignments
Returns dg_align_dict: dict, k=dg_id, v=[alignids]
align_dg_dict comes from get_spectral(graph) or get_cliques(graph)
"""
dgs_list = set(align_dg_dict.values()) #list of all duplex groups
dg_align_dict = {}
for dg in dgs_list:
dg_align_list =[x for (x,y) in align_dg_dict.items() if y == dg]
dg_align_dict[dg] = dg_align_list
    return dg_align_dict | 85bca47657c83d2b308d38f05d1c88d9a78fa448 | 705,451 |
from typing import Optional
def parse_options(dict_in: Optional[dict], defaults: Optional[dict] = None):
"""
Utility function to be used for e.g. kwargs
1) creates a copy of dict_in, such that it is safe to change its entries
    2) converts None to an empty dictionary (this is useful, since empty dictionaries can't be argument defaults)
3) optionally, sets defaults, if keys are not present
Parameters
----------
dict_in
defaults
Returns
-------
"""
if dict_in is None:
dict_in = {}
else:
dict_in = dict_in.copy()
if defaults:
for key in defaults:
dict_in.setdefault(key, defaults[key])
return dict_in | d679539ba29f4acab11f5db59c324473a2e24cc6 | 705,452 |
def GetMaxHarmonic( efit ):
"""Determine highest-order of harmonic amplitudes in an ellipse-fit object"""
# We assume that columns named "ai3_err", "ai4_err", "ai5_err", etc.
# exist, up to "aiM_err", where M is the maximum harmonic number
    # (slice off the "ai" prefix and "_err" suffix; rstrip strips a character
    # set rather than a suffix, so it is avoided here)
    momentNums = [int(cname[2:-4]) for cname in efit.colNames
                  if cname[:2] == "ai" and cname[-4:] == "_err"]
return max(momentNums) | e11645efa40ce3995788a05c8955d0d5a8804955 | 705,453 |
def no_op(loss_tensors):
"""no op on input"""
return loss_tensors | 317474aa2ed41668781042a22fb43834dc672bf2 | 705,455 |
import torch
def generic_fftshift(x,axis=[-2,-1],inverse=False):
"""
Fourier shift to center the low frequency components
Parameters
----------
x : torch Tensor
Input array
inverse : bool
whether the shift is for fft or ifft
Returns
-------
shifted array
"""
if len(axis) > len(x.shape):
raise ValueError('Not enough axis to shift around!')
y = x
for axe in axis:
dim_size = x.shape[axe]
shift = int(dim_size/2)
if inverse:
if not dim_size%2 == 0:
shift += 1
y = torch.roll(y,shift,axe)
return y | 8b5f84f0ed2931a3c1afac7c5632e2b1955b1cd5 | 705,457 |
def make_new_images(dataset, imgs_train, imgs_val):
"""
Split the annotations in dataset into two files train and val
according to the img ids in imgs_train, imgs_val.
"""
table_imgs = {x['id']:x for x in dataset['images']}
table_anns = {x['image_id']:x for x in dataset['annotations']}
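    # NOTE: this mapping keeps only one annotation per image id; images with
    # several annotations would need a list-valued mapping instead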
keys = ['info', 'licenses', 'images', 'annotations', 'categories']
# Train
dataset_train = dict.fromkeys(keys)
dataset_train['info'] = dataset['info']
dataset_train['licenses'] = dataset['licenses']
dataset_train['categories'] = dataset['categories']
dataset_train['images'] = [table_imgs[x] for x in imgs_train]
dataset_train['annotations'] = [table_anns[x] for x in imgs_train]
# Validation
dataset_val = dict.fromkeys(keys)
dataset_val['info'] = dataset['info']
dataset_val['licenses'] = dataset['licenses']
dataset_val['categories'] = dataset['categories']
dataset_val['images'] = [table_imgs[x] for x in imgs_val]
dataset_val['annotations'] = [table_anns[x] for x in imgs_val]
return dataset_train, dataset_val | d5851974ad63caaadd390f91bdf395a4a6f1514d | 705,458 |
import tkinter as tk
from tkinter import filedialog
def select_file(title: str) -> str:
"""Opens a file select window and return the path to selected file"""
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(title=title)
return file_path | 59fdb7945389c2ba75d27e1fe20b596c4497bac1 | 705,459 |
def _loop_over(var):
""" Checks if a variable is in the form of an iterable (list/tuple)
and if not, returns it as a list. Useful for allowing argument
inputs to be either lists (e.g. [1, 3, 4]) or single-valued (e.g. 3).
Parameters
----------
var : int or float or list
Variable to check for iterability.
Returns
-------
var : list
Variable converted to list if single-valued input.
"""
if hasattr(var,"__iter__"):
return var
else:
return [var] | 254143646416af441d3858140b951b7854a0241c | 705,461 |
from typing import Any
from typing import List
from typing import Dict
def transform_database_account_resources(
account_id: Any, name: Any, resource_group: Any, resources: List[Dict],
) -> List[Dict]:
"""
Transform the SQL Database/Cassandra Keyspace/MongoDB Database/Table Resource response for neo4j ingestion.
"""
for resource in resources:
resource['database_account_name'] = name
resource['database_account_id'] = account_id
resource['resource_group_name'] = resource_group
return resources | dac566a1e09e1e395ff6fb78d6f8931a2bca58cb | 705,462 |
def find_max_1(array: list) -> int:
"""
O(n^2)
:param array: list of integers
:return: integer
"""
overallmax = array[0]
for i in array:
is_greatest = True
for j in array:
if j > i:
is_greatest = False
if is_greatest:
overallmax = i
return overallmax | 9bab28c3d72062af75ac5c2e19c1e9d87e6fc468 | 705,463 |
def file_size(value, fmt="{value:.1f} {suffix}", si=False):
"""
Takes a raw number of bytes and returns a humanized filesize.
"""
if si:
base = 1000
suffixes = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
else:
base = 1024
suffixes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
max_suffix_index = len(suffixes) - 1
for i, suffix in enumerate(suffixes):
unit = base ** (i + 1)
if value < unit or i == max_suffix_index:
return fmt.format(value=(base * value / unit), suffix=suffix) | 272250966c0d301a86a136a7e84af6049e9fe47f | 705,464 |
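# Examples: binary units by default, SI units on request:
print(file_size(1536))           # 1.5 KiB
print(file_size(1536, si=True))  # 1.5 KB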