content (stringlengths 35-416k) | sha1 (stringlengths 40) | id (int64 0-710k) |
---|---|---|
def split_pkg(pkg):
"""nice little code snippet from isuru and CJ"""
if not pkg.endswith(".tar.bz2"):
raise RuntimeError("Can only process packages that end in .tar.bz2")
pkg = pkg[:-8]
plat, pkg_name = pkg.split("/")
name_ver, build = pkg_name.rsplit("-", 1)
name, ver = name_ver.rsplit("-", 1)
return plat, name, ver, build | 3568fc28c54e7de16e969be627804fbb80938d65 | 705,796 |
def coins(n, arr):
"""
Counting all ways e.g.: (5,1) and (1,5)
"""
# Stop case
if n < 0:
return 0
if n == 0:
return 1
ways = 0
for i in range(0, len(arr)):
ways += coins(n - arr[i], arr)
return ways | cb269db7aef58ae2368a6e6dc04ce6743ebd3d0e | 705,797 |
def compute_diff(old, new):
"""
Compute a diff that, when applied to object `old`, will give object
`new`. Do not modify `old` or `new`.
"""
if not isinstance(old, dict) or not isinstance(new, dict):
return new
diff = {}
for key, val in new.items():
if key not in old:
diff[key] = val
elif old[key] != val:
diff[key] = compute_diff(old[key], val)
for key in old:
if key not in new:
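# Key was removed in new: mark it with the deletion sentinel.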
diff[key] = "$delete"
return diff | f6e7674faa2a60be17994fbd110f8e1d67eb9886 | 705,798 |
import os
def get_file_to_dict(fliepath,splitsign,name):
"""
读取对应路径的文件,如果没有则创建
返回dict,splitsign为分隔符
"""
if os.path.exists(fliepath+name+'.txt'):
dict = {}
with open(fliepath+name+'.txt',mode='r',encoding='utf-8') as ff:
try:
list = ff.read().splitlines()
for l in list:
s = str(l).split(splitsign,1)
dict[s[0].strip()] = s[1].strip()
except:
dict = {}
ff.close()
else:
with open(fliepath+name+'.txt', mode='w', encoding='utf-8') as ff:
dict = {}
ff.close()
return dict | 998daf4fa33453c30d6543febe275d46939113f3 | 705,799 |
def smtp_config_generator_str(results, key, inp):
"""
Set server/username config.
:param results: Results dict. Refer to `:func:smtp_config_writer`.
:type results: dict
:param key: Key for results dict.
:type key: str
:param inp: Input question.
:type inp: str
"""
if results[key] is None:
results[key] = input(inp)
return results | f2cccfaf569f005e03bb351379db85de7146eda0 | 705,800 |
def default_rollout_step(policy, obs, step_num):
"""
The default rollout step function is the policy's compute_action function.
A rollout step function allows a developer to specify the behavior
that will occur at every step of the rollout--given a policy
and the last observation from the env--to decide
what action to take next. This usually involves the rollout's
policy and may perform learning. It may also involve using, updating,
or saving learning-related state, including hyper-parameters
such as epsilon in epsilon-greedy.
You can provide your own function with the same signature as this default
if you want to have a more complex behavior at each step of the rollout.
"""
return policy.compute_action(obs) | a6e9dff784e46b9a59ae34334a027b427e8d230a | 705,801 |
import glob
def find_file(filename):
"""
This helper function checks whether the file exists or not
"""
file_list = list(glob.glob("*.txt"))
if filename in file_list:
return True
else:
return False | 42895e66e258ba960c890f871be8c261aec02852 | 705,802 |
import json
def _export_gene_set_pan_genome(meth, pan_genome_id):
"""Export orthologs from Pangenome as external FeatureSet objects. [26]
:param pan_genome_id: ID of pangenome object [26.1]
:type pan_genome_id: kbtypes.KBaseGenomes.Pangenome
:ui_name pan_genome_id: Pangenome ID
:return: Generated Compare Genome
:rtype: kbtypes.KBaseGenomes.Pangenome
:output_widget: kbasePanGenome
"""
meth.stages = 1 # for reporting progress
return json.dumps({'ws': meth.workspace_id, 'name': pan_genome_id, 'withExport': 'true'}) | 5dfc5a6d253a66cf7ff50c736f9fdaa43a2334a8 | 705,804 |
import time
def _get_token(cls, token_type):
"""
Refresh the cached token when it is missing or expired, then return its value.
"""
assert token_type in ['tenant_access_token', 'app_access_token'], token_type
if not hasattr(cls.request, token_type) or\
time.time() >= getattr(cls.request, token_type)['expire']:
setattr(cls.request, token_type, getattr(cls, 'get_%s' % token_type)())
return getattr(cls.request, token_type)[token_type] | 15ee9c6926f929960b20ee68901f1d675f43639a | 705,805 |
def sort_by_directory(path):
"""returns 0 if path is a directory, otherwise 1 (for sorting)"""
return 1 - path.is_directory | 1cd066e69885901ae1b2b167feb061e98ed5f3ed | 705,806 |
def _finditem(obj, key):
"""
Check if given key exists in an object
:param obj: dictionary/list
:param key: key
:return: value at the key position
"""
if key in obj:
return obj[key]
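# Not found at the top level; search nested dictionaries recursively.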
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item | 0f7c5b801acfae6a66d175163d726cba22380f7c | 705,807 |
def true_fov(M, fov_e=50):
"""Calulates the True Field of View (FOV) of the telescope & eyepiece pair
Args:
fov_e (float): FOV of eyepiece; default 50 deg
M (float): Magnification of Telescope
Returns:
float: True Field of View (deg)
"""
return fov_e/M | 7735135d326f3000ac60274972263a8a71648033 | 705,808 |
def _increment_inertia(centroid, reference_point, m, mass, cg, I):
"""helper method"""
if m == 0.:
return mass
(x, y, z) = centroid - reference_point
x2 = x * x
y2 = y * y
z2 = z * z
I[0] += m * (y2 + z2) # Ixx
I[1] += m * (x2 + z2) # Iyy
I[2] += m * (x2 + y2) # Izz
I[3] += m * x * y # Ixy
I[4] += m * x * z # Ixz
I[5] += m * y * z # Iyz
mass += m
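# cg accumulates the mass-weighted centroid sum; divide by the returned total mass to obtain the actual CG.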
cg += m * centroid
return mass | f95d8c01061243929fa1d3dd48903bedc938bbd8 | 705,809 |
def air_density(temp, patm, pw = 0):
"""
Calculates the density of air (dry air by default) by means of the universal gas law as a
function of air temperature, atmospheric pressure and vapour pressure.
m / V = [Pw / (Rw * T)] + [Pd / (Rd * T)]
where:
Pd: Patm - Pw
Rw: specific gas constant for water vapour [Rw = 461.495 J/kg/K]
Rd: specific gas constant for dry air [Rd = 287.058 J/kg/K]
T: air temperature [K]
m/V: density of air [kg/m³]
Parameters
----------
temp : float
Air temperature [K].
patm : float
Atmospheric pressure [Pa].
pw : float
Vapour pressure [Pa]. Default to 0 Pa (dry air).
Returns
-------
float
Air density [kg/m³].
"""
rd, rw = 287.058, 461.495 # specific gas constant for dry air and water vapour [J / (kg K)]
pd = patm - pw
return (pd / (rd * temp)) + (pw / (rw * temp)) | 1af7afbf562fec105566a2c934f83c73f0be1173 | 705,812 |
def is_instrument_port(port_name):
"""test if a string can be a com of gpib port"""
answer = False
if isinstance(port_name, str):
ports = ["COM", "com", "GPIB0::", "gpib0::"]
for port in ports:
if port in port_name:
answer = not (port == port_name)
return answer | f45f47d35a9172264d0474502b0df883685071a0 | 705,813 |
import os
def get_anvil_path():
"""Gets the anvil/ path.
Returns:
The full path to the anvil/ source.
"""
return os.path.normpath(os.path.dirname(__file__)) | 2b8b0b0d99764634a5307f12864d85fe9b75ad57 | 705,814 |
def getblock(lst, limit):
"""Return first limit entries from list lst and remove them from the list"""
r = lst[-limit:]
del lst[-limit:]
return r | 8d230dec59fe00375d92b6c6a8b51f3e6e2d9126 | 705,815 |
import functools
import warnings
def ignore_python_warnings(function):
"""
Decorator for ignoring *Python* warnings.
Parameters
----------
function : object
Function to decorate.
Returns
-------
object
Examples
--------
>>> @ignore_python_warnings
... def f():
... warnings.warn('This is an ignored warning!')
>>> f()
"""
@functools.wraps(function)
def wrapped(*args, **kwargs):
"""
Wrapped function.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return function(*args, **kwargs)
return wrapped | 438e54fe927f787783175faacf4eb9608fd27cf0 | 705,816 |
def get_requirements(extra=None):
"""
Load the requirements for the given extra from the appropriate
requirements-extra.txt, or the main requirements.txt if no extra is
specified.
"""
filename = f"requirements-{extra}.txt" if extra else "requirements.txt"
with open(filename) as fp:
# Parse out as one per line
return [l.strip() for l in fp.readlines() if l.strip()] | 7ce9e348357925b7ff165ebd8f13300d849ea0ee | 705,817 |
def _format_mojang_uuid(uuid):
"""
Formats a non-hyphenated UUID into a whitelist-compatible UUID
:param str uuid: uuid to format
:return str: formatted uuid
Example:
>>> _format_mojang_uuid('1449a8a244d940ebacf551b88ae95dee')
'1449a8a2-44d9-40eb-acf5-51b88ae95dee'
Must have 32 characters:
>>> _format_mojang_uuid('1')
Traceback (most recent call last):
...
ValueError: Expected UUID to have 32 characters
"""
if len(uuid) != 32:
raise ValueError('Expected UUID to have 32 characters')
return uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:] | 517071b28f1e747091e2a539cd5d0b8765bebeba | 705,818 |
def generate_uri(graph_base, username):
"""
Args:
graph_base (str): base graph URI.
username (str): username appended to the base.
Returns:
str: the concatenated URI.
"""
return "{}{}".format(graph_base, username) | 5e3557d300ed1a706e7b5257719135063d8c44e6 | 705,819 |
def get_word(path):
""" extract word name from json path """
return path.split('.')[0] | e749bcdaaf65de0299d35cdf2a2264568ad5051b | 705,821 |
import os
def sanitize_path(path):
"""
Ensure the local filesystem path we're supposed to write to is legit.
"""
if not path.startswith("/"):
raise Exception("Path must be fully qualified.")
os.chdir(path)
return os.getcwd() | 203cbf5cad2f252540bfa1cff74c68c236d4b105 | 705,822 |
import os
import yaml
def get_configs(conf_file=None):
""" Get configurations, like db_url
"""
if not conf_file:
conf_file = os.path.join(os.path.dirname(__file__),
os.pardir,
'config.yml')
with open(conf_file, 'r') as fp:
configs = yaml.safe_load(fp)
return configs | 0baee739a2fc8770e11c247d38ff486d194579c4 | 705,823 |
import json
def readJsonFile(filePath):
"""read data from json file
Args:
filePath (str): location of the json file
Returns:
variable: data read form the json file
"""
result = None
with open(filePath, 'r') as myfile:
result = json.load(myfile)
return result | cf15e358c52edcfb00d0ca5257cf2b5456c6e951 | 705,825 |
def get_namespace_leaf(namespace):
"""
From a provided namespace, return its leaf.
>>> get_namespace_leaf('foo.bar')
'bar'
>>> get_namespace_leaf('foo')
'foo'
:param namespace:
:return:
"""
return namespace.rsplit(".", 1)[-1] | 0cb21247f9d1ce5fa4dd8d313142c4b09a92fd7a | 705,826 |
def sort(list_):
"""
This function is a selection sort algorithm. It will put a list in numerical order.
:param list_: a list
:return: a list ordered numerically (ascending).
"""
for minimum in range(0, len(list_)):
for c in range(minimum + 1, len(list_)):
if list_[c] < list_[minimum]:
temporary = list_[minimum]
list_[minimum] = list_[c]
list_[c] = temporary
return list_ | 99007e4b72a616ae73a20358afc94c76e0011d3e | 705,827 |
import random
def auxiliar2(Letra, tabuleiro):
"""
Helper function for the computer's move. This function is part of the strategy and is responsible for making one of the computer's plays. It receives the computer's symbol as a parameter
and returns the move that will be made.
"""
if Letra == "X":
Letra2 = "O"
else:
Letra2 = "X"
if tabuleiro[1] == Letra2 and tabuleiro[5] == Letra:
if tabuleiro[3] == Letra:
return 7
elif tabuleiro[2] == Letra:
return 8
elif tabuleiro[4] == Letra:
return 6
elif tabuleiro[6] == Letra:
return 4
elif tabuleiro[7] == Letra:
return 3
elif tabuleiro[8] == Letra:
return 2
elif tabuleiro[9] == Letra:
jogada = random.choice([3, 7])
return jogada
elif tabuleiro[3] == Letra and tabuleiro[5] == Letra2:
if tabuleiro[1] == Letra2:
return 9
elif tabuleiro[2] == Letra2:
return 8
elif tabuleiro[4] == Letra2:
return 6
elif tabuleiro[6] == Letra2:
return 4
elif tabuleiro[8] == Letra2:
return 2
elif tabuleiro[9] == Letra2:
return 1
elif tabuleiro[7] == Letra2:
jogada = random.choice([9, 1])
return jogada
elif tabuleiro[7] == Letra and tabuleiro[5] == Letra2:
if tabuleiro[1] == Letra2:
return 9
elif tabuleiro[2] == Letra2:
return 8
elif tabuleiro[4] == Letra2:
return 6
elif tabuleiro[6] == Letra2:
return 4
elif tabuleiro[8] == Letra2:
return 2
elif tabuleiro[9] == Letra2:
return 1
elif tabuleiro[3] == Letra2:
jogada = random.choice([9, 1])
return jogada
elif tabuleiro[9] == Letra and tabuleiro[5] == Letra2:
if tabuleiro[3] == Letra2:
return 7
elif tabuleiro[2] == Letra2:
return 8
elif tabuleiro[4] == Letra2:
return 6
elif tabuleiro[6] == Letra2:
return 4
elif tabuleiro[7] == Letra2:
return 3
elif tabuleiro[8] == Letra2:
return 2
elif tabuleiro[1] == Letra2:
jogada = random.choice([3, 7])
return jogada | 1db9ca4a9a9e97a3b689efc4295b59ad553cfd7a | 705,828 |
import re
def harmonize_geonames_id(uri):
"""checks if a geonames Url points to geonames' rdf expression"""
if 'geonames' in uri:
geo_id = "".join(re.findall(r'\d', uri))
return "http://sws.geonames.org/{}/".format(geo_id)
else:
return uri | acfb8cb4277363c6bee4844a0a95ed2ea464e741 | 705,830 |
import random
def generate_id():
"""Generate Hexadecimal 32 length id."""
return "%032x" % random.randrange(16 ** 32) | 2f9a9eb7cc1808515fb7d71607899bb43d2ac682 | 705,832 |
def __charge_to_sdf(charge):
"""Translate RDkit charge to the SDF language.
Args:
charge (int): Numerical atom charge.
Returns:
str: Str representation of a charge in the sdf language
"""
if charge == -3:
return "7"
elif charge == -2:
return "6"
elif charge == -1:
return "5"
elif charge == 0:
return "0"
elif charge == 1:
return "3"
elif charge == 2:
return "2"
elif charge == 3:
return "1"
else:
return "0" | 1bfda86ee023e8c11991eaae2969b87a349b7f7e | 705,833 |
def balanced(banked_chemicals):
"""return true if all non-ore chemicals have non-negative amounts."""
def _enough(chemical):
return chemical == "ORE" or banked_chemicals[chemical] >= 0
return all(map(_enough, banked_chemicals)) | c42d492bfc67664040095260c24bbff155e98d5e | 705,834 |
import bz2
import json
def dict2json(thedict, json_it=False, compress_it=False):
"""if json_it convert thedict to json
if compress_it, do a bzip2 compression on the json"""
if compress_it:
return bz2.compress(json.dumps(thedict).encode())
elif json_it:
return json.dumps(thedict)
else:
return thedict | b6158427c653a00cc6953ce9f0b0a0fb4881bd7a | 705,835 |
import argparse
def parse_arguments():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(
description="Python script for minimizing unit cell."
)
subparser = parser.add_subparsers(dest='command')
subparser.required = True
yaml_parse = subparser.add_parser("yaml")
xml_parse = subparser.add_parser("xml")
yaml_parse.add_argument(
'--input',
"-i",
type=str,
help="Input yaml file",
required=True
)
xml_parse.add_argument(
'--nvt',
dest='nvt',
action='store_true',
default=False,
required=False,
help="Perform md in nvt only."
)
xml_parse.add_argument(
'--input',
"-i",
type=str,
help="Input xml file",
required=True
)
xml_parse.add_argument(
'--pdb',
"-p",
type=str,
help="Input pdb file",
required=True
)
xml_parse.add_argument(
'--prefix',
"-pre",
type=str,
help="Output prefix for csv and dcd files.",
default="xtal_md",
required=False
)
xml_parse.add_argument(
'--nanoseconds',
"-ns",
type=int,
help="Production length in nanoseconds.",
required=False,
default=100
)
xml_parse.add_argument(
'--replicates',
"-r",
type=int,
help="Number of replicates to generate.",
required=False,
default=10
)
xml_parse.add_argument(
'--temperature',
"-t",
type=float,
help="Target temperature in md run.",
required=False,
default=298.15
)
return parser.parse_args() | 11e59e4c3f35a042794d07ea6294c4e015245798 | 705,836 |
def compoundedInterest(fv, p):
"""Compounded interest
Returns: Interest value
Input values:
fv : Future value
p : Principal
"""
i = fv - p
return i | 00f7fd1f141293afe595393eca23c308d3fdd7d0 | 705,837 |
def get_volumetric_scene(self, data_key="total", isolvl=0.5, step_size=3, **kwargs):
"""Get the Scene object which contains a structure and a isosurface components
Args:
data_key (str, optional): Use the volumetric data from self.data[data_key]. Defaults to 'total'.
isolvl (float, optional): The cutoff for the isosurface, using the same units as VESTA (e/bohr^3)
so it is kept independent of the grid size. Defaults to 0.5.
step_size (int, optional): step_size parameter for marching_cubes_lewiner. Defaults to 3.
**kwargs: kwargs for the Structure.get_scene function
Returns:
[type]: [description]
"""
struct_scene = self.structure.get_scene(**kwargs)
iso_scene = self.get_isosurface_scene(
data_key=data_key,
isolvl=isolvl,
step_size=step_size,
origin=struct_scene.origin,
)
struct_scene.contents.append(iso_scene)
return struct_scene | 836fb5f3158ed5fe55a2975ce05eb21636584a95 | 705,838 |
def reduce_aet_if_dry(aet, wat_lev, fc):
""" Reduce actual evapotranspiration if the soil is dry. If the water level
in a cell is less than 0.7*fc, the rate of evapo-transpiration is
reduced by a factor. This factor is 1 when wat_lev = 0.7*fc and
decreases linearly to reach 0 when wat_lev = 0 i.e. where
wat_lev < 0.7*fc, apply a correction factor of
wat_lev/(0.7*fc) to the aet grid.
Args:
aet: "Raw" actual evapotranspiration grid.
wat_lev: Water level grid
fc: Soil field capacity grid.
Returns:
Array (modified AET grid with AET reduced where necessary).
"""
# Get a boolean array showing which cells need correcting
bool_array = wat_lev < (0.7*fc)
# Calculate a correction factor for all cells, but subtract 1 from answer
cor_facts_minus1 = (wat_lev / (0.7*fc)) - 1
# Multiplying bool_array by cor_facts_minus1 gives a grid with values of
# (cor_fact - 1) for cells which need correcting and zero otherwise. Add 1
# to this to get a grid of cor_facts and ones
cor_facts = (bool_array * cor_facts_minus1) + 1
return aet*cor_facts | 170462a23c3903a390b89963aa6ce21839e5d44b | 705,839 |
def tan(x):
"""Return the tangent of *x* radians."""
return 0.0 | 51a8f497b2cc81cfd0066a7f8f5b1afef362941e | 705,840 |
def format_to_str(*a, **kwargs):
""" Formats gotten objects to str. """
result = ""
if kwargs == {}:
kwargs = {'keepNewlines': True}
for x in range(0, len(a)):
tempItem = a[x]
if type(tempItem) is str:
result += tempItem
elif type(tempItem) in [list, dict, tuple]:
result += str(tempItem) # pformat(tempItem)
elif hasattr(tempItem, "itemType"):
result += "<" + tempItem.itemType + ":" + tempItem.itemModelPointer + ">"
else:
result += str(tempItem)
if x < len(a) - 1:
result += " "
if not kwargs.get('keepNewlines', True):
result = result.replace("\n", "*nl*")
return result | 066262f6059a7f146026b1bc638b9119e2c34718 | 705,841 |
import re
def list_to_sentences(string):
""" Splits text at newlines and puts it back together after stripping new-
lines and enumeration symbols, joined by a period.
"""
if string is None:
return None
lines = string.splitlines()
curr = ''
processed = []
for line in lines:
stripped = line.strip()
# empty line
if 0 == len(stripped):
if curr:
processed.append(re.sub(r'\.\s*$', '', curr))
curr = ''
# beginning a new fragment
elif not curr or 0 == len(curr):
curr = re.sub(r'^[-\d\.\(\)]+\s*', '', stripped)
# new line item? true when it starts with "-", "1." or "1)" (with
# optional dash) or if the indent level is less than before (simple
# whitespace count) (NO LONGER IMPLEMENTED)
elif re.match(r'^-\s+', stripped) \
or re.match(r'^\d+\.\s+', stripped) \
or re.match(r'^(-\s*)?\d+\)\s+', stripped):
if curr:
processed.append(re.sub(r'\.\s*$', '', curr))
curr = re.sub(r'^(-|(\d+\.)|((-\s*)?\d+\)))\s*', '', stripped)
# append to previous fragment
else:
curr = '%s %s' % (curr, stripped)
if curr:
processed.append(re.sub(r'\.\s*$', '', curr))
sentences = '. '.join(processed) if len(processed) > 0 else ''
if len(sentences) > 0:
sentences += '.'
return sentences | 3f155bf501d78cb9263a9cbb0b6d7e4102daeb53 | 705,842 |
import os
from pathlib import Path
def create_directory(list_path_proj: list, dir_name: str):
"""
:return:
Directory created at
c:\\users\\$Env:USER\\projects\\automate-ssas-build\\examples/bi-project_name-olap/queries/
c:\\users\\$Env:USER\\projects\\automate-ssas-build\\examples/bi-two-olap/queries/
"""
list_path_proj_with_dir = []
print(f'\nDirectory created at')
for path_olap in list_path_proj:
dir = os.path.join(path_olap + dir_name)
Path(dir).mkdir(parents=True, exist_ok=True)
list_path_proj_with_dir.append(dir)
print(dir)
return list_path_proj_with_dir | 99719b6c0f64c474d67b6ec896b281088e58ef3f | 705,843 |
def dictize_params(params):
"""
Parse parameters into a normal dictionary
"""
param_dict = dict()
for key, value in params.iteritems():
param_dict[key] = value
return param_dict | 4847815622b0855b1056361bff7f7ee02fe6d97a | 705,844 |
def get_python3_status(classifiers):
"""
Search through list of classifiers for a Python 3 classifier.
"""
status = False
for classifier in classifiers:
if classifier.find('Programming Language :: Python :: 3') == 0:
status = True
return status | b4bf347dc0bbf3e9a198baa8237f7820cbb86e0b | 705,845 |
def lookup_listener(param):
"""
Flags a method as a @lookup_listener. This method will be updated on the changes to the lookup. The lookup changes
when values are registered in the lookup or during service activation.
@param param: function being attached to
@return:
"""
def decor(func):
if not hasattr(func, "lookup_decor"):
func.lookup_decor = [param]
else:
func.lookup_decor.append(param)
return func
return decor | 5d053e20ca8c2316aa46f27809b8e0ae59077d32 | 705,846 |
def ratings_std(df):
"""calculate standard deviation of ratings from the given dataframe
parameters
----------
df (pandas dataframe): a dataframe containing all ratings
Returns
-------
standard deviation(float): standard deviation of ratings, keep 4 decimal
"""
std_value = df['ratings'].std()
std_value = round(std_value,4)
return std_value | b1bf00d25c0cee91632eef8248d5e53236dd4526 | 705,847 |
import hashlib
def _hash(file_name, hash_function=hashlib.sha256):
"""compute hash of file `file_name`"""
with open(file_name, 'rb') as file_:
return hash_function(file_.read()).hexdigest() | 463d692116fbb85db9f1a537cbcaa5d2d019ba05 | 705,848 |
def _get_perf_hint(hint, index: int, _default=None):
"""
Extracts a "performance hint" value -- specified as either a scalar or 2-tuple -- for
either the left or right Dataset in a merge.
Parameters
----------
hint : scalar or 2-tuple of scalars, optional
index : int
Indicates whether the hint value is being extracted for the left or right Dataset.
0 = left, 1 = right.
_default : optional
Optional default value, returned if `hint` is None.
Returns
-------
Any
The extracted performance hint value.
"""
if hint is None:
return _default
elif isinstance(hint, tuple):
return hint[index]
else:
return hint | d67a70d526934dedaa9f571970e27695404350f2 | 705,849 |
def synchronized_limit(lock):
"""
Synchronization decorator; provide thread-safe locking on a function
http://code.activestate.com/recipes/465057/
"""
def wrap(f):
def synchronize(*args, **kw):
if lock[1] < 10:
lock[1] += 1
lock[0].acquire()
try:
return f(*args, **kw)
finally:
lock[1] -= 1
lock[0].release()
else:
raise Exception('Too busy')
return synchronize
return wrap | a28adfca434b7feaa5aa33c2ba4d1ed2e48cf916 | 705,850 |
def most_interval_scheduling(interval_list):
"""
Maximum interval scheduling: greedily prefer the interval with the smallest 'end' value.
Args:
interval_list(list): list of intervals
Returns:
scheduling_list(list): list of the selected, non-overlapping intervals
"""
scheduling_list = list()
sorted_interval_list = sorted(interval_list,
key=lambda x: x['end'])
size = len(sorted_interval_list)
scheduling_list.append(sorted_interval_list[0])
for i in range(1, size):
if scheduling_list[-1]['end'] <= sorted_interval_list[i]['start']:
scheduling_list.append(sorted_interval_list[i])
return scheduling_list | 82b1d051221043025497c95d9657245b5b507bde | 705,851 |
def find_keys(info: dict) -> dict:
"""Determines all the keys and their parent keys.
"""
avail_keys = {}
def if_dict(dct: dict, prev_key: str):
for key in dct.keys():
if key not in avail_keys:
avail_keys[key] = prev_key
if type(dct[key]) == dict:
if_dict(dct[key], key + '[].')
elif type(dct[key]) == list:
for item in dct[key]:
if type(item) == dict:
if_dict(item, key + '[].')
if_dict(info, '')
# print(avail_keys)
return avail_keys | 8d0bed361767d62bbc3544efdfe47e8e1065f462 | 705,852 |
def valid(exc, cur1, cur2=None, exclude=None, exclude_cur=None):
"""
Find if the given exc satisfies currency 1
(currency 2) (and is not exclude) (and currency is not exclude)
"""
if exclude is not None and exc == exclude:
return False
curs = [exc.to_currency, exc.from_currency]
if exclude_cur is not None and exclude_cur in curs:
return False
if cur2 is not None:
return cur1 in curs and cur2 in curs
return cur1 in curs | 84a37e669fee120aed8fbc57ab13d5f70f583cf4 | 705,853 |
def species_thermo_value(spc_dct):
""" species enthalpy at 298
"""
return spc_dct['H298'] | 7684b0ace0fa9717cb1cc3ea83bb6be8099c4bf6 | 705,854 |
def sentences_from_doc(ttree_doc, language, selector):
"""Given a Treex document, return a list of sentences in the given language and selector."""
return [bundle.get_zone(language, selector).sentence for bundle in ttree_doc.bundles] | d9c09249171d5d778981fb98a8a7f53765518479 | 705,855 |
import math
def demo_func(par):
"""Test function to optimize."""
x = par['x']
y = par['y']
z = par['z']
p = par['p']
s = par['str']
funcs = {
'sin': math.sin,
'cos': math.cos,
}
return (x + (-y) * z) / ((funcs[s](p) ** 2) + 1) | 5899be5709c4a6ecf09cf9852c1b7569d85616b3 | 705,856 |
def file_to_list(filename):
"""
Read in a one-column txt file to a list
:param filename:
:return: A list where each line is an element
"""
with open(filename, 'r') as fin:
alist = [line.strip() for line in fin]
return alist | 33bee263b98c4ff85d10191fa2f5a0f095c6ae4b | 705,857 |
def gen_model_forms(form, model):
"""Creates a dict of forms. model_forms[0] is a blank form used for adding
new model objects. model_forms[m.pk] is an editing form pre-populated
the fields of m"""
model_forms = {0: form()}
for m in model.objects.all():
model_forms[m.pk] = form(instance=m)
return model_forms | 28bf3f007a7f8f971c18980c84a7841fd116898f | 705,858 |
def as_pandas(cursor, coerce_float=False):
"""Return a pandas `DataFrame` out of an impyla cursor.
This will pull the entire result set into memory. For richer pandas-like
functionality on distributed data sets, see the Ibis project.
Parameters
----------
cursor : `HiveServer2Cursor`
The cursor object that has a result set waiting to be fetched.
coerce_float : bool, optional
Attempt to convert values of non-string, non-numeric objects to floating
point.
Returns
-------
DataFrame
"""
from pandas import DataFrame # pylint: disable=import-error
names = [metadata[0] for metadata in cursor.description]
return DataFrame.from_records(cursor.fetchall(), columns=names,
coerce_float=coerce_float) | e1a9f5ba9b589a9c94f6df1a379833d8d7176d2b | 705,859 |
def get_domain_name_for(host_string):
"""
Replaces namespace:serviceName syntax with serviceName.namespace one,
appending default as namespace if None exists
"""
return ".".join(
reversed(
("%s%s" % (("" if ":" in host_string else "default:"), host_string)).split(
":"
)
)
) | 6084e299f31d9c2eb922783d0488e9672051443f | 705,860 |
def cleanFAAText(origText):
"""Take FAA text message and trim whitespace from end.
FAA text messages have all sorts of trailing whitespace
issues. We split the message into lines and remove all
right trailing whitespace. We then recombine them into
a uniform version with no trailing whitespace.
The final line will not have a newline character at the
end.
Args:
origText (str): Message text as it comes from the FAA.
Returns:
str: Cleaned up text as described above.
"""
lines = origText.split('\n')
numLines = len(lines)
# Remove empty line at end if present
if lines[-1] == '':
numLines -= 1
for i in range(0, numLines):
lines[i] = lines[i].rstrip()
newText = '\n'.join(lines).rstrip()
return newText | ea9882e24c60acaa35cae97f8e95acb48f5fd2a6 | 705,861 |
from bs4 import BeautifulSoup
import re
def get_additional_rent(offer_markup):
""" Searches for additional rental costs
:param offer_markup:
:type offer_markup: str
:return: Additional rent
:rtype: int
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
table = html_parser.find_all(class_="item")
for element in table:
if "Czynsz" in element.text:
return int(("".join(re.findall(r'\d+', element.text))))
return | 8836beda16e21fe214344d647de9260195afa6a7 | 705,862 |
def _read_hyperparameters(idx, hist):
"""Read hyperparameters as a dictionary from the specified history dataset."""
return hist.iloc[idx, 2:].to_dict() | b2a036a739ec3e45c61289655714d9b59b2f5490 | 705,863 |
def frame_comps_from_set(frame_set):
"""
A `set` of all component names every defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result | 525ea19b78cb2a360165085720d42df58aa72500 | 705,864 |
def p1_marker_loc(p1_input, board_list, player1):
"""Take the location of the marker for Player 1."""
# verify if the input is not in range or in range but in a already taken spot
while p1_input not in range(1, 10) or (
p1_input in range(1, 10) and board_list[p1_input] != " "
):
try:
p1_input = int(
input("Player 1: Where would you like to place the marker (1 - 9)? ")
)
# if a marker is already placed on that board location, display a message
# warning player 1 and ask for their input again
if board_list[p1_input] != " ":
print(
"There is already a marker there, please choose another location."
)
input("Press Enter to continue. ")
print()
# input the player for another location for the marker
continue
except ValueError:
print("This is not a number, please try again!")
print()
print(f"Player 1 is placing {player1} in position {p1_input}.")
# return the variable to reassign it locally on the game_logic() function
return p1_input | ea8cfd35e56d7e34efa7319667f1a655b597cf39 | 705,865 |
import re
def cleanupString(string, replacewith="_", regex="([^A-Za-z0-9])"):
"""Remove all non-numeric or alphanumeric characters"""
# Please don't use the logging system here. The logging system
# needs this method, using the logging system here would
# introduce a circular dependency. Be careful not to call other
# functions that use the logging system.
return re.sub(regex, replacewith, string) | b327879a345a4236b871f824937997f6bd43d55b | 705,866 |
import requests
from bs4 import BeautifulSoup
def make_request(method, url, **kwargs):
"""Make HTTP request, raising an exception if it fails.
"""
request_func = getattr(requests, method)
response = request_func(url, **kwargs)
# raise an exception if request is not successful
if not response.status_code == requests.codes.ok:
response.raise_for_status()
return BeautifulSoup(response.text) | 1f47b178b66efe31fd78a4affc76a87d5be428bc | 705,867 |
import time
def timestamp(format_key: str) -> str:
"""
Format the current time.
:Args:
- format_key: which output format to use, STR TYPE.
:Usage:
timestamp('format_day')
"""
format_time = {
'default':
{
'format_day': '%Y-%m-%d',
'format_now': '%Y-%m-%d-%H_%M_%S',
'unix_now': '%Y-%m-%d %H:%M:%S',
}
}
return time.strftime(format_time['default'][format_key], time.localtime(time.time())) | dab77afb630193d45fbc5b07c08fd82c3dfa3050 | 705,868 |
def test_bus(test_system):
"""Create the test system."""
test_system.run_load_flow()
return test_system.buses["bus3"] | fea4880446059171dae5d6fffc24bdc98eede5cd | 705,869 |
def disassemble_pretty(self, addr=None, insns=1,
arch=None, mode=None):
"""
Wrapper around disassemble to return disassembled instructions as string.
"""
ret = ""
disas = self.disassemble(addr, insns, arch, mode)
for i in disas:
ret += "0x%x:\t%s\t%s\n" % (i.address, i.mnemonic, i.op_str)
return ret | 39bddf246b880decbc84015ef20c5664f88d917e | 705,870 |
def _type_of_plot(orientation, n_var, i, j):
"""internal helper function for determining plot type in a corner plot
Parameters
----------
orientation : str
the orientation
options: 'lower left', 'lower right', 'upper left', 'upper right'
i, j : int
the row, column index
Returns
-------
plot type : str
'remove' : do not show this plot
'same' : the axes are the same
'compare' : compare the two different axes
"""
if orientation == "lower left":
if j > i:
return i, j, "remove"
elif j == i:
return i, j, "same"
else: # j < i
return i, j, "compare"
elif orientation == "lower right":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'remove'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
elif orientation == "upper left":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'compare'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'remove'
elif orientation == "upper right":
raise ValueError("not yet supported orientation")
# if j < i:
# return i, j, 'remove'
# elif j == i:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
else:
raise ValueError("not supported orientation") | 9629af21f1995ccd1b582d4f9a7b1ecf2c621c84 | 705,871 |
def IOU(a_wh, b_wh):
"""
Intersection over Union
Args:
a_wh: (width, height) of box A
b_wh: (width, height) of box B
Returns float.
"""
aw, ah = a_wh
bw, bh = b_wh
I = min(aw, bw) * min(ah, bh)
area_a = aw * ah
area_b = bw * bh
U = area_a + area_b - I
return I / U | 92580147eac219d77e6c8a38875c5ee809783790 | 705,872 |
from datetime import datetime
def date_from_string(date_str, format_str):
"""
returns a date object by a string
"""
return datetime.strptime(date_str, format_str).date() | 7ba2fa5652264c62e2a6711210a39613cf565e37 | 705,873 |
import re
def fix_sensor_name(name):
"""Cleanup sensor name, returns str."""
name = re.sub(r'^(\w+)-(\w+)-(\w+)', r'\1 (\2 \3)', name, flags=re.IGNORECASE)
name = name.title()
name = name.replace('Acpi', 'ACPI')
name = name.replace('ACPItz', 'ACPI TZ')
name = name.replace('Coretemp', 'CoreTemp')
name = name.replace('Cpu', 'CPU')
name = name.replace('Id ', 'ID ')
name = name.replace('Isa ', 'ISA ')
name = name.replace('Pci ', 'PCI ')
name = name.replace('Smc', 'SMC')
name = re.sub(r'(\D+)(\d+)', r'\1 \2', name, flags=re.IGNORECASE)
name = re.sub(r'^K (\d+)Temp', r'AMD K\1 Temps', name, flags=re.IGNORECASE)
name = re.sub(r'T(ccd\s+\d+|ctl|die)', r'CPU (T\1)', name, flags=re.IGNORECASE)
name = re.sub(r'\s+', ' ', name)
return name | 6a346ece5f03c60a2b5d23d5a66c52735aef2939 | 705,874 |
import re
def check_pre_release(tag_name):
"""
Check the given tag to determine if it is a release tag, that is, whether it
is of the form rX.Y.Z. Tags that do not match (e.g., because they are
suffixed with someting like -beta# or -rc#) are considered pre-release tags.
Note that this assumes that the tag name has been validated to ensure that
it starts with something like rX.Y.Z and nothing else.
"""
release_re = re.compile('^r[0-9]+\\.[0-9]+\\.[0-9]+')
return False if release_re.match(tag_name) else True | 8e24a0a61bfa6fe84e936f004b4228467d724616 | 705,875 |
def _get_target_connection_details(target_connection_string):
"""
Returns a tuple with the raw connection details for the target machine extracted from the connection string provided
in the application arguments. It is a specialized parser of that string.
:param target_connection_string: the connection string provided in the arguments for the application.
:return: A tuple in the form of (user, password, host, port) if a password is present in the connection string or
(user, host, port) if a password is not present
"""
password = None
connection_string_format_error = 'Invalid connection string provided. Expected: user[/password]@host[:port]'
if '@' not in target_connection_string:
raise TypeError(connection_string_format_error)
connection_string_parts = target_connection_string.split('@')
if len(connection_string_parts) != 2:
raise TypeError(connection_string_format_error)
authentication_part = connection_string_parts[0]
target_part = connection_string_parts[1]
if '/' in authentication_part:
auth_parts = authentication_part.split('/')
if len(auth_parts) != 2:
raise TypeError(connection_string_format_error)
user, password = auth_parts
else:
user = authentication_part
if ':' in target_part:
conn_parts = target_part.split(':')
if len(conn_parts) != 2:
raise TypeError(connection_string_format_error)
host, port = conn_parts
try:
port = int(port)
except ValueError:
raise TypeError(connection_string_format_error)
else:
host = target_part
port = 22
if not len(user) or not len(host):
raise TypeError(connection_string_format_error)
if password:
return user, password, host, int(port)
else:
return user, host, int(port) | 5e6ee870c0e196f54950f26ee6e551476688dce9 | 705,876 |
from typing import List
def convert(day_input: List[str]) -> List[List[str]]:
"""Breaks down the input into a list of directions for each tile"""
def dirs(line: str) -> List[str]:
dirs, last_c = [], ''
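# Directions are e, w, ne, nw, se, sw; a leading 'n' or 's' is buffered until the following 'e' or 'w' completes it.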
for c in line:
if c in ['e', 'w']:
dirs.append(last_c + c)
last_c = ''
else:
last_c = c
return dirs
return [dirs(line) for line in day_input] | fd1d683e69dbff8411cecdaa184355f2311d3e8a | 705,877 |
import codecs
def read(filepath):
"""Read file content from provided filepath."""
with codecs.open(filepath, encoding='utf-8') as f:
return f.read() | bff53fbb9b1ebe85c6a1fa690d28d6b6bec71f84 | 705,878 |
import inspect
def is_bound_builtin_method(meth):
"""Helper returning True if meth is a bound built-in method"""
return (inspect.isbuiltin(meth)
and getattr(meth, '__self__', None) is not None
and getattr(meth.__self__, '__class__', None)) | a7a45f0f519119d795e91723657a1333eb6714e4 | 705,879 |
import torch
def create_fourier_heatmap_from_error_matrix(
error_matrix: torch.Tensor,
) -> torch.Tensor:
"""Create Fourier Heat Map from error matrix (about quadrant 1 and 4).
Note:
Fourier Heat Map is symmetric about the origin.
So by performing an inversion operation about the origin, Fourier Heat Map is created from error matrix.
Args:
error_matrix (torch.Tensor): The size of error matrix should be (H, H/2+1). Here, H is height of image.
This error matrix shoud be about quadrant 1 and 4.
Returns:
torch.Tensor (torch.Tensor): Fourier Heat Map created from error matrix.
"""
assert len(error_matrix.size()) == 2
assert error_matrix.size(0) == 2 * (error_matrix.size(1) - 1)
fhmap_rightside = error_matrix[1:, :-1]
fhmap_leftside = torch.flip(fhmap_rightside, (0, 1))
return torch.cat([fhmap_leftside[:, :-1], fhmap_rightside], dim=1) | 25a4a4e2aa2ffda317f28d85c3798682fd72c466 | 705,880 |
def same_strange_looking_function(param1, callback_fn):
"""
This function is documented, but the function is identical to some_strange_looking_function
and should result in the same hash
"""
tail = param1[-1]
# return the callback value from the tail of param whatever that is
return callback_fn(tail) | 438becf6803e6b25a200a34e18eb648aaa4b6fbb | 705,881 |
def vocabulary_size(tokens):
"""Returns the vocabulary size count defined as the number of alphabetic
characters as defined by the Python str.isalpha method. This is a
case-sensitive count. `tokens` is a list of token strings."""
vocab_list = set(token for token in tokens if token.isalpha())
return len(vocab_list) | 5e26e1be98a3e82737277458758f0fd65a64fe8f | 705,882 |
def get_transit_boundary_indices(time, transit_size):
""" Determines transit boundaries from sorted time of transit cut out
:param time (1D np.array) sorted times of transit cut out
:param transit_size (float) size of the transit crop window in days
:returns tuple:
[0] list of transit start indices (int)
[1] list of sequence lengths (int) of each transit
"""
sequence_lengths = []
transit_start_indices = [0]
for i, t in enumerate(time):
if t - time[transit_start_indices[-1]] > transit_size:
sequence_lengths.append(i - transit_start_indices[-1])
transit_start_indices.append(i)
# last length is from last transit start til the end of the array
sequence_lengths.append(len(time) - transit_start_indices[-1])
return transit_start_indices, sequence_lengths | cd3775d72690eb4539e0434b0ac7f715d14374a6 | 705,883 |
def _computePolyVal(poly, value):
"""
Evaluates a polynomial at a specific value.
:param poly: a list of polynomial coefficients, (first item = highest degree to last item = constant term).
:param value: number used to evaluate poly
:return: a number, the evaluation of poly with value
"""
#return numpy.polyval(poly, value)
acc = 0
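# Horner's scheme: one multiply-add per coefficient, highest degree first.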
for c in poly:
acc = acc * value + c
return acc | 0377ba0757439409824b89b207485a99f804cb41 | 705,885 |
def odd_desc(count):
"""
Replace ___ with a single call to range to return a list of descending odd numbers ending with 1
For e.g if count = 2, return a list of 2 odds [3,1]. See the test below if it is not clear
"""
return list(reversed(range(1,count*2,2))) | 2f90095c5b25f8ac33f3bb86d3f46e67932bc78a | 705,886 |
def outside_range(number, min_range, max_range):
"""
Returns True if `number` lies outside the inclusive range [`min_range`, `max_range`].
"""
return number < min_range or number > max_range | dc3889fbabb74db38b8558537413ebc5bc613d05 | 705,887 |
def make_inverter_path(wire, inverted):
""" Create site pip path through an inverter. """
if inverted:
return [('site_pip', '{}INV'.format(wire), '{}_B'.format(wire)),
('inverter', '{}INV'.format(wire))]
else:
return [('site_pip', '{}INV'.format(wire), wire)] | 066c4bbad0f65fec587b12fc7a2947246401b877 | 705,888 |
from typing import Any
def device_traits() -> dict[str, Any]:
"""Fixture that sets default traits used for devices."""
return {"sdm.devices.traits.Info": {"customName": "My Sensor"}} | 1ccaeac4a716706915654d24270c24dac0210977 | 705,889 |
import sqlite3
def get_prof_details(prof_id):
"""
Returns the details of the professor in same order as DB.
"""
cursor = sqlite3.connect('./db.sqlite3').cursor()
cursor.execute("SELECT * FROM professor WHERE prof_id = ?;", (prof_id))
return cursor.fetchone() | 668652474009abdda36d3e97fb5d30074f0a2755 | 705,890 |
def available_help(mod, ending="_command"):
"""Returns the dochelp from all functions in this module that have _command
at the end."""
help_text = []
for key in mod.__dict__:
if key.endswith(ending):
name = key.split(ending)[0]
help_text.append(name + ":\n" + mod.__dict__[key].__doc__)
return help_text | 9afa1525c016aa74dd4b3eb91851890da3590524 | 705,891 |
import math
import base64
import os
def newid(length=16):
"""
Generate a new random string ID.
The generated ID is uniformly distributed and cryptographically strong. It is
hence usable for things like secret keys and access tokens.
:param length: The length (in chars) of the ID to generate.
:type length: int
:returns: A random string ID.
:rtype: str
"""
l = int(math.ceil(float(length) * 6. / 8.))
return base64.b64encode(os.urandom(l))[:length].decode('ascii') | b287a929f0dde6244b66bb8d9d9289b97f2d090b | 705,892 |
import torch
def skewness_fn(x, dim=1):
"""Calculates skewness of data "x" along dimension "dim"."""
std, mean = torch.std_mean(x, dim)
n = torch.Tensor([x.shape[dim]]).to(x.device)
eps = 1e-6 # for stability
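# sqrt(n*(n-1))/(n-2) is the adjusted Fisher-Pearson bias correction for sample skewness.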
sample_bias_adjustment = torch.sqrt(n * (n - 1)) / (n - 2)
skewness = sample_bias_adjustment * (
(torch.sum((x.T - mean.unsqueeze(dim).T).T.pow(3), dim) / n)
/ std.pow(3).clamp(min=eps)
)
return skewness | ae0bdea16c1461a2e407ed57279557bc8c7f56de | 705,893 |
def get_parser_args(args=None):
"""
Transform args (``None``, ``str``, ``list``, ``dict``) to parser-compatible (list of strings) args.
Parameters
----------
args : string, list, dict, default=None
Arguments. If dict, '--' are added in front and there should not be positional arguments.
Returns
-------
args : None, list of strings.
Parser arguments.
Notes
-----
All non-strings are converted to strings with :func:`str`.
"""
if isinstance(args,str):
return args.split()
if isinstance(args,list):
return list(map(str,args))
if isinstance(args,dict):
toret = []
for key in args:
toret += ['--%s' % key]
if isinstance(args[key],list):
toret += [str(arg) for arg in args[key]]
else:
val = str(args[key])
if val: toret += [val]
return toret
return args | 41b607a6ebf12526efcd38469192b398419327bf | 705,894 |
def set_name_line(hole_lines, name):
"""Define the label of each line of the hole
Parameters
----------
hole_lines: list
a list of line object of the slot
name: str
the name to give to the line
Returns
-------
hole_lines: list
List of line object with label
"""
for ii in range(len(hole_lines)):
hole_lines[ii].label = name + "_" + str(ii)
return hole_lines | a57667f269dac62d39fa127b2a4bcd438a8a989b | 705,895 |
import logging
import os
def print_listdir(x):
"""."""
log = logging.getLogger('SIP.workflow.function')
log.info('HERE A')
print('Task id = {} {}'.format(x, os.listdir('.')))
return x, os.listdir('.') | 738fa091d5f7f9bca0bf43edfbf09eafcba87ba3 | 705,896 |
def contains_digit(s):
"""Find all files that contain a number and store their patterns.
"""
isdigit = str.isdigit
return any(map(isdigit, s)) | 941bcee8b6fbca6a60a8845f88a3b5765e3711bb | 705,897 |
def preprocess_text(sentence):
"""Handle some weird edge cases in parsing, like 'i' needing to be capitalized
to be correctly identified as a pronoun"""
cleaned = []
words = sentence.split(' ')
for w in words:
if w == 'i':
w = 'I'
if w == "i'm":
w = "I'm"
cleaned.append(w)
return ' '.join(cleaned) | 4e1d69eaf0adc1ede6bc67563e499602e320e76b | 705,898 |
def num_channels_to_num_groups(num_channels):
"""Returns number of groups to use in a GroupNorm layer with a given number
of channels. Note that these choices are hyperparameters.
Args:
num_channels (int): Number of channels.
"""
if num_channels < 8:
return 1
if num_channels < 32:
return 2
if num_channels < 64:
return 4
if num_channels < 128:
return 8
if num_channels < 256:
return 16
else:
return 32 | e2095fba2b1b9cdada72d354ddcd781d99e4aa48 | 705,900 |
def is_reserved(word):
"""
Determines if word is reserved
:param word: String representing the variable
:return: True if word is reserved and False otherwise
"""
lorw = ['define','define-struct']
return word in lorw | 0b0e3706bcafe36fc52e6384617223078a141fb2 | 705,901 |
def x_span_contains_y(x_spans, y_spans):
"""
Return whether all elements of y_spans are contained by some elements of x_spans
:param x_spans:
:type x_spans:
:param y_spans:
:type y_spans:
"""
for i, j in y_spans:
match_found = False
for m, n in x_spans:
if i >= m and j <= n:
match_found = True
break
# If this particular x_span found
# a match, keep looking.
if match_found:
continue
# If we find an element that doesn't
# have a match, return false.
else:
return False
# If we have reached the end of both loops, then
# all elements match.
return True | c366a5a5543e2fe9f6325cd3d31eccffb921693c | 705,902 |
import argparse
def parse_args():
"""Parse arguments and return them
:returns: argparse object
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='configuration file',
required=True)
return parser.parse_args() | d0fc1399c058f53558e08f13811c9709e518fd84 | 705,903 |
import csv
def read_csv_from_file(file):
"""
Reads the CSV data from the open file handle and returns a list of dicts.
Assumes the CSV data includes a header row and uses that header row as
fieldnames in the dict. The following fields are required and are
case-sensitive:
- ``artist``
- ``song``
- ``submitter``
- ``seed``
Other fields are ultimately preserved untouched in the output CSV.
If the CSV doesn't have a header row, uses the following hardcoded list:
- ``order``
- ``seed``
- ``submitter``
- ``year``
- ``song``
- ``artist``
- ``link``
If a tab character is present in the first row, assumes the data is
tab-delimited, otherwise assumes comma-delimited.
:returns: All parsed data from the already-opened CSV file given, as a list
of dicts as generated by `csv.DictReader`
"""
data = list(file)
delimiter = "\t" if "\t" in data[0] else ","
# Look for a header row
reader = csv.reader([data[0]], delimiter=delimiter)
row = next(reader)
for col in row:
try:
int(col)
# Found an integer, no headers present
headers = ["order", "seed", "submitter", "year", "song", "artist", "link"]
break
except ValueError:
pass
else:
# Unable to find an integer here, must be a header row
# Pop the header row off the data list and create a new reader just to
# parse that row
data.pop(0)
headers = row
return list(csv.DictReader(data, fieldnames=headers, delimiter=delimiter)) | 89cfce0be6270076230051a6e852d1add3f4dcaf | 705,904 |