content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---
def get_base_url(hostname):
"""
Constructs the GitHub API url with the given hostname.
:param str hostname: Hostname to construct the API endpoint url.
    :returns: The GitHub API base url built from the hostname.
:raises: None
"""
if hostname and hostname.startswith('http'):
return hostname
else:
return "https://{hostname}/api/v3".format(hostname=hostname)
return hostname | b0a13d3054fd48a9970c639ea8730d24a67a09ed | 704,331 |
def getTabData(header, index):
"""Get the table data at index from above tables generator.
Expects the header string and the index of the table."""
    tabStart = header.index('{', index) + 1 # start of the table: first character after '{'
    tabEnd = header.index('}', tabStart) # end of the table: index of the closing '}'
return header[tabStart:tabEnd] | 738e6f1532129043e67e1e639a66d12e43d1e9a6 | 704,332 |
def dict_merged(d, _filter=None, **kw):
"""Update dictionary d with the items passed as kw if the value passes _filter."""
def f(s):
if _filter:
return _filter(s)
return s is not None
d = d or {}
for k, v in kw.items():
if f(v):
d[k] = v
return d | 03df2c26975f114db1b6f258a4324d0d12109126 | 704,333 |
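A minimal usage sketch (illustrative values only) showing how the default not-None filter drops entries; note that the input dict is updated in place and returned:
base = {'a': 1}
merged = dict_merged(base, b=None, c=2)
assert merged == {'a': 1, 'c': 2}  # b is dropped because its value is None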
def topological_sort(self):
"""
Returns:
        List(int): Topological sort of vertices of a graph, or None if the graph contains a cycle.
"""
topo_order = self.depth_first_search()[1]
position = [-1] * len(self.v)
for i, v in enumerate(topo_order):
position[v] = i
for u, v in self.e:
if position[u] > position[v]:
return None
return topo_order | ccd21c7486c3b32da1203015e77c24530d1f5e56 | 704,334 |
import time
def wait_for_job(res, ping_time=0.5):
"""
Blocks execution and waits for an async Forest Job to complete.
:param JobResult res: The JobResult object to wait for.
:param ping_time: The interval (in seconds) at which to ping the server.
:return: The completed JobResult
"""
while not res.is_done():
res.get()
time.sleep(ping_time)
return res | 1a7202f58affa97b0001b246fb7cd187d6a59f44 | 704,335 |
def numeric_type(param):
"""
Checks parameter type
    True for float, int, or None; False otherwise
:param param: input param to check
"""
    return type(param) in (float, int) or param is None | a5f67a30b3128c1214d8825abbc6ae5170680d80 | 704,336 |
def search_data_start(file, identifier, encoding):
"""
    Returns the (1-based) line number of the identifier that marks the start of the data in a file.
"""
if identifier is None:
return 0
    with open(file, encoding=encoding) as search:
        for i, line in enumerate(search, start=1):
            if identifier in line:
                return i | 2c3c903df2162b9f6fe5452b75c4f7ac06ddd194 | 704,337 |
def get_page_generator(s,max_items=0):
"""Get the generator that returns the Page objects
that we're interested in, from Site s.
"""
page_generator = s.allpages()
if(max_items>0):
page_generator.set_maximum_items(max_items)
return page_generator | d53a890523c999df878fecc71ef1dbd8d17c188c | 704,338 |
import os
def file_basename_no_extension(filename):
""" Returns filename without extension
>>> file_basename_no_extension('/home/me/file.txt')
'file'
>>> file_basename_no_extension('file')
'file'
"""
base = os.path.basename(filename)
name, extension = os.path.splitext(base)
return name | d4512a06ecc861d2b9e992691fbabb11b9e6e958 | 704,339 |
def segment_text_to_sentences(text_file, sentence_splitter):
""" Segment text into sentences. Text is provided by BRAT in .txt
file.
Args:
text_file (str): the full path to the BRAT .txt file.
sentence_splitter (spacy LM): SpaCy EN language model.
Returns:
sentences (list((int, int, str))): list of sentence spans.
Spans are triples of (start_offset, end_offset, text),
where offset is relative to the text.
"""
sentences = []
    with open(text_file, "r") as ftext:
        for line in ftext:
            splits = sentence_splitter(line.strip())
            for sent in splits.sents:
                sentences.append((sent.start_char, sent.end_char, sent.text))
    return sentences | d74857a4931d162b9573b1b086a8720563b4fd41 | 704,340 |
def get_folder_offset(folder_list, folder_name):
"""
Check whether there is already a folder named 'folder_name' and if so, increment a counter.
"""
isExists = False
# Check whether a folder with the same name already exists
for folder in folder_list:
if folder['title'] == folder_name:
#print('title: %s, id: %s' % (folder['title'], folder['id']))
isExists = True
print('Folder', folder_name, 'already exists. Adding an index...')
break
if not isExists :
return 0
    # Increment a counter until finding a folder name that doesn't exist (with pattern 'name_i')
    existing_titles = {folder['title'] for folder in folder_list}
    i = 1
    while folder_name + '_' + str(i) in existing_titles:
        i += 1
    return i | b620e6ee6819b2b1aac6b3fd6a05775cde34838e | 704,341 |
def param_to_string(metric) -> str:
"""Convert a list / tuple of parameters returned from IE to a string"""
if isinstance(metric, (list, tuple)):
return ', '.join([str(x) for x in metric])
else:
return str(metric) | 54476f88936336728ba73425bb57860e17fb7561 | 704,342 |
from sys import stdout
import os
def get_first_last_line(filePath, encoding=stdout.encoding):
"""Return the first and the last lines of file
    The existence of filePath should be checked beforehand.
Args:
filePath (str): the path of the file
encoding (str): the encoding of the file. Default stdout.encoding
Returns
two strings (unstripped)
"""
with open(filePath, "rb") as f:
first = f.readline() # Read the first line.
f.seek(-2, os.SEEK_END) # Jump to the second last byte.
while f.read(1) != b"\n": # Until EOL is found...
# ...jump back the read byte plus one more.
f.seek(-2, os.SEEK_CUR)
last = f.readline() # Read last line.
# encode string
return str(first, encoding), str(last, encoding) | 0d2da4c861e118b3b4d29bfc95ac3d56df7804e6 | 704,343 |
def fitness_func_large(vector):
""" returns a very large number for fitness"""
return 9999999999999999999 | 08e6f43c5f891fe7138dfc7b1d0809ba048bf070 | 704,345 |
def remove_prefix(s, pre):
"""
Remove prefix from the beginning of the string
Parameters:
----------
s : str
pre : str
Returns:
-------
s : str
string with "pre" removed from the beginning (if present)
"""
if pre and s.startswith(pre):
return s[len(pre):]
return s | 6bae14cddd38fcfabfb0fadb9f4dbeaea81ff4ac | 704,346 |
def block_distance(p1, p2):
"""
    Returns the block (Manhattan) distance between two points; the last element of each point is skipped.
"""
distance = 0
for i in range(len(p1)-1):
distance += abs(p1[i]-p2[i])
return distance | 29d7febe0bd8fcdfc16cbc27c7f3490f265c9daf | 704,347 |
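A hedged worked example: the loop above stops one element short, so the last entry of each point (presumably a class label) is ignored.
assert block_distance([1, 2, 'A'], [4, 6, 'B']) == 7  # |1 - 4| + |2 - 6|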
def remove_empty(df):
"""
Drop all rows and columns that are completely null.
Implementation is shamelessly copied from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501
Functional usage example:
.. code-block:: python
df = remove_empty(df)
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).remove_empty()
:param df: The pandas DataFrame object.
:returns: A pandas DataFrame.
"""
nanrows = df.index[df.isnull().all(axis=1)]
df.drop(index=nanrows, inplace=True)
nancols = df.columns[df.isnull().all(axis=0)]
df.drop(columns=nancols, inplace=True)
return df | c2ea9fc13bfa57bc357a83c607bfe9ce9348fb2e | 704,348 |
from typing import Optional
def calculate_serving_size_weight(
weight: Optional[float], number_of_servings: Optional[float]
) -> Optional[float]:
"""
Given a weight (representing the total weight of the
component included in a recipe) and a number of servings
(how many servings of the component are included),
returns a number representing the weight of
just one serving of the component.
"""
if weight is not None and number_of_servings is not None:
return weight/number_of_servings
else:
return None | b22732a60f1f6000277861a615c78e785b4757bb | 704,349 |
def format_key(key):
"""
Format the key provided for consistency.
"""
if key:
return key if key[-1] == "/" else key + "/"
return "/" | 8b5e41bb76c524ec8c45a22ad0dae84c84ed530b | 704,350 |
from typing import Dict
def _define_problem_with_groups(problem: Dict) -> Dict:
"""
Checks if the user defined the 'groups' key in the problem dictionary.
If not, makes the 'groups' key equal to the variables names. In other
words, the number of groups will be equal to the number of variables, which
is equivalent to no groups.
Parameters
----------
problem : dict
The problem definition
Returns
-------
problem : dict
The problem definition with the 'groups' key, even if the user doesn't
define it
"""
# Checks if there isn't a key 'groups' or if it exists and is set to 'None'
if 'groups' not in problem or not problem['groups']:
problem['groups'] = problem['names']
elif len(problem['groups']) != problem['num_vars']:
raise ValueError("Number of entries in \'groups\' should be the same "
"as in \'names\'")
return problem | ab29954f3349509a9153219d040feb8fa3125ec7 | 704,351 |
def gaspari_cohn_mid(z,c):
"""
Gaspari-Cohn correlation function for middle distances (between c and 2*c)
Arguments:
- z: Points to be evaluated
- c: Cutoff value
"""
return 1./12*(z/c)**5 - 0.5*(z/c)**4 + 5./8*(z/c)**3 \
+ 5./3*(z/c)**2 - 5*z/c - 2./3*c/z + 4 | 0852e84c1ce10856d69420fcc585054488591e73 | 704,352 |
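A quick sanity check of the formula: the middle-range branch decays to zero at the outer cutoff z = 2*c, up to floating-point rounding.
assert abs(gaspari_cohn_mid(2.0, 1.0)) < 1e-12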
def send(channel, apdu: list) -> bytes:
"""
Send APDU to the channel and return the data if there are no errors.
"""
data, sw1, sw2 = channel.transmit(apdu)
# success
if [sw1, sw2] == [0x90, 0x00]:
return bytes(data)
# signals that there is more data to read
elif sw1 == 0x61:
# print("[=] More data to read:", sw2)
return send(channel, [0x00, 0xC0, 0x00, 0x00, sw2]) # GET RESPONSE of sw2 bytes
elif sw1 == 0x6C:
# print("[=] Resending with Le:", sw2)
return send(channel, apdu[0:4] + [sw2]) # resend APDU with Le = sw2
# probably error condition
else:
print(
"Error: %02x %02x, sending APDU: %s"
% (sw1, sw2, " ".join(["{:02x}".format(x) for x in apdu]).upper())
)
channel.disconnect()
exit(1) | bb3a1e52b6fdb5480b23f0646e768a7f90500acd | 704,353 |
import sys
import ast
import configparser
def __parse_options(config_file, section, options):
""" Parse the section options
:type config_file: ConfigParser object
:param config_file: The config file object to use
:type section: str
:param section: Which section to read in the configuration file
:type options: list of dicts
:param options:
A list of options to parse. Example list::
[{
'key': 'aws_access_key_id',
'option': 'aws-access-key-id',
'required': False,
'type': str
}]
:returns: dict
"""
configuration = {}
for option in options:
try:
if option.get('type') == 'str':
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
elif option.get('type') == 'int':
try:
configuration[option.get('key')] = \
config_file.getint(section, option.get('option'))
except ValueError:
print('Error: Expected an integer value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'float':
try:
configuration[option.get('key')] = \
config_file.getfloat(section, option.get('option'))
except ValueError:
                    print('Error: Expected a float value for {0}'.format(
                        option.get('option')))
sys.exit(1)
elif option.get('type') == 'bool':
try:
configuration[option.get('key')] = \
config_file.getboolean(section, option.get('option'))
except ValueError:
                    print('Error: Expected a boolean value for {0}'.format(
                        option.get('option')))
sys.exit(1)
elif option.get('type') == 'dict':
configuration[option.get('key')] = \
ast.literal_eval(
config_file.get(section, option.get('option')))
else:
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
except configparser.NoOptionError:
if option.get('required'):
print('Missing [{0}] option "{1}" in configuration'.format(
section, option.get('option')))
sys.exit(1)
return configuration | 6a2813a336aee3e1696caeb148aaac98c7dd6621 | 704,354 |
import math
def DrawTextBar(value, max_value, max_width=53):
"""Return a simple ASCII bar graph, making sure it fits within max_width.
Args:
value: integer or float representing the value of this bar.
max_value: integer or float representing the largest bar.
max_width: How many characters this graph can use (int)
Returns:
string
"""
hash_width = max_value / max_width
return int(math.ceil(value/hash_width)) * '#' | 7f4b267527317cbceddadc9f7a0307f8ec430bb4 | 704,355 |
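A small usage sketch: a value half of max_value fills half of max_width.
assert DrawTextBar(5, 10, max_width=10) == '#####'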
def _FindBinmanNode(dtb):
"""Find the 'binman' node in the device tree
Args:
dtb: Fdt object to scan
Returns:
Node object of /binman node, or None if not found
"""
for node in dtb.GetRoot().subnodes:
if node.name == 'binman':
return node
return None | bf924d173a1adf81c1705ad1ea1fae490567a317 | 704,356 |
def make_list_accos():
"""Return the acco numbers as list
Returns:
list: List with acco numbers
"""
    list_saharas = list(range(1, 23))
    list_kalaharis = list(range(637, 656))
    list_balis = list(range(621, 627))
    list_waikikis = list(range(627, 637))
    list_serengeti = list(range(659, 668))
    return list_saharas + list_kalaharis + list_balis + list_waikikis + list_serengeti | 9f2ac7aa4f78588013160f94374e602832b61771 | 704,357 |
import os
import re
def find_files(directory='.', pattern='.*', recursive=True):
"""Search recursively for files matching a pattern"""
if recursive:
return (os.path.join(directory, filename)
for directory, subdirectories, filenames in os.walk(directory)
for filename in filenames if re.match(pattern, filename))
else:
return (os.path.join(directory, filename)
for filename in os.listdir(directory)
if re.match(pattern, filename)) | a725a30df0783badd90357e5ce917dd37cf99426 | 704,358 |
def _host_is_same(host1: str, host2: str) -> bool:
"""Check if host1 and host2 are the same."""
return host1.split(":")[0] == host2.split(":")[0] | 0bd9393786801d0f69d4982fc9f8edce378e9656 | 704,359 |
def get_filter_type_choices():
"""
Get a tuple of filter types
:return: tuple with filter types
"""
return ('', 'Select one'), ('Filter Types', [('storlet', 'Storlet'), ('native', 'Native')]) | 21f4173b1aafa35b4c877d6f844349c2907932a8 | 704,360 |
import math
def round_half_up(n: float, decimals: int = 0) -> float:
"""This function rounds to the nearest integer number (e.g 2.4 becomes 2.0 and 2.6 becomes 3);
in case of tie, it rounds up (e.g. 1.5 becomes 2.0 and not 1.0)
Args:
n (float): number to round
decimals (int): number of decimal figures that we want to keep; defaults to zero
Returns:
rounded_number (float): input number rounded with the desired decimals
"""
multiplier = 10 ** decimals
rounded_number = math.floor(n * multiplier + 0.5) / multiplier
return rounded_number | e0aab5cba456b4ffe6fab11a21b97fe4e17b045a | 704,362 |
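Hedged examples of the tie-break, contrasted with the built-in round(), which uses banker's rounding:
assert round_half_up(1.5) == 2.0
assert round_half_up(2.5) == 3.0  # whereas round(2.5) == 2
assert round_half_up(2.4) == 2.0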
from typing import Set
def _possible_edges(n1: Set, n2: Set, directed: bool, self_loops: bool = False):
"""Compute the number of possible edges between two sets."""
a = n1.intersection(n2)
e = (len(n1) - len(a)) * (len(n2) - len(a))
if directed:
e *= 2
if self_loops:
e += len(n1) + len(n2) - len(a)
return e | 4cf21d9521c3d071d7d1376bd917f2ec39435108 | 704,363 |
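A hedged worked example: with n1 = {1, 2} and n2 = {2, 3}, the shared node 2 is excluded, leaving one possible undirected edge (1-3), or two directed ones.
assert _possible_edges({1, 2}, {2, 3}, directed=False) == 1
assert _possible_edges({1, 2}, {2, 3}, directed=True) == 2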
import argparse
def get_raygen_argparser():
"""
Get the command line input/output arguments passed in to `raygen`.
"""
parser = argparse.ArgumentParser(
        description='A simple static site generator, for those who want full control over the generation of a blog.'
)
parser.add_argument(
'--server',
help='run the server.',
action="store_true"
)
parser.add_argument(
'-p', '--port',
type=int,
default=8080,
help='Port number to serve files on.'
)
return parser | da3993c1d98be1a3ecf5cec2942b4cda5edb3a8d | 704,364 |
def default_category_orders() -> dict:
"""Returns the default dictionary of category orders"""
day_order = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
weekend_order = ["Weekday", "Weekend"]
season_order = ["Spring", "Summer", "Autumn", "Winter"]
month_order = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
category_orders = {
"dayofweek": day_order,
"weekend": weekend_order,
"season": season_order,
"month": month_order,
}
return category_orders | 4110287bc30445f27c7c3d0c38cb662d769a5217 | 704,365 |
import re
def is_guid(techfin_tenant):
"""Validate guid arg
Args:
        techfin_tenant (str): techfin tenant id
Returns:
bool: true if is valid guid value
"""
c = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}', re.I)
    res = c.match(techfin_tenant)
    return res is not None | 7242f0da279375ab5873670ffef1fd4aa8749546 | 704,366 |
def shell_short(unordered, ordered):
"""
    Starting at the bottom of the stack:
    1 - If the name is in the correct position, move to the next
    2 - If it is not in position, remove it, move all other names
        one position down and go to 1
    Sort all the removed positions; these names are the result
"""
unordered = unordered[::-1]
ordered = ordered[::-1]
names = {}
for i, name in enumerate(ordered):
names[name] = i
# Stack using id instead of names
stack = [names[n] for n in unordered]
    # Extract numbers that need reordering
reorder = []
for i in range(len(stack)):
if stack[i] != i-len(reorder):
reorder.append(stack[i])
return [ordered[n] for n in sorted(reorder)] | df57eb0bee03159ac6b698bf0377efec48355e76 | 704,367 |
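A hedged worked example: to turn the stack ['b', 'a', 'c'] into the target order ['a', 'b', 'c'], only 'a' has to be pulled out and re-inserted, so it alone is returned.
assert shell_short(['b', 'a', 'c'], ['a', 'b', 'c']) == ['a']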
def __standard_cand_fun(candidates):
"""
Convert candidates from the forms accepted by :py:fun:`recommend` into
a standard form, a function that takes a user and returns a candidate
list.
"""
if isinstance(candidates, dict):
return candidates.get
elif candidates is None:
return lambda u: None
else:
return candidates | ad206802bfbcd0ec8f4601ebc043f8d468709c75 | 704,368 |
def dBrickId(brickId):
"""Return box id if valid, raise an exception in other case"""
if brickId >= 0 and brickId <= 15:
return brickId
else:
raise ValueError(
'{} is not a valid Brick Id, Brick Id must be between 0-15'.format(
brickId)) | 10e0f27f179dcd54c5cc4967ea960b77a4c5a924 | 704,370 |
import hashlib
def hash(text, digest_alg = 'md5'):
"""
Generates hash with the given text using the specified
digest hashing algorithm
"""
    if not isinstance(digest_alg, str):
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        # hashlib requires bytes in Python 3; encode str input (UTF-8 assumed)
        if isinstance(text, str):
            text = text.encode('utf-8')
        h.update(text)
    return h.hexdigest() | 386268086a55b8e622c00b407cabd3207bb94ffb | 704,371 |
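A usage sketch (assuming the str-to-bytes encoding fix above): the md5 digest of 'hello' is a well-known value.
assert hash('hello') == '5d41402abc4b2a76b9719d911017c592'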
def merge_params(params, config):
"""Merge CLI params with configuration file params. Configuration params
will overwrite the CLI params.
"""
return {**params, **config} | a1dc002a900968e6cf7c5ba401519759e6ef485e | 704,372 |
def init_rate():
""" This rate indicates the recorded positions' intervals. """
rate = float(0.1) # (Hz)
return rate | e6e9c6439fe4288c24be18bb098f1844aed9fc64 | 704,373 |
def get_gamma(y1, y2, gamma1, gamma2):
"""一般部位及び大部分がガラスで構成されていないドア等の開口部における日除けの効果係数 (-)
Args:
y1(float): 日除け下端から一般部及び大部分がガラスで構成されていないドア等の開口部の上端までの垂直方向距離 (mm)
y2(float): 一般部及び大部分がガラスで構成されていないドア等の開口部の高さ寸法 (mm)
gamma1(float): データ「日除けの効果係数」より算出した値
gamma2(float): データ「日除けの効果係数」より算出した値
Returns:
float: 一般部位及び大部分がガラスで構成されていないドア等の開口部における日除けの効果係数
"""
gamma = (gamma2 * (y1 + y2) - gamma1 * y1) / y2
return gamma | 6503a957bc7d5daee1926aaa23694b4550733f6d | 704,374 |
def tasks(tmpdir):
"""
Set up a project with some tasks that we can test displaying
"""
task_l = [
"",
" ^ this is the first task (released)",
" and it has a second line",
" > this is the second task (committed)",
" . this is the third task (changed, not yet committed)",
" - this is the fourth task (not yet made)",
" and this one has a second line also",
" + fifth task -- completed",
" < sixth task -- moved elsewhere",
" x seventh task -- abandoned",
"",
]
# project dir
prjdir = tmpdir.join("myproj")
# .project file
prjdir.join(".project").ensure()
# DODO file with content
dodo = prjdir.join("DODO")
dodo.write("\n".join(task_l) + "\n")
data = {
'tmpdir': tmpdir,
'prj': prjdir,
'dodo': dodo,
}
return data | 64104bde2aab55021cf0d49fbb1d47670d0e4e0d | 704,376 |
import os
def get_input_source_directory(config):
"""
Given a configuration object, returns the directory of the input file(s).
"""
options = config.commandline
if options.multilocus:
# multilocus dataset: assume directory is given as input source
return os.path.abspath(options.input)
else:
        # single locus dataset: return directory name
return os.path.dirname(os.path.abspath(options.input)) | 28f1b7d90ba45c812a36e8d16dde4813d5b4090d | 704,377 |
def singer_map(pop, rate):
"""
Define the equation for the singer map.
Arguments
---------
pop: float
current population value at time t
rate: float
growth rate parameter values
Returns
-------
float
scalar result of singer map at time t+1
"""
return rate * (7.86 * pop - 23.31 * pop ** 2 + 28.75 * pop ** 3 - 13.3 * pop ** 4) | 84aba1d96304b67fba1b4a0e7a909e23121a3d6b | 704,378 |
import json
def format_rpc_response(data, exception=None):
"""
Formats a response from a RPC Manager.
It provides the data and/or a serialized exception so it can be
re-created by the caller.
:param Any data: A JSON Serializable object.
:param Exception exception: An Exception object
:return str: JSON Response.
"""
exception_data = None
if exception:
args = exception.__getargs__() if hasattr(exception, "__getargs__") else exception.args
kwargs = exception.__getkwargs__() if hasattr(exception, "__getkwargs__") else {}
if kwargs is None: kwargs = {}
        try:
            module = exception.__module__
        except AttributeError:
            module = None
exception_data = {
'exception': type(exception).__name__,
'message': str(exception),
'args': args,
"kwargs": kwargs,
'module': module,
}
return json.dumps({
'data': data,
'exception': exception_data
}) | c900e2512fd486c91789ab4312883061553a2fb1 | 704,379 |
import logging
def select_best_haplotype_match(all_matches):
"""Returns the best HaplotypeMatch among all_matches.
The best matching HaplotypeMatch is the one with the lowest match_metrics
score.
Args:
all_matches: iterable[HaplotypeMatch]. An iterable of HaplotypeMatch objects
we want to select the best match from.
Returns:
The best matching HaplotypeMatch object.
"""
sorted_matches = sorted(all_matches, key=lambda x: x.match_metrics)
best = sorted_matches[0]
equivalents = [
f for f in all_matches if f.match_metrics == best.match_metrics
]
# redacted
if len(equivalents) > 1:
for i, f in enumerate(equivalents):
extra_info = 'best' if i == 0 else i
logging.warning('Equivalent match to best: %s [%s]', f, extra_info)
return equivalents[0] | 0e40fef830055e5cd297b0f00672d8b0caedc62e | 704,380 |
def get_tokens_list_from_column_list(column_name_list: list,
delimiter: str = '!!') -> list:
"""Function that returns list of tokens present in the list of column names.
Args:
column_name_list: The list of column name strings.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
A list of tokens present in the list of column names.
"""
tokens = []
for column_name in column_name_list:
for tok in column_name.split(delimiter):
if tok not in tokens:
tokens.append(tok)
return tokens | 66e2c3c280188d2cc3e8df35e0112095f3244918 | 704,381 |
def _dens0(S,T):
"""Density of seawater at zero pressure"""
# --- Define constants ---
a0 = 999.842594
a1 = 6.793952e-2
a2 = -9.095290e-3
a3 = 1.001685e-4
a4 = -1.120083e-6
a5 = 6.536332e-9
b0 = 8.24493e-1
b1 = -4.0899e-3
b2 = 7.6438e-5
b3 = -8.2467e-7
b4 = 5.3875e-9
c0 = -5.72466e-3
c1 = 1.0227e-4
c2 = -1.6546e-6
d0 = 4.8314e-4
# --- Computations ---
# Density of pure water
SMOW = a0 + (a1 + (a2 + (a3 + (a4 + a5*T)*T)*T)*T)*T
# More temperature polynomials
RB = b0 + (b1 + (b2 + (b3 + b4*T)*T)*T)*T
RC = c0 + (c1 + c2*T)*T
return SMOW + RB*S + RC*(S**1.5) + d0*S*S | a0df8ba385c18fbb7f51088cac2ec842bdef308f | 704,382 |
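A hedged sanity check against the standard EOS-80 check values: pure water at 25 degC is about 997.048 kg/m^3 and standard seawater (S=35, T=25) about 1023.343 kg/m^3.
assert abs(_dens0(0, 25) - 997.048) < 1e-3
assert abs(_dens0(35, 25) - 1023.343) < 1e-3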
def validate_state(state):
"""
State validation rule.
Property: LifecyclePolicy.State
"""
VALID_STATES = ("ENABLED", "DISABLED")
if state not in VALID_STATES:
raise ValueError("State must be one of : %s" % ", ".join(VALID_STATES))
return state | 5dcc3d2c8bf9242d8090aef0933f26d2ffa1821d | 704,384 |
def load_variable_config(project_config):
"""Extract the variable configuration out of the project configuration.
Args:
project_config (dict-like): Project configuration.
Returns:
dict: Variable dictionary with name: [levels] (single level will have a list containing None.)
"""
# Extract the different rank variables
v2ds = project_config['variables_2d']
v3ds = project_config['variables_3d']
# Create a dictionary of variables to process keyed to an empty list of levels for 2D
variables = {v2d: [None] for v2d in v2ds}
# Add in the 3D variables, with levels this time
for v3d, levels in v3ds.items():
variables[v3d] = levels
return variables | 37caccfa5f9c3a724e61233610c3e4a3e9938695 | 704,385 |
import torch
def cross_op_torch(r):
"""
Return the cross operator as a matrix
i.e. for input vector r \in \R^3
output rX s.t. rX.dot(v) = np.cross(r, v)
where rX \in \R^{3 X 3}
"""
if len(r.shape) > 1:
rX = torch.zeros(r.shape[0], 3, 3).to(r)
rX[..., 0, 1] = -r[..., 2]
rX[..., 0, 2] = r[..., 1]
rX[..., 1, 2] = -r[..., 0]
rX = rX - rX.transpose(-1, -2)
else:
rX = torch.zeros(3, 3).to(r)
rX[0, 1] = -r[2]
rX[0, 2] = r[1]
rX[1, 2] = -r[0]
rX = rX - rX.T
return rX | 04f926f00f6ed58bee3feae80ef573f5a8822d20 | 704,386 |
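A hedged usage sketch: the matrix form should reproduce the cross product, i.e. cross_op_torch(r) @ v equals the cross product of r and v (torch.linalg.cross is assumed available, torch >= 1.11).
r = torch.tensor([1.0, 2.0, 3.0])
v = torch.tensor([4.0, 5.0, 6.0])
assert torch.allclose(cross_op_torch(r) @ v, torch.linalg.cross(r, v))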
def __all_paths_between_acceptance_states(Dfa):
"""Generates for each front acceptance state a copy of the complete
graph which can be reached inside 'Dfa' starting from it until the next
acceptance state.
RETURNS: List of DFAs containing a tail for each found acceptance states.
"""
def _get_branch(Dfa, acceptance_si):
result = Dfa.clone_subset(acceptance_si, Dfa.get_successors(acceptance_si))
# Clone acceptance state as init state, which does not accept.
# Take over all transitions of the acceptance state.
new_state = result.get_init_state().clone()
new_state.set_acceptance(False)
result.set_new_init_state(new_state)
# Original acceptance only remains in place, if it is the target of a
# transition.
if not result.has_transition_to(acceptance_si):
result.delete_state(acceptance_si)
return result
return [
_get_branch(Dfa, acceptance_si)
for acceptance_si in Dfa.acceptance_state_index_list()
] | 136ea0a999bff3b930ad4e05107042da36262d13 | 704,387 |
def hello():
"""
An op definition. This example op outputs a single string.
For more hints about writing Dagster ops, see our documentation overview on Ops:
https://docs.dagster.io/concepts/ops-jobs-graphs/ops
"""
return "Hello, Dagster!" | cf701323e751122823f22bad864f7b1f0d700a97 | 704,388 |
def mentionable():
"""
:return: boolean
True, if there is something mentionable to notify about
False, if not
"""
    # needs to be implemented
return False | d1dac607efb512771677aa7e8dd42a2c21251833 | 704,389 |
import os
import fnmatch
def findFileTypes(wpath, type ='*.txt', verbose=False):
""" to find all the files in wpath and below with file names matching fname
"""
alist=sorted(os.walk(wpath))
if verbose:
print(' getting file list')
listPath = []
listFile = []
fileName = []
for (dirpath,dirnames,filenames) in alist:
if len(dirnames) == 0:
for f in filenames:
if fnmatch.fnmatch(f, type):
file=os.path.join(dirpath,f)
if os.path.isfile(file):
listFile.append(file)
listPath.append(dirpath)
fileName.append(f)
else:
for f in filenames:
if fnmatch.fnmatch(f, type):
file=os.path.join(dirpath,f)
if os.path.isfile(file):
listFile.append(file)
listPath.append(dirpath)
fileName.append(f)
if verbose:
nfiles=len(listFile)
print((' nfiles = %i'%(nfiles)))
return {'fullName':listFile, 'fileName':fileName} | b424d8bb93bfa5540847a308de9259c5ba14a048 | 704,390 |
import argparse
def _parse_args(argv=None):
"""Parse command-line args."""
def _positive_int(value):
"""Define a positive integer ArgumentParser type."""
value = int(value)
if value <= 0:
raise argparse.ArgumentTypeError(
"Value must be positive, {} was passed.".format(value))
return value
parser = argparse.ArgumentParser()
parser.add_argument(
"--file_pattern",
required=True,
help="File pattern for amazon qa files on Google cloud storage.",
)
parser.add_argument(
"--output_dir",
required=True,
help="Output directory to write the dataset on Google cloud storage.",
)
parser.add_argument(
"--max_words",
type=_positive_int,
default=59,
help="Maximum number of words a Q or A can have to be included.",
)
parser.add_argument(
"--min_words",
type=_positive_int,
default=4,
help="Minimum number of words a Q or A must have to be included.",
)
parser.add_argument(
"--train_split",
default=0.9, type=float,
help="The proportion of data to put in the training set.",
)
parser.add_argument(
"--num_shards_test",
default=10,
type=_positive_int,
help="The number of shards for the test set.",
)
parser.add_argument(
"--num_shards_train",
default=100,
type=_positive_int,
help="The number of shards for the train set.",
)
return parser.parse_known_args(argv) | abb5d64089e200592f057ee1356d135328196dab | 704,391 |
def get_worker_class(global_conf, message):
"""Returns class of worker needed to do message's work"""
worker_type = 'worker-%s' % (message.body['worker_type'])
if worker_type not in global_conf:
raise RuntimeError("Invalid worker type '%s'" % (worker_type))
conf = global_conf[worker_type]
import_target, class_name = conf['class'].rsplit('.', 1)
module = __import__(import_target, fromlist=[import_target])
return getattr(module, class_name) | 3f975caf97827fcfaf7d74141ea651c302e4781c | 704,392 |
def convertVoltage(raw_voltage):
""" Ground is 1
1.8 is 4095
"""
converted_voltage = (raw_voltage/4095)*1.8
return "%.3f" % converted_voltage | 4f404ff02449a231521f80a2b9a4ae443880e1b3 | 704,393 |
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Iterable
def solve_nr(
f: Callable[[float], float],
df: Callable[[float], float],
estimate: float,
eps: Optional[float]=1.0e-6,
max_num_iter=100,
throw_if_failed_converge=True,
return_vector=False) -> Union[Tuple[float, int], Iterable[float]]:
"""
Solves f(x) = 0 using Newton-Raphson method
:param f: function of x to solve
:param df: derivative of f(x)
:param estimate: initial estimate
:param eps: max absolute error; if None, will continue calculating until max_num_iter is reached
:param max_num_iter: Max number of iterations
:param throw_if_failed_converge: if True, will throw if fails to converge in max_num_iter (unless eps is None)
:param return_vector: if true, returns vector of all iterated values
:return: x, number of iterations; or vector of all iterations if return_vector is True
"""
if max_num_iter < 1:
raise ValueError('max_num_iter must be at least 1')
x = estimate
xv = [] if return_vector else None
n = 0
for n in range(1, max_num_iter + 1):
if xv is not None:
xv.append(x)
residue = f(x) / df(x)
x -= residue
if eps and (abs(residue) < eps):
break
else:
if throw_if_failed_converge and eps:
raise RuntimeError('Failed to converge in %i iterations' % n)
if xv is not None:
return xv
else:
return x, n | c6ab8b6bb27f8b9be9c31fe7cbd58300637d9fef | 704,394 |
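A minimal usage sketch: solving x**2 - 2 = 0 from the initial estimate x = 1 converges to sqrt(2).
root, iterations = solve_nr(lambda x: x * x - 2, lambda x: 2 * x, estimate=1.0)
assert abs(root - 2 ** 0.5) < 1e-6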
import json
def get_data(source):
"""fungsi ambil data pegawai, jadwal, judul, liburan"""
with open(source, 'r') as srce:
return json.load(srce) | 964efdabcbd21486985bbc9189c5d07dbc800dd6 | 704,395 |
def merge_config_dictionaries(*dicts):
"""
Merges n dictionaries of configuration data
:param list<dicts>:
:return dict:
"""
res_dict = {}
if isinstance(dicts, list):
if len(dicts) == 1 and isinstance(dicts[0], dict):
return dicts[0]
else:
for dictionary in dicts:
if isinstance(dictionary, dict):
res_dict.update(dictionary)
return res_dict | c9711e897d5c7caa47a21f3e901025c91862327f | 704,396 |
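A usage sketch of the fixed version: later dictionaries win on key collisions and non-dict arguments are silently skipped.
assert merge_config_dictionaries({'a': 1}, {'a': 2, 'b': 3}, None) == {'a': 2, 'b': 3}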
import calendar
import pytz
def epoch(dt):
"""
Returns the epoch timestamp of a timezone-aware datetime object.
"""
return calendar.timegm(dt.astimezone(pytz.utc).timetuple()) | 027ea75bf75b6bb6b4da14b2bed1afc363a9121a | 704,397 |
def set_produce_compilation_cache(enabled: bool) -> dict:
"""Forces compilation cache to be generated for every subresource script.
Parameters
----------
enabled: bool
**Experimental**
"""
return {"method": "Page.setProduceCompilationCache", "params": {"enabled": enabled}} | 3d2dd7fa6c8d04713ace26c666d9b00407a5a586 | 704,398 |
def raw_input(prompt=None): # real signature unknown; restored from __doc__
"""
raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading.
"""
return "" | ad09db4416e3705a34e4fc88c7df569693608c80 | 704,400 |
import zlib
import os
def get_checksum32(oqparam):
"""
Build an unsigned 32 bit integer from the input files of the calculation
"""
# NB: using adler32 & 0xffffffff is the documented way to get a checksum
# which is the same between Python 2 and Python 3
checksum = 0
for key in sorted(oqparam.inputs):
fname = oqparam.inputs[key]
if not fname:
continue
elif key == 'source': # list of fnames and/or strings
for f in fname:
data = open(f, 'rb').read()
checksum = zlib.adler32(data, checksum) & 0xffffffff
elif os.path.exists(fname):
data = open(fname, 'rb').read()
checksum = zlib.adler32(data, checksum) & 0xffffffff
else:
raise ValueError('%s does not exist or is not a file' % fname)
return checksum | c7aaf8d6aeefaa4b3f97dcac535c959b4ba06579 | 704,401 |
import asyncio
def create_future(*, loop):
""" Helper for `create a new future`_ with backward compatibility for Python 3.4
.. _create a new future: https://goo.gl/YrzGQ6
"""
try:
return loop.create_future()
except AttributeError:
return asyncio.Future(loop=loop) | 1708ac124c46fa81b7ff3ca1d7b685e4835cd53a | 704,402 |
def clean_column(df, column):
"""
Function to return clean column text. Pass each cell to a cleaner
and return the cleaned text for that specific column
:params:
--------
:df dataframe(): containing the column
:column str(): in which column the text is located
:returns:
---------
:list(): of str containing the cleaned text
"""
# Remove all the NaN values and transform it into a list
return list(df[column].dropna().values) | 095a854c452f87b9a960eabb81ace5c18814f266 | 704,404 |
def assign_number_to_top_categories(paths):
"""Assign numbers to the top categories
returned by split_path for consistency"""
cats = {}
def assign_number(path):
name = path[0][1]
n = cats.setdefault(name, len(cats) + 1)
return [(n, name)] + path[1:]
return map(assign_number, paths) | 0027986bd9097819b76ef9358f3fb0b491456b48 | 704,405 |
def sort_keywords(scores):
"""
:param scores: A dictionary of lemmas and their corresponding scores,
assigned by the pagerank algorithm
    :return: A list of lemmas sorted by score in descending order
    """
    sorted_lemmas = sorted(scores, key=scores.get, reverse=True)
    return sorted_lemmas | ef4349976e755fb5d0d95b0ee98c5184fbf055f2 | 704,406 |
def file_readlines(fn):
"""Open file with name `fn`, return open(fn).readlines()."""
fd = open(fn, 'r')
lst = fd.readlines()
fd.close()
return lst | 2594e6763b566f4e83844f2f4457bcc8ea3663a5 | 704,407 |
from datetime import datetime
def JIRATOSQLdatetimeformat(datetime_in):
"""
removes certain characters from fields returned by Jira requests, in order to facilitate insertion into SQL tables
would need to be written differently for a production application, to handle escape characters etc. more intelligently
parameters:
str_in (string): the string from Jira that needs characters removed
returns:
string: the string with characters removed
"""
datetime_out = datetime.strptime(datetime_in, "%Y-%m-%dT%H:%M:%S.%f%z").strftime("%Y-%m-%d %H:%M:%S")
return datetime_out | d75df0f925e4a3ed104ca98f8ef4ea0ae1d0557b | 704,408 |
import json
def apply_lookup(dataframe, group_type):
"""
converts df[group_type] from ids to names
dataframe : df
group_type : string
returns : df
"""
print("applying look up")
print("working with {}".format(group_type))
df = dataframe.copy(deep=True)
df[group_type] = (
df[group_type].astype("str", copy=False).dropna(inplace=False)
) # some columns were mixed type
# load the lookup table
path_to_save = "./data/lookups/{}_lookup.json".format(group_type)
with open(path_to_save) as f:
lookup = json.load(f)
lookup["nan"] = "None"
# explode by group_type
df[group_type] = df[group_type].map(lambda x: x.split(","))
df_exp = df.explode(group_type)
# apply lookup
df_exp[group_type] = df_exp[group_type].map(lambda x: lookup[x])
# implode
df_imp = (
df_exp[["bgg_id", group_type]]
.groupby("bgg_id")
.agg(lambda x: ",".join(x))
.reset_index()
)
# join back
df[group_type] = df_imp[group_type]
print("finished with {}".format(group_type))
print("finished applying look up")
return df | a41749555f106b4477414cc837f1c0cd56128acc | 704,409 |
def unescape(value, escape = "\\"):
"""
Unescapes the provided string value using the provided escape
character as the reference for the unescape operation.
This is considered to be a very expensive operation and so it
should be used carefully.
:type value: String
:param value: The string value that is going to be unescape.
:rtype: String
:return: The final unescaped value.
"""
result = []
iterator = iter(value)
for char in iterator:
if char == escape:
try:
result.append(next(iterator))
except StopIteration:
result.append(escape)
else:
result.append(char)
return "".join(result) | 28aaebbfc5ea0022ce519a3ef91988504ea345f4 | 704,410 |
def __num_elems(shape):
"""Returns the number of elements in the given shape
Args:
shape: TensorShape
Return:
tot_elems: int
"""
tot_elems = 1
for s in shape:
tot_elems *= int(s)
return tot_elems | fd4f72394b22c98e6bedb545d7d11b8bfae11add | 704,411 |
def adoptionSearch(cursor, search):
# search = data_params['search']
"""Return the result based on Mo's search input"""
lower_search = search.lower()
query = f"SELECT DISTINCT Adopter.email, AdoptionApplication.application_num, AdoptionApplication.date, " \
f"AdoptionApplication.co_applicant_first_name, AdoptionApplication.co_applicant_last_name, " \
f"Adopter.first_name, Adopter.last_name, Adopter.state, Adopter.city, Adopter.street, Adopter.zip_code, Adopter.cell_phone " \
f"FROM AdoptionApplication LEFT JOIN Adopter ON AdoptionApplication.adopter = Adopter.email " \
f"WHERE AdoptionApplication.STATUS = 'approved' AND (LOWER( Adopter.last_name ) LIKE '%{lower_search}%' OR LOWER( AdoptionApplication.co_applicant_last_name ) LIKE '%{lower_search}%');"
cursor.execute(query)
query_result = cursor.fetchall()
return query_result | 22388799f65bef447c80c3da5c8f656705cba27e | 704,412 |
import sys
def _read(f):
"""Read a file's contents, autodetecting whether the arg is a file or filename,
and treating '-' as as indication to read from stdin."""
if type(f) is str:
if f == "-":
return sys.stdin.read()
else:
with open(f, "r") as ff:
return ff.read()
else:
return f.read() | 51966b1a28d4c3d9b0bd037a8ffe037e901f58e5 | 704,413 |
def getHandValue(cards):
"""Returns value of cards."""
value = 0
numberOfAces = 0
for card in cards:
rank = card[0]
if rank == 'A':
numberOfAces += 1
elif rank in ('K','Q','J'):
value += 10
else:
value += int(rank)
value += numberOfAces
for i in range(numberOfAces):
if value + 10 <= 21:
value += 10
return value | b40d45db627add8376ff9135229688114f81be83 | 704,414 |
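Hedged worked examples (the rank is taken from the first character of each card string):
assert getHandValue(['AS', 'KD']) == 21        # the ace counts as 11 when it fits
assert getHandValue(['AS', 'AD', '9H']) == 21  # only one ace can count as 11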
def get_greppable(string):
"""Simply produces a string that -- when grepped -- will omit listing the grep process in a grep listing.
"""
return string.replace(string[0], '[%s]' % string[0], 1) | 65be4daa5650605ca3d95720d74a7a1137b5f4d7 | 704,416 |
def players_player_id_get(player_id): # noqa: E501
"""Retrieve a single player's record
Returns a player record # noqa: E501
:param player_id: ID of player to return
:type player_id: str
:rtype: Player
"""
return 'do some magic!' | d9c2c92dbba3d139b2b5188e8722a0add7668393 | 704,417 |
from typing import Callable
from typing import Sequence
from typing import Hashable
from typing import List
def _stemmatological_costs_factory(
max_del_len: int = 5, frag_start: float = 10.0, frag_end: float = 10.0
) -> Callable:
"""
Define and return a function for computing candidate costs for a "stemmatological" distance matrix.
    :param max_del_len: The maximum length of a deletion block.
    :param frag_start: Percentage of the sequence length, from the start, within which bulk deletions are discounted.
    :param frag_end: Percentage of the sequence length, from the end, within which bulk deletions are discounted.
    :return: The cost function.
"""
def _stemmatological_costs(
seq_x: Sequence[Hashable],
seq_y: Sequence[Hashable],
d: List[List[float]],
i: int,
j: int,
):
"""
Computes candidate costs for an entry of a "stemmatological" distance matrix.
This internal function will compute the candidate costs for an entry
(i, j) in terms of the Levenshtein distance matrix (seq_a, seq_b),
each cost corresponding to one of the available edit operations.
:param seq_x: The first sequence to be compared.
:param seq_y: The second sequence to be compared.
:param d: The "starting matrix" for the cost computation.
:param i: The index of `seq_x` to be considered.
:param j: The index of `seq_y` to be considered.
:return: A tuple with the costs for deletion, insertion, and substitution.
"""
substitution_cost = 0 if seq_x[i - 1] == seq_y[j - 1] else 1
costs = [
d[i][j - 1] + 1,
d[i - 1][j - 1] + substitution_cost,
]
m = len(seq_x)
lower = round(m * frag_start / 100.0)
upper = round(m * (100 - frag_end) / 100.0)
# Delete consecutive block of n
for n in range(1, min(max_del_len, i)):
# Discount bulk deletion near ends
if i <= lower or i >= upper:
costs.append(d[i - n][j] + 0.5)
else:
costs.append(d[i - n][j] + 1)
return costs
return _stemmatological_costs | 2be18ea378fb70b7efc511d3d5572ef8b7638a9c | 704,418 |
def build_info_str(username: str, name_len: int, remaining_chip: int, action: str,
chip: int, is_waiting: bool, countdown: int) -> str:
"""Build a string to explain action of a user
Args:
username (str): user name
name_len (int): characters to show the name
remaining_chip (int): remaining chip of the user
action (str): the action being taken, should be one of the
following: check, bet, raise, all-in, fold
The differences of `bet` and `raise` are that `bet` is
the first put-chip action, while `raise` is another
put-chip action against prior `bet`
chip (int): the chip of an action, only meaningful when
`action` is `bet`, `raise` or `all-in`
is_waiting (bool): a flag that indicate if this user is in
execution position
countdown (int): the countdown of waiting, only meaningful
when `is_waiting` is `True`
Return:
info_str (str): a string to explain action of a user
"""
info = f"{username:{name_len}} (${str(remaining_chip) + ')':<5} {action}"
if action in ("bet", "raise", "all-in"):
info += f" ${chip} "
if is_waiting:
info += f"{countdown}s"
info = "-> " + info
else:
info = " " + info
return info | 1ecbb6c33d54a55500d51ce09cf9740ac28def96 | 704,419 |
import os
def dataInput(fileName):
"""Reads a line of data from a file and returns either a string for
when there is only a single line of data or a list when there is
more than one line.
"""
data = []
if os.path.isfile(fileName):
file = open(fileName, "r")
data = file.readlines()
else:
        # To make configuration easier, it creates empty files
        # if it cannot find them.
file = open(fileName, "w")
file.write(os.linesep)
file.close()
# Turn the raw data into something usable.
if len(data) < 1:
result = ""
elif len(data) == 1:
result = data[0]
else:
result = []
for line in data:
result.append(line.strip(os.linesep))
return result | 421f0a3661463d89d6f5edf001dcb798d78711a6 | 704,420 |
def get_similarity_score(dict1, dict2, dissimilarity = False):
"""
The keys of dict1 and dict2 are all lowercase,
you will NOT need to worry about case sensitivity.
Args:
dict1: frequency dictionary of words or n-grams for one text
dict2: frequency dictionary of words or n-grams for another text
dissimilarity: Boolean, optional parameter. Default to False.
If this is True, return the dissimilarity score, 100*(DIFF/ALL), instead.
Returns:
int, a percentage between 0 and 100, inclusive
representing how similar the texts are to each other
The difference in text frequencies = DIFF sums words
from these three scenarios:
* If a word or n-gram occurs in dict1 and dict2 then
get the difference in frequencies
* If a word or n-gram occurs only in dict1 then take the
frequency from dict1
* If a word or n-gram occurs only in dict2 then take the
frequency from dict2
The total frequencies = ALL is calculated by summing
all frequencies in both dict1 and dict2.
Return 100*(1-(DIFF/ALL)) rounded to the nearest whole number if dissimilarity
is False, otherwise returns 100*(DIFF/ALL)
"""
DIFF = 0
for i in dict1:
x = False
#Boolean used to not add repeated frequencies as it will be seen later
for j in dict2:
if i == j:
#use of == instead of i in j as for example word "meme" could
#be in "memes" and would therefore cause a problem
DIFF += abs(dict1[i] - dict2[j])
                #if the word/n-gram appears in both dictionaries then
                #the absolute value of the difference between the frequencies
                #in each dictionary is added to DIFF
x = True
if x == False:
#Boolean used so that frequencies of a word/n-gram are not added again
#and again to DIFF
DIFF += dict1[i]
for j in dict2:
x = False
        #same use of boolean, for the same reasons as in the previous for loop
for i in dict1:
if i == j:
#use of == due to the same reason
x = True
#this time the absolute value of the difference between the
#frequencies doesn't have to be added as it already has been
if x == False:
DIFF += dict2[j]
ALL = 0
    for i in dict1:
        ALL += dict1[i]
        #all the frequencies of the first dictionary are added to ALL
    for j in dict2:
        ALL += dict2[j]
        #the same occurs as in the previous loop but for the second dictionary
    #Depending on the input of dissimilarity this will occur
    if dissimilarity == False:
        result = round(100*(1 - (DIFF/ALL)))
        #similarity between the dictionaries of word/n-grams is the result
    else:
        result = round(100*(DIFF/ALL))
        #dissimilarity between the dictionaries of word/n-grams is the result
    return result | 31e8602d6ef098a58a8eaf497badebf2e19288eb | 704,421 |
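A hedged worked example: here DIFF = |2 - 1| + 1 + 1 = 3 and ALL = 5, so the similarity is round(100 * (1 - 3/5)) = 40.
assert get_similarity_score({'a': 2, 'b': 1}, {'a': 1, 'c': 1}) == 40
assert get_similarity_score({'a': 2, 'b': 1}, {'a': 1, 'c': 1}, dissimilarity=True) == 60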
def predict_fn(input_data, model):
"""Predict using input and model"""
return model(input_data) | 00f7bf0bd71f70833f8f77b16ffa62559747e915 | 704,422 |
def get_unnormalized_text(words):
""" Returns the (unnormalized) text composed from the given words."""
return "".join([x.unnormalized_with_whitespaces for x in words]) | 162854d917ee4d49c3b2b824abc07697ac4f05ba | 704,423 |
import json
def get_ability_icons(champion, input_path):
"""
This function takes a champion and input path strings as input and returns a
dictionary of png file paths with keys corresponding to the following
abilities: Passive, Q, W, E, and R
"""
global ability_icon_paths
ability_icon_paths = dict()
# Rek'Sai appears to be the exception in naming conventions
if champion == 'Reksai':
champion = 'RekSai'
# Read champ-specific json
with open(f"{input_path}{champion}.json") as f:
data = json.load(f)
P_png = data['data'][champion]['passive']['image']['full']
Q_png = data['data'][champion]['spells'][0]['image']['full']
W_png = data['data'][champion]['spells'][1]['image']['full']
E_png = data['data'][champion]['spells'][2]['image']['full']
R_png = data['data'][champion]['spells'][3]['image']['full']
ability_icon_paths['Passive'] = f"data/dragontail-11.1.1/11.1.1/img/passive/{P_png}"
ability_icon_paths['Q'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{Q_png}"
ability_icon_paths['W'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{W_png}"
ability_icon_paths['E'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{E_png}"
ability_icon_paths['R'] = f"data/dragontail-11.1.1/11.1.1/img/spell/{R_png}"
return ability_icon_paths | e33c01bedcd8bf20959978df2bc2b33b934e2181 | 704,425 |
import requests
import os
def fetch_lightcurve_dr2(gaia_id, output_dir='../data/'):
""" Fetch Gaia Lightcurve for a Gaia Source ID (of a variable star) from Gaia DR2 Data Link
Returns path of csv file stored for given source
Args:
gaia_id (string): String. Gaia Source ID of the variable star you need to fetch the lightcurve from DR1 for
[output_dir] (string): Optional. String. By default, the csv files for the lightcurves are stored in the subfolder data/. To change the default path, enter a new path for the folder to save the lightcurve
Returns:
String. Gives back the path/to/lightcurve/filename.csv where the lightcurve is stored. Returns empty string if no lightcurve is fetched.
"""
url='https://gea.esac.esa.int/data-server/data?ID=Gaia+DR2+'+gaia_id+'&RETRIEVAL_TYPE=EPOCH_PHOTOMETRY&FORMAT=CSV'
save_path=output_dir+gaia_id+'_data_dr2.csv'
read_data = requests.get(url, allow_redirects=True)
if(len(read_data.content)==0):
print('Could not fetch lightcurve from DR2 for Gaia Source ID '+gaia_id)
return ''
#assert len(read_data.content)!=0, 'Could not fetch lightcurve from DR2 for Gaia Source ID '+gaia_id
if not os.path.exists(output_dir):
os.makedirs(output_dir)
open(save_path, 'wb').write(read_data.content)
return save_path | 18fb18ccb7cfcd2bbb8f826cbdf592ca59356ef9 | 704,426 |
def _l1_regularization(l1, model):
"""Computes the L1 regularization for the given model
Args:
l1 (float): L1 parameter
model (:obj:`torch.nn.Module`): Model to use
Returns:
float: L1 loss (i.e. l1 * l1_norm(params))
"""
l1_loss = sum(param.norm(1) for param in model.parameters())
return l1 * l1_loss | 32826672a7de00f8a0412e2496e6ebfea213b502 | 704,427 |
def find_bands(bands, target_avg, target_range, min_shows):
"""
Searches dictionary of bands with band name as keys and
competition scores as values for bands that are within the
range of the target average and have performed the minimum
number of shows. Returns a list of bands that meet the search
criteria.
Parameters:
bands: Dictionary with band name as keys and scores as values.
target_avg: Tuple containing the average to look for and the
amount of scores to look at.
target_range: Range to look away from the target average.
min_shows: Minimum number of shows to be eligible.
Returns:
List of bands that meet the search criteria.
>>> DCI = {'Blue Devils': [98.2, 97.1, 99.1, 97.3, 98.2], \
'Blue Coats': [98, 96.5, 97.2, 93, 92.1, 92, 97.4], \
'Carolina Crown': [75.7, 82.8, 86.1, 98.2], \
'The Cadets': [96.1, 93.4, 81, 78, 57.9, 86, 71.2, 35.5], \
'Mandarins': [89.3, 88.1, 85.6, 83.8, 79.1, 88.4, 75.7], \
'Little Rocks':[42], \
'Logan Colts':[98.2, 84.4, 69.2, 42, 84]}
>>> find_bands(DCI, (0, 10), 30, 2)
[]
>>> find_bands(DCI, (90, 5), 5, 7)
['Mandarins']
>>> find_bands(DCI, (70, 8), 10, 5)
['The Cadets', 'Logan Colts']
>>> find_bands(DCI, (95, 3), 5, 4)
['Blue Devils', 'Blue Coats', 'The Cadets']
# My doctests
>>> find_bands(DCI, (42, 10), 1, 1)
['Little Rocks']
>>> find_bands(DCI, (87, 10), 5, 5)
['Mandarins']
>>> DCI2 = {'UCSD': [100, 99, 100, 100, 100, 100], \
'UCLA': [50, 49, 100, 100, 100], \
'UCD': [90, 90, 87, 45, 79]}
>>> find_bands(DCI2, (95, 3), 5, 4)
['UCSD']
>>> find_bands(DCI2, (75, 5), 10, 4)
['UCLA', 'UCD']
"""
search_range = [target_avg[0] - target_range, target_avg[0] + target_range]
lower_bound = search_range[0]
upper_bound = search_range[1]
noted_scores = target_avg[1]
score_index = 1
in_range = lambda avg: (avg >= lower_bound and avg <= upper_bound)
score_avg = lambda scores, kept_scores: sum(scores) / len(scores) \
if len(scores) <= kept_scores \
else sum(scores[0:kept_scores]) / kept_scores
return list(map(lambda name: name[0], \
list(filter(lambda band: \
in_range(score_avg(band[score_index], noted_scores)), \
filter(lambda band: True if len(band[score_index]) >= min_shows \
else False, list(bands.items())))))) | 1b2b93f0a1d4236ad62102205606eff8afb3802a | 704,428 |
def get_named_targets():
""" Return a list of named target date ranges """
return ["std_train", "std_val", "std_test", "std_ens", "std_all", \
"std_future", "std_contest_fri", "std_contest", "std_contest_daily", "std_contest_eval", \
"std_contest_eval_daily", "std_paper", "std_paper_daily"] | 23a15efff1facc5028e980d659ca6d2f61cdddf0 | 704,429 |
def get_data_colums(epoch):
"""Return the data columns of a given epoch
:param epoch: given epoch in a numpy array, already readed from .csv
"""
    ID = epoch[:, 0]
    RA = epoch[:, 1]
    RA_err = epoch[:, 2]
    Dec = epoch[:, 3]
    Dec_err = epoch[:, 4]
    Flux = epoch[:, 5]
    Flux_err = epoch[:, 6]
    if epoch.shape[1] > 7:
        Neighbr = epoch[:, 7]
        Nhbr1_d = epoch[:, 8]
        Nhbr2_d = epoch[:, 9]
        return ID, RA, RA_err, Dec, Dec_err, Flux, Flux_err, Neighbr, Nhbr1_d, Nhbr2_d
    else:
        return ID, RA, RA_err, Dec, Dec_err, Flux, Flux_err | ba497f0aacf8356b80c8c433af05716b90519665 | 704,430 |
def bin_position(max_val):
"""returns position features using some symbols. Concatenate them at the end of
sentences to represent sentence lengths in terms of one of the three buckets.
"""
symbol_map = {0: " `", 1: " _", 2: " @"}
if max_val <= 3:
return [symbol_map[i] for i in range(max_val)]
first = max_val // 3
second = 2 * first
return [" `"] * first + [" _"] * (second - first) + [" @"] * (max_val - second) | 2c6caf100c07d56211ba8f8bfcef103dd623c6f5 | 704,431 |
import os
def f_split_path(fpath, normpath=True):
"""
Splits path into a list of its component folders
Args:
normpath: call os.path.normpath to remove redundant '/' and
up-level references like ".."
"""
if normpath:
fpath = os.path.normpath(fpath)
allparts = []
while 1:
parts = os.path.split(fpath)
if parts[0] == fpath: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == fpath: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
fpath = parts[0]
allparts.insert(0, parts[1])
return allparts | 0a0fafe2263cb77727609053866f7c1b95fa12d0 | 704,432 |
def qbinomial(n, k, q = 2):
"""
Calculate q-binomial coefficient
"""
c = 1
for j in range(k):
c *= q**n - q**j
for j in range(k):
c //= q**k - q**j
return c | 43c167aa506bd9ee6b87163d10da5b02e297e067 | 704,434 |
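A hedged worked example: the Gaussian binomial coefficient [4 choose 2] at q = 2 counts the 2-dimensional subspaces of F_2^4, of which there are 35.
assert qbinomial(4, 2) == 35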
def _tiramisu_parameters(preset_model='tiramisu-67'):
"""Returns Tiramisu parameters based on the chosen model."""
if preset_model == 'tiramisu-56':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 12,
'layers_per_block': 4
}
elif preset_model == 'tiramisu-67':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 16,
'layers_per_block': 5
}
elif preset_model == 'tiramisu-103':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 16,
'layers_per_block': [4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
}
else:
raise ValueError(f'Tiramisu model {preset_model} not available.')
return parameters | 74e2dadf2a6af864b3f9dfec6241bf71833676f8 | 704,435 |
def bond_yield(price, face_value, years_to_maturity, coupon=0):
"""
"""
return (face_value / price) ** (1 / years_to_maturity) - 1 | 4c4a90f0fb29564acdad05138ca17932da39eb61 | 704,436 |
from typing import Counter
def classifyChord(chordPosition):
"""
:param chordPosition:所有音符的位置,所有音符都是在不同弦上,并且各位置的距离是限制在人类手掌范围内的。
例如:([6, 5], [5, 7], [4, 7], [3, 5], [2, 5]),表示6弦5品,5弦7品,4弦7品,3弦5品,2弦5品
:return:和弦类型,是一个列表,用来表示所有非空弦音,从低品到高品对应的个数。
例如:输入([6, 5], [5, 7], [4, 7], [3, 5], [2, 5]),返回[[5,3],[7,2]]
和弦分类决定了和弦的处理方式
"""
frets = []
for item in chordPosition:
if item[1] != 0:
frets.append(item[1])
return Counter(frets).most_common() | 1c9af3737f2e4ba2437a457e74f37fbbe1ff0406 | 704,438 |
def update_active_boxes(cur_boxes, active_boxes=None):
"""
Args:
cur_boxes:
active_boxes:
Returns:
"""
if active_boxes is None:
active_boxes = cur_boxes
else:
active_boxes[0] = min(active_boxes[0], cur_boxes[0])
active_boxes[1] = min(active_boxes[1], cur_boxes[1])
active_boxes[2] = max(active_boxes[2], cur_boxes[2])
active_boxes[3] = max(active_boxes[3], cur_boxes[3])
return active_boxes | dfa1c9b32b9af9c6c9a1fb321f907dad51f9cca0 | 704,439 |
import torch
def ones(shape, dtype=None):
"""Wrapper of `torch.ones`.
Parameters
----------
shape : tuple of ints
Shape of output tensor.
dtype : data-type, optional
Data type of output tensor, by default None
"""
return torch.ones(shape, dtype=dtype) | a234936baa16c8efdc63e903d8455895ab7f2f0c | 704,440 |
import random
def rollDie():
"""returns a random int between 1 and 6"""
return random.choice([1, 2, 3, 4, 5, 6]) | 27a3d3586fe313d78a5aea6dab8d10c58e76df56 | 704,441 |