content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import re
def get_test_args(args_line):
    """Returns the list of arguments from provided text line."""
    # Drop an optional trailing comment (everything from the first '#' on).
    head, _sep, _comment = args_line.partition('#')
    # Split on spaces that are not escaped with a backslash.
    return re.split(r'(?<!\\) ', head.strip())
import pickle
def load_pickle(pickle_file_name):
    """Loads a pickle from given pickle_file_name."""
    with open(pickle_file_name, "rb") as handle:
        contents = pickle.load(handle)
        print("Pickle file loaded: {}".format(pickle_file_name))
    return contents
def get_image_sizes(metadata):
    """Parse ``metadata.image_sizes`` into a dict of image dimensions.

    The file has one CSV record per line::

        <path>,<w>,<h>
        path/to/image1.jpg,500,300

    Returns a dict mapping image path -> (width, height) as ints.
    """
    sizes = {}
    with open(metadata.image_sizes) as handle:
        for record in handle:
            path, width_s, height_s = record.strip('\n').split(',')
            sizes[path] = (int(width_s), int(height_s))
    return sizes
from typing import Counter
def word_form_hapaxes(tokens):
    """Return the wordform hapaxes of ``tokens``.

    A hapax is a wordform that appears exactly once. Plain Python
    suffices here: Counter (from the standard library's collections,
    re-exported by typing) tallies the frequencies in one pass.
    """
    tallies = Counter(tokens)
    return [form for form, freq in tallies.items() if freq == 1]
import math
def vectorDistance(vector1, vector2):
    """Return the Euclidean distance between two vectors with .x/.y attributes."""
    dx_sq = math.pow(vector2.x - vector1.x, 2)
    dy_sq = math.pow(vector2.y - vector1.y, 2)
    return math.sqrt(dx_sq + dy_sq)
def tmp(tmpdir_factory):
    """Create a common temp directory."""
    tmp_dir = tmpdir_factory.mktemp('tmp_test')
    return str(tmp_dir)
def count_linestokens(line):
    """Count the number of tokens in a sentence (punctuation included).

    :param line: the sentence, tokens separated by single spaces
    :return: number of space-separated tokens
    """
    return len(line.split(' '))
def tokenize(text):
    """Tokenize a passage of text, i.e. return a list of words.

    Periods are removed before splitting on single spaces.
    """
    return text.replace('.', '').split(' ')
def _get_cell(row, name, decimal=False):
"""Retrieves a cell from a row by name."""
for cell in row.cells:
if cell.name == name:
if decimal:
return cell.decimal_value[0]
return cell.string_value[0] | 16e0c5475121a3d01fdef1ca2f16e3a6e30cccba | 19,033 |
def is_label_definition(line):
    """Returns if the line is a LABEL node."""
    prefix = "LABEL "
    return line[:len(prefix)] == prefix
def nfa_word_acceptance(nfa: dict, word: list) -> bool:
    """Check whether ``word`` is accepted by the NFA ``nfa``.

    A word w is accepted iff there exists at least one accepting run on w.

    :param dict nfa: input NFA;
    :param list word: list of symbols in nfa['alphabet'];
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
    # Breadth-first simulation: the frontier holds every state reachable
    # after consuming the symbols read so far.
    frontier = set(nfa['initial_states'])
    for symbol in word:
        successors = set()
        for state in frontier:
            if (state, symbol) in nfa['transitions']:
                successors.update(nfa['transitions'][state, symbol])
        if not successors:
            # No state can consume this symbol: reject early.
            return False
        frontier = successors
    return bool(frontier.intersection(nfa['accepting_states']))
def dt2str(dt):
    """Convert a datetime.timedelta into a short human-readable string.

    Days win over hours, hours over minutes; only one unit is shown.
    """
    if dt.days:
        return '%dD' % dt.days
    if dt.seconds > 3600:
        return '%dH' % (dt.seconds // 3600)
    return '%dM' % (dt.seconds // 60)
def _rho1(basis_states):
"""State ρ₁ from the "3states" functional"""
d = len(basis_states) # dimension of logical subspace
return sum(
[
(2 * (d - i) / (d * (d + 1))) * psi * psi.dag()
for (i, psi) in enumerate(basis_states)
# note that i is 0-based, unlike in the paper
]
) | 939f9835f011fcf3457f7a3cb0f63b446d2cd093 | 19,054 |
def str_to_bool(bool_as_string: str) -> bool:
    """
    A converter that turns a string representation of ``True`` into a boolean.

    The (case insensitive) strings 'true', 't', '1', 'yes' and 'y' produce
    ``True``; every other string produces ``False``.

    :param bool_as_string: The string to be converted to a bool.
    :type bool_as_string: str
    :rtype: bool
    :raises TypeError: Raised when any type other than a string is passed.
    """
    if not isinstance(bool_as_string, str):
        raise TypeError("Only string types supported")
    truthy = {"true", "t", "1", "yes", "y"}
    return bool_as_string.lower() in truthy
def split_conn_PFI(connection):
    """Return PFI input number of a connection string such as 'PFI0' as an
    integer, or raise ValueError if format is invalid"""
    try:
        _head, _sep, suffix = connection.partition('PFI')
        return int(suffix)
    except ValueError:
        msg = "PFI connection string %s does not match format 'PFI<N>' for integer N"
        raise ValueError(msg % str(connection))
def format_block(block: list) -> str:
    """
    Render a flat nine-element list as a 3x3 grid, e.g.

    Transforms [1, 2, 3, 4, 5, 6, 7, 8, 9] -> 1 2 3
                                              4 5 6
                                              7 8 9

    (The original docstring showed an 8-element input producing 9 outputs;
    the function always emits three rows of up to three items, so shorter
    inputs simply yield shorter rows.)
    """
    return "\n".join(" ".join(str(v) for v in block[i*3:i*3+3])
                     for i in range(3))
def num_diffs(state):
    """
    Takes a state and returns the number of differences between
    adjacent entries.

    num_diffs(str) -> int
    """
    # Pair each entry with its successor and count mismatches.
    return sum(a != b for a, b in zip(state, state[1:]))
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass for py2 & py3

    This code snippet is copied from six.

    :param meta: the metaclass to apply to the eventual subclass.
    :param bases: base classes for the eventual subclass.
    :return: a throwaway class; deriving from it yields a class whose
        metaclass is ``meta`` and whose bases are ``bases``.
    """
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary_class created below.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            # Subclassing temporary_class lands here: build the real class
            # with the requested metaclass and bases.
            return meta(name, bases, d)
    return metaclass("temporary_class", None, {})
def format_time(seconds):
    """
    Format a duration given in seconds as a human-readable string.

    :param seconds: duration in seconds, in float format
    :return: formatted time string such as '1h 2min 3.00s'
    """
    hours = int(seconds // 3600)
    minutes = int(seconds % 3600 // 60)
    secs = seconds % 60
    if hours:
        return '{:d}h {:d}min {:.02f}s'.format(hours, minutes, secs)
    if minutes:
        return '{:d}min {:.02f}s'.format(minutes, secs)
    return '{:.02f}s'.format(secs)
def quat_negate(q):
    """
    Return ``-q`` (the quaternion multiplied by -1.0).
    """
    negated = q * -1.0
    return negated
def _SplitAndPad(s):
"""Splits a string on newlines, and pads the lines to be the same width."""
split_string = s.split('\n')
width = max(len(stringpiece) for stringpiece in split_string) + 1
padded_strings = [
stringpiece.ljust(width, ' ') for stringpiece in split_string
]
return padded_strings | f51976a558c6c76b26d9cf6e9302ce9d31436aed | 19,083 |
def check_language_presence(match) -> bool:
    """Return True when the regex match object defines a 'lang' group."""
    try:
        match.group('lang')
    except IndexError:
        # group() raises IndexError when the pattern has no such group.
        return False
    return True
import six
def to_unicode(value):
    """
    Converts string to unicode:

    * Decodes value from utf-8 if it is a byte string
    * Otherwise just returns the same value

    :param value: any object; only ``bytes`` is transformed.
    :return: ``str`` when a byte string was passed, otherwise ``value``.
    """
    # six.binary_type is simply ``bytes`` on Python 3 (this file already
    # uses py3-only syntax elsewhere), so the six dependency is unneeded.
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value
def strip_balanced_edge_parens(s):
    """
    Return a string where a pair of balanced leading and trailing parenthesis is
    stripped.
    For instance:
    >>> strip_balanced_edge_parens('(This is a super string)')
    'This is a super string'
    >>> strip_balanced_edge_parens('(This is a super string')
    '(This is a super string'
    >>> strip_balanced_edge_parens('This is a super string)')
    'This is a super string)'
    >>> strip_balanced_edge_parens('(This is a super (string')
    '(This is a super (string'
    >>> strip_balanced_edge_parens('(This is a super (string)')
    '(This is a super (string)'
    """
    # Only strip when the string is wrapped by parens AND the interior
    # contains no other parenthesis at all.
    if not (s.startswith('(') and s.endswith(')')):
        return s
    inner = s[1:-1]
    if '(' in inner or ')' in inner:
        return s
    return inner
def hex_to_rgb(hexcode):
    """
    Convert a hex colour code (with or without a leading '#') to an RGB tuple.
    """
    # The last six characters are RRGGBB; parse each byte pair as base-16.
    tail = hexcode[-6:]
    return tuple(int(tail[i:i + 2], 16) for i in (0, 2, 4))
from typing import Union
from typing import List
def add_n(obj: Union[int, List], n: int) -> Union[int, List]:
    """Return a new nested list where <n> is added to every item in <obj>.

    >>> add_n(10, 3)
    13
    >>> add_n([1, 2, [1, 2], 4], 10)
    [11, 12, [11, 12], 14]
    """
    # Base case: a bare int. Otherwise recurse into each element,
    # building a fresh list (the input is never mutated).
    if isinstance(obj, int):
        return obj + n
    return [add_n(item, n) for item in obj]
def splitdefines(txt):
    """Split the code into two lists: preprocessor defines and plain code."""
    pre, body = [], []
    for line in txt.split("\n"):
        bucket = pre if line.startswith("#") else body
        bucket.append(line)
    return pre, body
def linear_search(item, my_list):
    """
    Searching position by position.

    :param item: the number to look for
    :param my_list: a list of integers
    :return: either True or False if the item is in the list or not.
    """
    # Return at the first hit instead of always scanning the whole list
    # (the original kept looping after a match was found).
    for element in my_list:
        if item == element:
            return True
    return False
from typing import Optional
def build_cfn(
    ret: str,
    name: str,
    *,
    params: Optional[list[str]] = None,
    body: Optional[list[str]] = None,
    vret: Optional[str] = None,
) -> str:
    """Build the text of a C++ function definition.

    :param ret: return type ("string" is rewritten to std::string).
    :param name: function name.
    :param params: parameter declarations, e.g. ["int a"].
    :param body: statements (without trailing semicolons).
    :param vret: expression for the return statement (omitted for void).
    """
    indented = [" " + stmt for stmt in body] if body else body
    if ret == "string":
        ret = "std::string"
    # returntype functionName(type varname, for all params)
    signature = f'{ret} {name}({", ".join(params or [])})'
    body_text = ";\n".join(indented) + ";" if indented else ""
    return_stmt = f" return {vret};" if ret != "void" else ""
    return "\n".join([signature, "{", body_text, return_stmt, "};\n"])
import random
import string
def random_schema_name() -> str:
    """Generate a random PostgreSQL schema name for testing."""
    # Ten random lowercase letters; S311 is fine here, this is test-only.
    suffix = ''.join(
        random.choice(string.ascii_lowercase)  # noqa:S311
        for _ in range(10)
    )
    return 'temp_{name}'.format(name=suffix)
from typing import Iterable
def row2string(row: Iterable[float], sep: str = ', ') -> str:
    """
    Convert a one-dimensional iterable of floats to a string.

    Parameters
    ----------
    row: list or tuple or 1-D ndarray
    sep: str
        string separator between elements (default: ', ')

    Returns
    -------
    string representation of a row
    """
    return sep.join(map('{0}'.format, row))
import asyncio
def send_message(connection, *messages, **kwargs):
    """
    Sends a message to a connected player.

    The ``connection.send_message`` coroutine is scheduled on the event
    loop via ``asyncio.ensure_future`` rather than awaited, so callers may
    fire-and-forget or await the returned future themselves.

    :param connection: The connection to send the message to.
    :param messages: The message(s) to send.
    :param kwargs: Extra keyword arguments forwarded to ``send_message``.
    :return: A Future for the message(s) being sent.
    """
    return asyncio.ensure_future(connection.send_message(*messages, **kwargs))
def calculate_city_state_qty_delta(df):
    """
    This function creates the specific market growth (city + state observation) rate using quantity by doing the following:
    1. Creates the city_state_qty_delta_pop feature out of the quantity_of_mortgages_pop
    2. Creates the city_state_qty_delta_nc feature out of the quantity_of_mortgages_nc
    3. Returns the df with the new features

    NOTE(review): pct_change is taken within each (city, state) group after
    sorting rows by year, so this assumes one observation per group per
    year — confirm there are no duplicate years. Also note df is mutated
    in place and returned.
    """
    # create city_state_qty_delta_pop (year-over-year % change per city/state)
    df["city_state_qty_delta_pop"] = df.sort_values(["year"]).groupby(["city", "state"])[["quantity_of_mortgages_pop"]].pct_change()
    # create city_state_qty_delta_nc (same, for the _nc quantity column)
    df["city_state_qty_delta_nc"] = df.sort_values(["year"]).groupby(["city", "state"])[["quantity_of_mortgages_nc"]].pct_change()
    return df
def falsy_to_none_callback(ctx, param, value):  # noqa: U100
    """Convert falsy object to ``None``.

    Some click arguments accept multiple inputs and, when nothing is
    passed, default to empty lists or tuples rather than ``None``. Since
    pytask uses ``None`` as a placeholder value for skippable inputs,
    normalize those falsy values here.

    Examples
    --------
    >>> falsy_to_none_callback(None, None, ()) is None
    True
    >>> falsy_to_none_callback(None, None, []) is None
    True
    >>> falsy_to_none_callback(None, None, 1)
    1
    """
    if value:
        return value
    return None
def create_voting_dict(strlist):
    """
    :param strlist: strlist: list of strings representing voting records. Each line space delimited {last_name} {party} {state} {votes...}
    :return: Dictionary mapping last name to list of integer votes: -1, 0, 1 (no, abstain, yes)

    >>> strlist = ['Lesko D MD 1 0 -1 0 1', 'Klein R MA 0 1 1 -1 0']
    >>> voting_dict = create_voting_dict(strlist)
    >>> voting_dict['Lesko'][2] == -1
    True
    >>> voting_dict['Klein'][4] == 0
    True
    """
    result = {}
    for record in strlist:
        fields = record.split(' ')
        # fields[0] is the last name; votes start after party and state.
        result[fields[0]] = [int(vote) for vote in fields[3:]]
    return result
def create_constant_tensor(shape, value):
    """Create a nested-list tensor of the given `shape`, filled with `value`."""
    if len(shape) == 1:
        # Innermost axis: a flat row of `value`s.
        return [value for _ in range(shape[0])]
    # Recurse: shape[0] copies of the tensor for the remaining axes.
    return [create_constant_tensor(shape[1:], value) for _ in range(shape[0])]
def gen_cat(df):
    """
    Generate a data frame only including categorical variables.

    Parameters
    ----------
    df : pandas data frame
        whole data frame.

    Returns
    -------
    df_new: pandas data frame
        new data frame only including categorical variables.

    NOTE(review): raises KeyError if any listed column is missing from df.
    """
    # Hand-picked list of column names treated as categorical.
    feat_cat = ['derived_msa_md', 'county_code',
                'conforming_loan_limit',
                'derived_race', 'derived_sex',
                'hoepa_status',
                'interest_only_payment',
                'balloon_payment', 'occupancy_type',
                'total_units', 'applicant_race_1', 'applicant_sex',
                'applicant_age_above_62', 'co_applicant_age_above_62',
                'derived_loan_product_type',
                'lien_status', 'open_end_line_of_credit',
                'business_or_commercial_purpose'
                ]
    df_new = df[feat_cat]
    return df_new
def the_box(box, width=3, height=3):
    """Return all coordinates of the fields of the given box number.

    Args:
        box (int): The number of the box.
        width (int): The width of the sudoku.
        height (int): The height of the sudoku.

    Returns:
        list: The coordinates of the box with the given number.

    Raises:
        ValueError: If the box number is invalid.

    Example::

        >>> the_box(0, width=3, height=3)
        [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
    """
    if not 0 <= box < width * height:
        # Parenthesize the product: the original `msg % width * height`
        # formatted with only `width`, then repeated the whole message
        # `height` times. The wording ("less equal 0") was also inverted.
        raise ValueError(
            "box must be greater equal 0 and less than %d" % (width * height))
    x = (box % height) * width
    y = (box // height) * height
    return [(y + i, x + j) for i in range(height) for j in range(width)]
def mass2_from_m1_q(mass1, q):
    """Return the secondary mass given samples for mass1 and mass ratio."""
    secondary_mass = mass1 * q
    return secondary_mass
from typing import Type
from typing import get_origin
import enum
def is_enum_type(type_: Type) -> bool:
    """Return True if the input is an :class:`enum.Enum` subclass."""
    # Parameterized generics (e.g. list[int]) have a non-None origin and
    # are never enum classes.
    if get_origin(type_) is not None:
        return False
    return issubclass(type_, enum.Enum)
def clean_line(line):
    """Clean a single line: hyphens become spaces, a space is inserted
    after each period, and surrounding whitespace is stripped."""
    cleaned = line.replace('-', ' ')
    cleaned = cleaned.replace('.', '. ')
    return cleaned.strip()
def parse_line(line):
    """Parse a composition line by stripping whitespace and separating
    the isotope from its atomic density.

    Parameters
    ----------
    line: str
        line of isotope and composition

    Returns
    -------
    tuple : (str, float)
        (isotope, atomic density)

    Raises
    ------
    ValueError
        if the line does not contain exactly two fields.
    """
    # split() with no argument tolerates leading space and runs of
    # whitespace between fields; the original split(" ") broke on
    # double spaces ('U235  0.04' -> three fields).
    isotope, atom_density = line.split()
    return (isotope, float(atom_density))
from typing import Optional
from typing import Dict
from typing import Any
def init_manifest(
    package_name: str, version: str, manifest_version: Optional[str] = "ethpm/3"
) -> Dict[str, Any]:
    """
    Return an initial dict with the minimal required fields for a valid manifest.
    Should only be used as the first fn to be piped into a `build()` pipeline.
    """
    manifest: Dict[str, Any] = {"name": package_name}
    manifest["version"] = version
    manifest["manifest"] = manifest_version
    return manifest
def format_line(data, linestyle):
    """Format a list of elements using the given line style
    (begin marker + sep-joined data + end marker)."""
    joined = linestyle.sep.join(data)
    return "".join([linestyle.begin, joined, linestyle.end])
def extract(string, start='(', stop=')'):
    """
    Extract the string that is contained between start and stop strings.

    :param string: str, string to process
    :param start: str, start string
    :param stop: str, stop string
    :return: str, extracted string; the original string when either
        marker is absent.

    NOTE(review): only one character after the start marker is skipped, so
    multi-character ``start`` markers leave part of the marker in the result.
    """
    try:
        return string[string.index(start) + 1:string.index(stop)]
    except ValueError:
        # str.index raises ValueError when a marker is missing; catching
        # only that (instead of bare Exception) avoids masking real bugs.
        return string
def get_energy_flows(fl):
    """
    Subsets the flow list for all energy flows.

    :param fl: df in standard flowlist format (needs "Unit", "Flowable"
        and "Context" columns)
    :return: df in standard flowlist format
    """
    # Energy flows are anything in MJ, plus these named fuels regardless of unit.
    list_of_flows = ['Uranium','Biomass','Hardwood','Softwood','Wood']
    flows = fl[(fl["Unit"]=="MJ") | (fl['Flowable'].isin(list_of_flows))]
    #Peat is captured in USGS_mineral_resource_flows so exclude here
    flows = flows[flows['Flowable']!='Peat']
    # Keep only contexts whose string begins with "resource".
    flows = flows[flows["Context"].str.startswith("resource")]
    return flows
def seconds_to_milliseconds(seconds: float) -> int:
    """
    Converts from seconds to milliseconds.

    :param seconds: time in seconds
    :return: converted time rounded to nearest millisecond
    """
    # round(), not int(): int() truncates toward zero, contradicting the
    # documented "rounded to nearest millisecond" behavior.
    return round(seconds * 1000)
def fit_range(x, inmin, inmax, outmin, outmax):
    """Linearly map a value from one interval onto another.

    Args:
        x (int or float): the input value
        inmin (int or float): The minimum of the input range
        inmax (int or float): The maximum of the input range
        outmin (int or float): The minimum of the desired range
        outmax (int or float): The maximum of the desired range

    Returns:
        int or float: the computed value
    """
    span_in = inmax - inmin
    span_out = outmax - outmin
    return (x - inmin) * span_out / span_in + outmin
def device_id_generate_doc_template_values(url_root):
    """
    Show documentation about deviceIdGenerate.

    :param url_root: base URL used by the documentation page to build links.
    :return: dict of template values consumed by the API docs renderer.
    """
    # Parameters the caller must supply.
    required_query_parameter_list = [
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    # This endpoint takes no optional parameters.
    optional_query_parameter_list = [
    ]
    try_now_link_variables_dict = {
    }
    # Example response body shown verbatim in the docs.
    api_response = '{\n' \
                   '  "voter_device_id": string (88 characters long),\n' \
                   '}'
    template_values = {
        'api_name': 'deviceIdGenerate',
        'api_slug': 'deviceIdGenerate',
        'api_introduction':
            "Generate a transient unique identifier (voter_device_id - stored on client) "
            "which ties the device to a persistent voter_id (mapped together and stored on the server)."
            "Note: This call does not create a voter account -- that must be done in voterCreate.",
        'try_now_link': 'apis_v1:deviceIdGenerateView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
    }
    return template_values
def downsize_image(image, n):
    """
    Downsize an image by keeping every n-th pixel along the first two axes.

    It might be possible to resize to an arbitrary resolution, but that
    would require interpolation and care with semi-transparent pixels.

    NOTE(review): assumes ``image`` supports 2-D extended slicing (e.g. a
    numpy array whose first two axes are rows/columns) — confirm with callers.
    """
    return image[::n, ::n]
def data_loss(df_clean, df_raw):
    """
    Return the data loss in percent: the share of df_raw rows that were
    removed to produce df_clean.
    """
    # The original reported the *retained* fraction (clean/raw) while
    # labelling it "data loss"; report the removed share instead.
    lost = (1 - df_clean.shape[0] / df_raw.shape[0]) * 100
    return f"{round(lost, 3)}% data loss"
def GetRoleName(effective_ids, project):
    """Determines the name of the role a member has for a given project.

    Args:
      effective_ids: set of user IDs to get the role name for.
      project: Project PB containing the different member lists.

    Returns:
      The name of the role, or None when none of the IDs are members.
    """
    # Check the most privileged role first; first overlap wins.
    role_memberships = (
        ('Owner', project.owner_ids),
        ('Committer', project.committer_ids),
        ('Contributor', project.contributor_ids),
    )
    for role, member_ids in role_memberships:
        if not effective_ids.isdisjoint(member_ids):
            return role
    return None
def create_obj(conn, new_object):
    """
    Insert a new object into the allObjects table.

    Parameters
        conn: open sqlite3 connection
        new_object (tuple): a tuple containing (name, )

    Returns
        obj_id (int): id of the newly created object in the database
    """
    insert_sql = ''' INSERT INTO allObjects(name)
              VALUES(?) '''
    cursor = conn.cursor()
    cursor.execute(insert_sql, new_object)
    conn.commit()
    return cursor.lastrowid
def five_by_five_shape(n):
    """
    Determine the shape of a five-by-five view, allowing fewer than 25
    observations.

    Parameters:
        n: length of subject list to display

    Returns:
        Dimensions of grid/subplots as (nrows, ncols)
    """
    full_rows, leftover = divmod(n, 5)
    if full_rows == 0:
        # Fewer than five subjects: a single partial row.
        return (1, leftover)
    if leftover:
        # Extra row for the remainder.
        return (full_rows + 1, 5)
    return (full_rows, 5)
def update_aliases(df, new_aliases=None, inplace=False):
    """
    For each object:
    1. Add new aliases if any
    2. Make sure unique pipe-separated list with OBJECT first in list

    Args:
        df (pd.DataFrame): dataframe with an ALIASES column (plus OBJECT,
            GAIADR2ID and TWOMASSID columns)
        new_aliases (Series, array, list): aliases to append for each object.
            If Series, make sure indexes match the dataframe
        inplace (bool): If true, edit df directly, otherwise return a copy

    Returns:
        None when inplace, otherwise the updated copy of df.
    """
    if not inplace:
        df = df.copy()
    # Make sure all strings
    df.ALIASES = df.ALIASES.fillna('')
    # Add new aliases
    if new_aliases is not None:
        new_aliases = new_aliases.fillna('')
        df.ALIASES = df.ALIASES.str.cat(new_aliases, sep='|')
    # Make sure 2MASS and Gaia included
    aliases = df.ALIASES.copy()
    mask_gaia = df.GAIADR2ID.notnull()
    # NOTE(review): these str.cat calls pass no sep, so the Gaia/2MASS ids
    # are appended directly onto the existing alias string with no '|'
    # between — the split('|') below then keeps them fused to the previous
    # alias. Looks like sep='|' was intended; confirm before changing.
    aliases[mask_gaia] = aliases[mask_gaia].str.cat(
        'Gaia DR2 ' + df.GAIADR2ID[mask_gaia]
    )
    mask_2mass = df['TWOMASSID'].notnull()
    aliases[mask_2mass] = aliases[mask_2mass].str.cat(
        '2MASS J' + df['TWOMASSID'][mask_2mass]
    )
    # Make sure object is not current list, and that list is unique
    objs = df.OBJECT.tolist()
    aliases = df.ALIASES.apply(  # Remove object name and empty strings
        lambda row: [val for val in row.split('|')
                     if val not in objs + ['']
                     ]
    )
    aliases = aliases.apply(set).str.join('|')  # Unique and join with pipe
    # Add object name as first alias
    al_mask = aliases.str.len() > 0
    df.loc[al_mask, 'ALIASES'] = df.OBJECT.str.cat(aliases, sep='|')
    df.loc[~al_mask, 'ALIASES'] = df.OBJECT.copy()
    return None if inplace else df
def _get_BD_range(x):
    """Getting the BD range from a fraction ID
    (eg., "1.710-1.716").

    Parameters
    ----------
    x : str
        fraction ID

    Returns
    -------
    tuple -- BD start, middle, end

    NOTE(review): on the open-ended branches the unbounded endpoint is
    returned as the original *string* ('-inf...' start or '...inf' end),
    not a float — callers must tolerate mixed types in the tuple.
    """
    if x.startswith('-'):
        # Leading '-' means the start is negative/unbounded; rpartition
        # splits on the LAST '-', keeping the sign with the start token.
        [start,_,end] = x.rpartition('-')
    else:
        [start,_,end] = x.partition('-')
    if start.startswith('-inf'):
        # Open lower bound: nudge the end down 0.001 and use it as mid.
        end = round(float(end),3) - 0.001
        end = round(end,3)
        mid = end
    elif end.endswith('inf'):
        # Open upper bound: nudge the start up 0.001 and use it as mid.
        start = round(float(start),3) + 0.001
        start = round(start, 3)
        mid = start
    else:
        # Closed interval: midpoint of start and end.
        start = round(float(start),3)
        end = round(float(end),3)
        mid = round((end - start)/2 + start,3)
    return start, mid, end
def _chi2_ls(f):
"""Sum of the squares of the residuals.
Assumes that f returns residuals.
Minimizing this will maximize the likelihood for a
data model with gaussian deviates.
"""
return 0.5 * (f ** 2).sum(0) | af223f48bc0beffa9a99fba872c769a94fbe3235 | 19,200 |
def num_words(tokens):
    """Given list of words, return no. of words (int)"""
    word_count = len(tokens)
    return word_count
def is_in_group(user, group_name):
    """Take a user and a group name, and returns `True` if the user is in that group.

    Uses the Django ``user.groups`` manager with queryset ``.exists()``,
    so only a membership check is issued, not a full fetch.
    """
    return user.groups.filter(name=group_name).exists()
def _propagate_pair(pair, go_dict):
"""Propagates a pair of annotations.
For a given pair of annotation terms, the GO annotations will
be replaced by a set of their (recursive) child terms (including itself).
Other types of annotations are left untouched, but converted to a 1-member
set.
Parameters
----------
pair : tuple
A sorted tuples of annotation terms, one for the host and one for
the pathogen, e.g. ('h@GO:0030133', 'p@IPR009304')
go_dict : dict
A dictionary containing the GO hierarchy. Constructed via the
obo_tools.importOBO() function in the goscripts package.
Returns
-------
tuple
A sorted tuple of annotation term sets, one for the host and
one for the pathogen. Each element in the tuple consists of a set of
terms, e.g. the GO term itself and all of its descendants.
"""
# create empty list to store the propagated (child) annotations for the
# two parent annotations in the pair (in same order as original pair)
propagated_pair = []
# for both annotations in the pair, propagate through GO hierarchy
for term in pair:
# only for GO terms, not IPR
if 'GO' in term:
prefix = term[:2]
go_object = go_dict.get(term[2:])
# append original annotation if it can't be found in GO dict
if not go_object:
propagated_pair.append([term])
else:
# store all child terms of parent term in a list
child_terms = [
prefix + i for i in go_object.recursive_children
]
# add parent term itself and remove duplicates
propagation_set = set(child_terms) | set([term])
# add propagated annotations to storage list
propagated_pair.append(propagation_set)
else:
# store original term if it's not a GO term
propagated_pair.append({term})
# convert the length-2 list of annotation lists (1 for each parent
# annotation) to a tuple
# # e.g. ('h@GO:0060384', 'p@GO:0016787') ->
# ({'h@GO:0098546', 'h@GO:0030553', 'h@GO:0035438', 'h@GO:0030552',
# 'h@GO:0061507'},
# {'h@GO:0098546', 'h@GO:0030553', 'h@GO:0035438',# 'h@GO:0030552',
# 'h@GO:0061507'})
return tuple(propagated_pair) | b4745e2a01075c92c862ad23defe17a674b0317e | 19,211 |
def accuracy(predictions, targets):
    """Computes raw accuracy (True Predictions) / (All Predictions)

    Args:
        predictions (list): a list of predicted labels
        targets (list): a list of gold labels

    Returns:
        float: the raw accuracy between the predictions and the gold labels
    """
    assert len(predictions) == len(targets)
    correct = sum(1 for predicted, gold in zip(predictions, targets)
                  if predicted == gold)
    return float(correct) / len(targets)
from operator import mul
from fractions import Fraction
from functools import reduce
import math
def numCombs(n, k):
    """
    n choose k algorithm.

    Returns the binomial coefficient C(n, k) as an int (0 when k > n).

    :raises ValueError: if n or k is negative (from math.comb).
    """
    # math.comb (Python 3.8+) uses exact integer arithmetic, replacing
    # the hand-rolled Fraction product.
    return math.comb(n, k)
def hasrepression(graph, cycle):
    """Return whether the cycle (list of node IDs) in the graph (NetworkX DiGraph) includes any repressions."""
    # Pair each node with its successor, wrapping the last back to the first.
    successive = zip(cycle, cycle[1:] + cycle[:1])
    return any(graph.edges[u, v]['repress'] for u, v in successive)
def expiration_date_filter(all_options, expirationDate):
    """Takes a list of ALL_OPTIONS, and returns a list of only those with the argument EXPIRATIONDATE"""
    return list(filter(
        lambda option: option['expiration_date'] == expirationDate,
        all_options,
    ))
def onko_pituus_oikein(hetu):
    """Check that a Finnish personal identity code (henkilötunnus) has the
    correct length of 11 characters.

    Args:
        hetu (string): personal identity code

    Returns:
        boolean: True if the length is correct, False otherwise
    """
    # The original computed the answer twice ("tapa 1" via if/else, then
    # "tapa 2" overwrote it); a single comparison suffices.
    return len(hetu) == 11
def ReadAllPoints(fcsvFilePath):
    """Read a list of tuples from a Slicer FCSV file. Tuple order is
    (name, description, x, y, z).
    """
    points = []
    with open(fcsvFilePath, 'r') as f:
        for line in f:
            line = line.strip()
            # Skip blank lines (the original indexed line[0] and crashed
            # on them) as well as '#' comment lines.
            if not line or line.startswith('#'):
                continue
            tokens = line.split(',')
            if tokens[0].startswith('vtkMRMLMarkupsFiducialNode'):
                points.append((tokens[11], tokens[12], tokens[1], tokens[2], tokens[3]))
    return points
import colorsys
def hsv_to_rgb(h, s, v):
    """Converts a (hue, saturation, value) tuple to a (red, green, blue) tuple.

    Args:
        h, s, v: the HSV values

    Returns:
        an R, G, B tuple of ints in 0-255
    """
    channels = colorsys.hsv_to_rgb(h, s, v)
    return tuple(int(255 * channel) for channel in channels)
def validUTF8(data):
    """
    0. UTF-8 Validation

    Return: True if data is a valid UTF-8 encoding, else return False
    - A character in UTF-8 can be 1 to 4 bytes long
    - The data set can contain multiple characters
    - The data will be represented by a list of integers
    - Each integer represents 1 byte of data; only the 8 least
      significant bits of each integer are considered
    """
    remaining = 0  # continuation bytes still expected
    for value in data:
        byte = value & 0xFF  # keep only the low 8 bits
        if remaining:
            # Must be a continuation byte: 10xxxxxx.
            if byte >> 6 != 0b10:
                return False
            remaining -= 1
        elif byte >> 7:
            # Leading byte: count the run of leading 1 bits.
            ones = 0
            while (byte << ones) & 0x80:
                ones += 1
            # A lone continuation byte (1 one) or >4 ones is invalid.
            if ones == 1 or ones > 4:
                return False
            remaining = ones - 1
        # ASCII byte (0xxxxxxx): nothing to do.
    return remaining == 0
def absolute_difference_distance(x: float, y: float) -> float:
    """Distance measure for `get_anomalies_density`: the absolute value of
    the difference between the two values.

    Parameters
    ----------
    x:
        first value
    y:
        second value

    Returns
    -------
    result: float
        absolute difference between values
    """
    delta = x - y
    return abs(delta)
def create_parameter_string(command_string):
    """Create the parameter string: everything after the first space
    character, with all remaining space characters removed."""
    _command, _sep, remainder = command_string.partition(" ")
    return remainder.replace(" ", "")
def edges_flux_to_node_flux(G, attribute_name='flux'):
    """Sum all flux from incoming edges for each node in networkx object.

    :param G: networkx directed graph whose edges carry ``attribute_name``.
    :param attribute_name: edge-data key to sum (default 'flux').
    :return: dict mapping node -> summed incoming flux. The ``if edge[2]``
        guard skips edges whose attribute is missing (None) or falsy, so a
        flux of 0 contributes nothing either way.
    """
    node_fluxes = {}
    for node in G.nodes:
        # in_edges(..., data=attribute_name) yields (u, v, attr_value) triples.
        node_flux = sum([edge[2] for edge in list(G.in_edges(node, data=attribute_name)) if edge[2]])
        node_fluxes[node] = node_flux
    return node_fluxes
def compose_redis_key(vim_name, vdu_uuid):
    """Compose the key for redis given vim name and vdu uuid.

    Args:
        vim_name (str): The VIM name (lower-cased in the key)
        vdu_uuid (str): The VDU uuid (NFVI based)

    Returns:
        str: the key for redis, "<vim_name>:<vdu_uuid>"
    """
    return f"{vim_name.lower()}:{vdu_uuid}"
def get_file_extension(gdalformat):
    """
    A function to get the extension for a given file format
    (NOTE, currently only KEA, GTIFF, HFA, ENVI and PCIDSK are supported).

    :param gdalformat: GDAL format name, matched case-insensitively.
    :return: string extension including the leading dot.
    :raises Exception: when the format is not recognised.
    """
    # Table lookup replaces the original if/elif chain.
    extensions = {
        "kea": ".kea",
        "gtiff": ".tif",
        "hfa": ".img",
        "envi": ".env",
        "pcidsk": ".pix",
    }
    try:
        return extensions[gdalformat.lower()]
    except KeyError:
        # Same exception type and message as before for callers that catch it.
        raise Exception("The extension for the gdalformat specified is unknown.") from None
def create_survey_filename(url: str, ext: str = "csv") -> str:
    """Return a filename for a survey."""
    # the survey id is the final path segment of the url
    survey_id = url.rsplit('/', 1)[-1]
    return f"export_survey_{survey_id}.{ext}"
import yaml
def _yaml_to_dict(yaml_filename):
    """Reads and stores a yaml file as a dictionary

    Args:
        yaml_filename (str):
            The filename of the yaml file to read.

    Returns:
        input_dict (dict):
            The result of reading the yaml file and translating
            its structure into a dictionary.

    Raises:
        ValueError: If the file cannot be opened or its contents cannot
            be parsed as YAML; the underlying error is chained as the cause.
    """
    try:
        # Open the yaml file and import the contents into a
        # dictionary with the same structure
        with open(yaml_filename) as fp:
            input_dict = yaml.load(fp, Loader=yaml.FullLoader)
    except (OSError, yaml.YAMLError) as err:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; chain the cause so the real
        # failure (missing file vs. malformed YAML) is not lost.
        raise ValueError("Could not open file %s" % (yaml_filename)) from err
    return input_dict
def gapBetweenRanges(rangeA,rangeB):
    """Return the gap separating two (lo, hi) ranges, or zero if they overlap.

    The sign of the result says which range sits below the other.

    For example:

    * The gap between (0,10) and (15,25) is -5
    * The gap between (0,10) and (9,20) is 0
    * The gap between (20,30) and (10,18) is 2

    :param rangeA: a tuple (lo,hi) representing a range of values.
    :param rangeB: a tuple (lo,hi) representing a range of values.
    :returns: zero if the two ranges overlap; otherwise the gap separating them.
        If rangeA is below rangeB then the value is negative; if rangeA is
        above rangeB then the value is positive.
    """
    lowA, highA = rangeA
    lowB, highB = rangeB
    if highB < lowA:
        # rangeA lies entirely above rangeB -> positive gap
        return lowA - highB
    if lowB > highA:
        # rangeA lies entirely below rangeB -> negative gap
        return highA - lowB
    return 0
import functools
def RequireAuth(handler):
    """Decorator for webapp2 request handler methods.
    Only use on webapp2.RequestHandler methods (e.g. get, post, put),
    and only after using a 'Check____Auth' decorator.
    Expects the handler's self.request.authenticated to be not False-ish.
    If it doesn't exist or evaluates to False, 403s. Otherwise, passes
    control to the wrapped handler.
    """
    @functools.wraps(handler)
    def wrapper(self, *args, **kwargs):
        """Does the real legwork and calls the wrapped handler."""
        authenticated = getattr(self.request, 'authenticated', None)
        if authenticated:
            handler(self, *args, **kwargs)
        else:
            # missing or falsy -> forbidden
            self.abort(403)
    return wrapper
def ManifestXml(*args):
    """Joins arbitrary XML and wraps it in a <manifest> element."""
    body = '\n'.join(args)
    return f'<?xml version="1.0" encoding="UTF-8"?><manifest>{body}</manifest>'
def output_json(ips):
    """Returns a dict to output the IP Addresses as JSON"""
    # first row is the header names; remaining rows are data,
    # with every value rendered as a string
    headers, *rows = ips
    return [dict(zip(headers, [str(item) for item in row])) for row in rows]
def middle(seq):
    """Return middle item of a sequence, or the first of two middle items."""
    mid_index = (len(seq) - 1) // 2
    return seq[mid_index]
def make_duration(val):
    """
    Converts a string in `hh:mm:ss` representation
    to the equivalent number of seconds

    Args:
        val(str): input string in format `hh:mm:ss`
    """
    # None and the zero sentinel both map to 0 seconds
    if val is None or val == "00:00:00":
        return 0
    parts = val.split(':')
    # anything that is not exactly hh:mm:ss counts as zero
    if len(parts) != 3:
        return 0
    hours, minutes, seconds = (int(p) for p in parts)
    return hours * 3600 + minutes * 60 + seconds
import hashlib
def get_md5(input_file, chunk_size=1024 * 16):
    """Get the md5 of a file without reading entire file in to memory."""
    digest = hashlib.md5()
    # read fixed-size chunks until read() returns the empty sentinel,
    # so memory use stays constant for arbitrarily large files
    for chunk in iter(lambda: input_file.read(chunk_size), b''):
        digest.update(chunk)
    return digest.hexdigest()
def merge_ar_ssfr(ar_df, ssfr_mosdef_merge_no_dups):
    """Merges the ar_df with the ssfr_mosdef_merge_no_dups dataframe

    Parameters:
        ar_df (pd.DataFrame): frame keyed by the lower-case ('field', 'v4id') columns
        ssfr_mosdef_merge_no_dups (pd.DataFrame): ssfr/mosdef frame keyed by ('FIELD_STR', 'V4ID')

    Returns:
        ar_ssfr_merge (pd.DataFrame): Pandas dataframe of the ssfr info, mosdef_df info, and duplicates removed
    """
    left_keys = ['field', 'v4id']
    right_keys = ['FIELD_STR', 'V4ID']
    # left join: keep every ar_df row even when no ssfr match exists
    return ar_df.merge(ssfr_mosdef_merge_no_dups, how='left',
                       left_on=left_keys, right_on=right_keys)
def get_hero_image_url(hero_name, image_size="lg"):
    """
    Get a hero image based on name and image size
    """
    # strip the internal Dota entity prefix if present
    prefix = "npc_dota_hero_"
    if hero_name.startswith(prefix):
        hero_name = hero_name[len(prefix):]
    if image_size not in ('eg', 'sb', 'lg', 'full', 'vert'):
        raise ValueError("Not a valid hero image size")
    return ("http://media.steampowered.com/apps/dota2/images/heroes/"
            f"{hero_name}_{image_size}.png")
def to_lower_case(given: str) -> str:
    """Returns 'given' in lower case

    >>> to_lower_case("0D")
    '0d'
    """
    return str.lower(given)
from openpyxl.styles import Alignment
def prepare_excel(workbook, filters=True):
    """
    Formats the excel a bit in order to be displayed nicely
    workbook: openpyxl workbook
    filters: If True enable excel filtering headers
    returns: formated workbook
    """
    # openpyxl is an extra requirement
    for worksheet in workbook:
        # estimate column width
        for col in worksheet.columns:
            max_length = 0
            column = col[0].column_letter
            for cell in col:
                # NOTE(review): len(str(None)) == 4, so empty cells contribute
                # a minimum width of 4 to the estimate.
                if len(str(cell.value)) > max_length:
                    max_length = len(str(cell.value))
            # pad the widest value and cap columns at 50 units
            adjusted_width = (max_length + 3) * 1.2
            worksheet.column_dimensions[column].width = min([adjusted_width, 50])
        # enable excel filters
        if filters is True:
            # NOTE(review): relies on `column` leaking from the loop above,
            # i.e. the letter of the LAST column, so the filter spans A1..<last>1.
            worksheet.auto_filter.ref = f"A1:{column}1"
        # enable word wrap
        for row in worksheet.iter_rows():
            for cell in row:
                cell.alignment = Alignment(wrap_text=True)
                if isinstance(cell.value, str):
                    cell.value = cell.value.strip()
                    # digit-only strings are coerced back to ints so Excel
                    # treats them as numbers rather than text
                    if cell.value.isdigit():
                        cell.value = int(cell.value)
    return workbook | e9f4b20747a5d8d3fca804117c1e4e3950dc1d45 | 19,279
from datetime import datetime
def _n64_to_datetime(n64):
"""
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
"""
return datetime.utcfromtimestamp(n64.tolist() / 1e9) | 1d46e67edb29da3c0c340bfcc346e80c0fd36541 | 19,280 |
def _get(redis, key):
    """ Get the current hits per rolling time window.
    :param redis: Redis client
    :param key: Redis key name we use to keep counter
    :return: int, how many hits we have within the current rolling time window
    """
    # ZCARD returns the sorted-set cardinality; each hit is presumably stored
    # as one member of the set at `key` — confirm against the writer side.
    return redis.zcard(key) | 288d17e0ef4c0d667d984c7f462a2c07d6c66147 | 19,286
def github_uri(plug_name):
    """
    Just make the standard public https uri.
    """
    base = 'https://github.com/'
    return base + plug_name
def points(a, b, answer_given):
    """Check answer. Correct: 1 point, else 0"""
    true_answer = a * b
    if answer_given != true_answer:
        print('Sorry! Correct answer was: {:d}'.format(true_answer))
        return 0
    print('Correct!')
    return 1
def readInventory(filename):
    """Function to read Serpent bumat files

    Parameters
    ----------
    filename : str
        path to the bumatfile to be read

    Returns
    -------
    inventory : dict
        dictionary to store the inventory. keys are ZAID identifiers (str), values
        are atom densities (str) in b^{-1}cm^{-1}
    """
    # `with` guarantees the handle is closed even if reading fails
    # (the original could leak the file object on error).
    with open(filename) as mat:
        matfile = mat.readlines()
    inventory = {}
    # The first 6 lines are the bumat header; data lines follow as
    # "<ZAID>.<lib suffix> <density>".
    for line in matfile[6:]:
        fields = line.strip().split()
        if not fields:
            # tolerate blank/trailing lines instead of raising IndexError
            continue
        # strip the 4-character library suffix (e.g. ".09c") from the ZAID
        inventory[fields[0][:-4]] = fields[1]
    return inventory
def get_pokemon_type(pokemon_types):
    """Asks the user for a type of pokemon and displays the names of all
    pokemon with that type.

    Implicitly converts the type entered by the user to lower case.
    If the user enters an invalid type, warns the user and repeats until they
    enter a valid one.

    Args:
        pokemon_types: a list of pokemon types sorted in alphabetic order

    Returns:
        the pokemon type chosen by the user
    """
    # This is the prompt you'll display to the user to ask for their choice.
    # Don't modify it!
    prompt = ('enter a type from one of the following: \n{0}\n'.format(
        ', '.join(pokemon_types)))
    # This is the prompt you'll display to the user if they enter something
    # invalid. Don't modify it!
    warning = 'Unrecognized type'
    # keep asking until the (lower-cased) answer is a known type
    while True:
        choice = input(prompt).lower()
        if choice in pokemon_types:
            return choice
        print(warning)
def clip(x):
    """Limit the number in range [0, 1].

    if x < 0, x = 0
       x > 1, x = 1
    otherwise x = x
    """
    lower, upper = 0, 1
    capped = min(upper, x)
    return max(lower, capped)
def exists(env):
    """Returns true if tool exists."""
    # NOTE: SCons requires the use of this name, which fails gpylint.
    tool_name = 'distcc'
    return env.Detect(tool_name)
import pathlib
def get_source_id_from_file_path(file_path: pathlib.Path) -> str:
    """Extrapolates the source id from the file path.

    To retrieve the source id from the file name, the function uses the fact that the
    ICE uses a consistent naming convention consisting of the file type accompanied by
    the source id and the date the data in the file was generated.
    (e.g. COREREF_207_20201023.txt.bz2).

    Parameters
    ----------
    file_path: str
        The path to the file for which the source id has to be extrapolated.

    Returns
    -------
    str
        The source id.
    """
    # drop all extensions, then pick the middle "_<id>_" component
    stem = file_path.name.partition(".")[0]
    return stem.split('_')[1]
def _prep_vars(variables: list):
"""
Convert from a list to a comma separated string
:param variables: list of vars
:return: comma separated string
"""
# if vars is not a string, assume it's a list of multiple strings
out = ""
for i in range(len(variables) - 1):
out += variables[i]
out += ","
out += variables[-1]
return out | e5a8585f3c7ae8edd67e3893ab0bd17b035d17e3 | 19,316 |
import logging
def get_logger(log_file=None):# {{{
    """Set logger and return it.
    If the log_file is not None, log will be written into log_file.
    Else, log will be shown in the screen.
    Args:
        log_file (str): If log_file is not None, log will be written
            into the log_file.
    Return:
        ~Logger
        * **logger**: An Logger object with customed config.
    """
    # Basic config
    # NOTE: basicConfig is a no-op once the root logger has handlers, so this
    # format/level only takes effect on the first call in the process.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger = logging.getLogger(__name__)
    # Add filehandler
    # NOTE(review): getLogger(__name__) returns the same object every call, so
    # calling this repeatedly with a log_file attaches another FileHandler each
    # time (duplicate log lines); mode='w' also truncates an existing file.
    if log_file is not None:
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        )
        logger.addHandler(file_handler)
    return logger | 25cbf7d9cd9150ee5b86929c9ab56d748ae0fdc3 | 19,318
def is_unique_msg(msg, previous_msg_ids, previous_run_time):
    """
    Determines if message is unique given previous message ids, and that it's greater than previous run time
    :param msg: raw Message object
    :param previous_msg_ids: set of previously fetched message ids
    :param previous_run_time: previous run time string
    :return: True if message is unique
    """
    inner = msg.get("message", {})
    if not inner:
        # no payload -> cannot be considered new
        return False
    is_new_id = inner.get("messageId") not in previous_msg_ids
    is_recent = inner.get("publishTime", "") > previous_run_time
    return is_new_id and is_recent
import re
def extract_code_from_function(function):
    """Return code handled by function."""
    name = function.__name__
    # only functions named fix_<letter><digits> qualify
    if not name.startswith('fix_'):
        return None
    code = re.sub('^fix_', '', name)
    if not code:
        return None
    try:
        int(code[1:])
    except ValueError:
        return None
    return code
def eqn14_rule(model,storage_r,g,strg_tech,t,tm,s):
    """ General retrieval rate constraint: retrieval for this index tuple
    may not exceed the technology's maximum retrieval rate."""
    rate = model.retrieval_rate[storage_r, g, strg_tech, t, tm, s]
    return rate <= model.MAX_RETRIEVAL_RATE[strg_tech]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.