content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def get_guess(guesses):
    """Prompt the user for a letter until one not already in ``guesses`` is entered.

    :param guesses: collection of letters already guessed.
    :return: the newly guessed letter.
    """
    # Loop instead of recursing (the original recursed on every duplicate
    # guess, so a stubborn user could exhaust the recursion limit).
    while True:
        guess = input("------\nGuess a letter: ")
        if guess in guesses:
            print("You've already guessed that letter.")
            continue
        return guess
def get_longest_key(tuple_of_tuples):
    """
    Why is this needed? Because sometimes we want to know how long a CharField
    should be -- so let's have it as long as the longest choice available.
    (For example, when we have a radio button and we want to store a single value.)
    INPUT=(
    ('short', 'blahblahblah'),
    ('longer', 'blahblahblah'),
    ('longest', 'blahblahblah')
    )
    OUTPUT=len(longest)
    USAGE:
    BLAH_CHOICES=(...)
    blah=CharField(max_length=get_longest_key(BLAH_CHOICES))
    """
    # Bug fix: the original measured dict *values* (the display labels), but
    # the stored choice -- and therefore the needed max_length -- is the key
    # (first element of each pair), exactly as the docstring example shows.
    return max(len(key) for key in dict(tuple_of_tuples))
def mass_within_region(gals, x_bound, y_bound):
    """
    Calculate the total mass and number of galaxies within a specified region.

    Parameters
    ----------
    gals: list of ``Galaxy`` class instances.
        Galaxies that we're calculating the mass for.
    x_bound, y_bound: [float, float]
        The minimum and maximum bounds that define the region we're averaging
        inside.

    Returns
    -------
    mass_in_region: float
        The total galaxy mass within the specified region.
    num_gals_in_region: int
        The number of galaxies within the specified region.
    """
    total_mass = 0.0
    galaxy_count = 0
    for gal in gals:
        # A galaxy is inside the region when each coordinate lies within the
        # corresponding [min, max] bound (bounds are inclusive).
        inside = all(
            low <= pos <= high
            for (low, high), pos in zip((x_bound, y_bound), (gal.x, gal.y))
        )
        if inside:
            total_mass += gal.mass
            galaxy_count += 1
    return total_mass, galaxy_count
def get_json_for_r_log_entry(req, x_set):
    """Build the dict used to match r_log entries returned through the API."""
    entry = {"request": req.to_dct()}
    entry["x_set"] = x_set
    return entry
def _get_sub_types_of_compositional_types(compositional_type: str) -> tuple:
    """
    Extract the sub-types of compositional types.
    This method handles both specification types (e.g. pt:set[], pt:dict[]) as well as python types (e.g. FrozenSet[], Union[]).
    :param compositional_type: the compositional type string whose sub-types are to be extracted.
    :return: tuple containing all extracted sub-types.
    """
    sub_types_list = list()
    # Optional[X] / pt:optional[X]: the single sub-type is everything between
    # the outermost brackets.
    if compositional_type.startswith("Optional") or compositional_type.startswith(
        "pt:optional"
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.append(sub_type1)
    # FrozenSet[X] / pt:set[X] / pt:list[X]: single element type between the
    # outermost brackets.
    if (
        compositional_type.startswith("FrozenSet")
        or compositional_type.startswith("pt:set")
        or compositional_type.startswith("pt:list")
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.append(sub_type1)
    # Tuple[X, ...]: strip the brackets, then drop the trailing ", ..." (five
    # characters) so only the element type remains.
    if compositional_type.startswith("Tuple"):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_type1 = sub_type1[:-5]
        sub_types_list.append(sub_type1)
    # Dict[K, V] / pt:dict[K, V]: key type before the first comma, value type
    # after it.  NOTE(review): assumes the key type itself contains no comma
    # or bracket -- confirm this holds for all supported key types.
    if compositional_type.startswith("Dict") or compositional_type.startswith(
        "pt:dict"
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.index(",")
        ].strip()
        sub_type2 = compositional_type[
            compositional_type.index(",") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.extend([sub_type1, sub_type2])
    # Union[...] / pt:union[...]: walk the comma-separated alternatives one at
    # a time.  Bracketed alternatives (Dict/Tuple) are consumed up to their
    # closing bracket first so their internal commas are not mistaken for
    # alternative separators.
    if compositional_type.startswith("Union") or compositional_type.startswith(
        "pt:union"
    ):
        inside_union = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        while inside_union != "":
            if inside_union.startswith("Dict") or inside_union.startswith("pt:dict"):
                # Take the whole "Dict[...]" alternative including brackets.
                sub_type = inside_union[: inside_union.index("]") + 1].strip()
                rest_of_inside_union = inside_union[
                    inside_union.index("]") + 1 :
                ].strip()
                if rest_of_inside_union.find(",") == -1:
                    # it is the last sub-type
                    inside_union = rest_of_inside_union.strip()
                else:
                    # it is not the last sub-type
                    inside_union = rest_of_inside_union[
                        rest_of_inside_union.index(",") + 1 :
                    ].strip()
            elif inside_union.startswith("Tuple"):
                # Take the whole "Tuple[...]" alternative including brackets.
                sub_type = inside_union[: inside_union.index("]") + 1].strip()
                rest_of_inside_union = inside_union[
                    inside_union.index("]") + 1 :
                ].strip()
                if rest_of_inside_union.find(",") == -1:
                    # it is the last sub-type
                    inside_union = rest_of_inside_union.strip()
                else:
                    # it is not the last sub-type
                    inside_union = rest_of_inside_union[
                        rest_of_inside_union.index(",") + 1 :
                    ].strip()
            else:
                # Plain (non-bracketed) alternative: text up to the next comma,
                # or the whole remainder if this is the final alternative.
                if inside_union.find(",") == -1:
                    # it is the last sub-type
                    sub_type = inside_union.strip()
                    inside_union = ""
                else:
                    # it is not the last sub-type
                    sub_type = inside_union[: inside_union.index(",")].strip()
                    inside_union = inside_union[inside_union.index(",") + 1 :].strip()
            sub_types_list.append(sub_type)
    return tuple(sub_types_list)
def count_lines(file):
    """Return the number of lines in *file*.

    The current file position is preserved; the file must support
    tell() and seek().
    """
    saved_position = file.tell()
    file.seek(0)
    # iter(readline, '') yields lines until readline returns the empty string.
    total = sum(1 for _ in iter(file.readline, ''))
    file.seek(saved_position)
    return total
import threading
def is_main_thread() -> bool:
    """
    Return True when the caller is running on the interpreter's main thread.
    """
    # Thread objects compare by identity, so `is` is equivalent to `==` here.
    return threading.current_thread() is threading.main_thread()
from typing import List
from typing import Tuple
def fast_text_prediction_to_language_code(res: List[Tuple[str, str]]) -> List[str]:
    """Convert fastText language predictions to bare language codes.

    Each label like ``"__label__en"`` is reduced to the text after its last
    underscore (labels without an underscore pass through unchanged).
    """
    labels, _scores = res
    return [label.rsplit("_", 1)[-1] for label in labels]
def select(element, selector):
    """Syntactic sugar for element#cssselect that grabs the first match.

    Raises IndexError when the selector matches nothing.
    """
    hits = element.cssselect(selector)
    return hits[0]
def is_in_bbox(x, y, bbox):
    """
    Return True if the point (x, y) lies inside the bounding box, else False.

    bbox is (xMin, yMin, xMax, yMax); points on the edges count as inside.
    """
    x_min, y_min, x_max, y_max = bbox
    return (x_min <= x <= x_max) and (y_min <= y <= y_max)
def convert_tuple_to_8_int(tuple_date):
    """Convert a (year, month, day) tuple to an 8-digit integer date (e.g. 20161231)."""
    year, month, day = tuple_date
    return int(f"{year}{month:02}{day:02}")
def get_request_raw_header(request, name):
    """Return the raw header value of *request* matching *name* (case-insensitive).

    Returns "" when the header is absent.  raw_headers entries are
    (name_bytes, value_bytes) pairs.
    """
    wanted = name.lower()
    for header_name, header_value in request.raw_headers:
        if header_name.decode("utf-8").lower() == wanted:
            return header_value.decode("utf-8")
    return ""
def import_class(class_path):
    """Import and return a class (or other attribute) from a dotted type string.

    :param class_path: Type string of the class, e.g. "package.module.Name".
    :type class_path: str
    :rtype: type
    """
    parts = class_path.split('.')
    # __import__ returns the top-level package; walk the remaining components.
    obj = __import__(parts[0])
    for attribute in parts[1:]:
        obj = getattr(obj, attribute)
    return obj
def create_system_id(os_string, architecture):
    """
    Create a system-ID by joining the OS-String and the architecture with a
    single space (underscores in the OS-String are replaced by spaces).
    Args:
        os_string (str):
            The Operating system string.
        architecture (str):
            The Architecture string.
    Returns:
        The System-ID string.
    """
    system_id_format = '{os_string} {architecture}'
    return system_id_format.format(os_string=os_string.replace('_', ' '),
                                   architecture=architecture)
def circular_array_rotation(a, k, queries):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/circular-array-rotation/problem

    Perform k right circular rotations on array a and answer each query index
    against the rotated array.  The rotation is never materialised: element i
    of the rotated array is a[(i - k) % len(a)].

    For example, a = [3, 4, 5], k = 2, queries = [1, 2]:
    [3, 4, 5] -> [5, 3, 4] -> [4, 5, 3], so the answers are [5, 3].

    Args:
        a (list): array of integers to rotate
        k (int): the number of times to shift the array right
        queries (list): list of indices to query on the newly shifted array
    Returns:
        list: a list of values returned from the queries
    """
    return [a[(position - k) % len(a)] for position in queries]
from typing import Dict
def normalize_score_dict(input_dict: Dict[str, float], exponent=1) -> Dict[str, float]:
    """Takes a dictionary of scores and applies L1-normalization (dividing each value by the sum).

    This is the simplest way of turning a collection of scores into a probability
    distribution.  The exponent can be used to make the normalization use L2 or
    some other norm.
    """
    total = sum(pow(value, exponent) for value in input_dict.values())
    if total > 0:
        norm = pow(total, 1 / exponent)
        return {key: value / norm for key, value in input_dict.items()}
    # Degenerate case (all-zero weights): fall back to a uniform distribution.
    # The division is evaluated per item, so an empty dict safely yields {}.
    return {key: 1 / len(input_dict) for key in input_dict}
def residuals(fit, obs):
    """Calculate residuals for fit compared to observed data.

    :fit: discrete fit data points
    :obs: observed data points
    :returns: fit minus observed data points

    NOTE(review): relies on the arguments supporting vectorised subtraction
    (e.g. numpy arrays or scalars); plain Python lists will raise TypeError.
    """
    difference = fit - obs
    return difference
def abs_of_difference(num1: int, num2: int) -> int:
    """
    precondition: parameters must be numbers
    Compute the absolute value of the difference between num1 and num2.
    >>> abs_of_difference(-4,5)
    9
    >>> abs_of_difference(1,-3)
    4
    >>> abs_of_difference(2,2)
    0
    """
    difference = num1 - num2
    return difference if difference >= 0 else -difference
def validate(value, ceiling):
    """
    Check whether value is non-negative and strictly below the ceiling.

    :param value: The value to be checked (coerced with int()).
    :param ceiling: The highest value "value" can be (exclusive).
    :return: True if value is less than ceiling and not negative.
    """
    number = int(value)
    return number >= 0 and number < ceiling
from typing import Dict
def calculate_frequency(lst: list) -> Dict[str, int]:
    """Calculate the frequency of each item in a list, as an item -> count dict."""
    counts: Dict[str, int] = {}
    for element in lst:
        counts[element] = counts.get(element, 0) + 1
    return counts
def get_pbs_node_requirements(sys_settings, node_count):
    """Get the cpu and memory requirements for a given number of nodes.

    Args:
        sys_settings (dict): System settings dict, as supplied from config_manager
        node_count (int): Number of whole nodes on target system
    Returns:
        dict: ncpus and mem for target number of nodes
    """
    pbs_settings = sys_settings['REMOTE_SETTINGS']['PBS_SETTINGS']
    ncpus = node_count * pbs_settings['CORES_PER_NODE']
    # MEM_PER_NODE is an (amount, unit) pair, e.g. [64, 'GB'].
    mem_spec = pbs_settings['MEM_PER_NODE']
    mem = '%s%s' % (int(node_count * mem_spec[0]), mem_spec[1])
    return dict(ncpus=ncpus, mem=mem)
def myreplace(old, new, s):
    """Replace all occurrences of old with new in s, collapsing runs of whitespace first."""
    collapsed = " ".join(s.split())
    pieces = collapsed.split(old)
    return new.join(pieces)
def find_permutation(s, pattern):
    """Find presence of any permutation of pattern in the text as a substring.
    >>> find_permutation("oidbcaf", "abc")
    True
    >>> find_permutation("odicf", "dc")
    False
    >>> find_permutation("bcdxabcdy", "bcdxabcdy")
    True
    >>> find_permutation("aaacb", "abc")
    True
    """
    # Sliding-window frequency match: a window of length len(pattern) is a
    # permutation of pattern iff every distinct pattern character occurs the
    # same number of times inside the window.
    k = len(pattern)
    if k > len(s):
        return False
    # Required count of each distinct character in the pattern.
    count = {}
    for c in pattern:
        count[c] = count.get(c, 0) + 1
    # matches = number of distinct characters whose remaining required count
    # is exactly zero within the current window.
    matches = 0
    win_start = 0
    for win_end, next_char in enumerate(s):
        # Extend the window on the right; decrement the outstanding need.
        if next_char in count:
            count[next_char] -= 1
            if count[next_char] == 0:
                matches += 1
        # Once the window reaches length k, test it, then slide the left edge.
        if win_end >= k - 1:
            if matches == len(count):
                return True
            first_char = s[win_start]
            if first_char in count:
                # The departing character frees up one required occurrence.
                if count[first_char] == 0:
                    matches -= 1
                count[first_char] += 1
            win_start += 1
    return False
import random
def __random_positions(max_position):
    """Generates two random different list positions given a max position.

    The max position is exclusive, so valid positions are
    0 .. max_position - 1 inclusive.

    Args:
        max_position(integer): The maximum position (exclusive).
    Returns:
        tuple: two distinct positions drawn without replacement.
    """
    # Bug fix: range(0, max_position - 1) silently excluded the last valid
    # index; since max_position is exclusive, the sample space must be
    # range(max_position).
    [lhs, rhs] = random.sample(range(max_position), 2)
    return (lhs, rhs)
def create_deployment(inputs,
                      labels,
                      blueprint_id,
                      deployment_id,
                      rest_client):
    """Create a deployment.

    :param inputs: a list of dicts of deployment inputs.
    :type inputs: list
    :param labels: a list of dicts of deployment labels.
    :type labels: list
    :param blueprint_id: An existing blueprint ID.
    :type blueprint_id: str
    :param deployment_id: The deployment ID.
    :type deployment_id: str
    :param rest_client: A Cloudify REST client.
    :type rest_client: cloudify_rest_client.client.CloudifyClient
    :return: request's JSON response
    :rtype: dict
    """
    response = rest_client.deployments.create(
        blueprint_id,
        deployment_id,
        inputs,
        labels=labels,
    )
    return response
import re
def split_sentences(text, delimiter="\n"):
    """Insert *delimiter* after sentence-ending punctuation groups in *text*.

    The pattern targets tokenised text where punctuation (optionally preceded
    by a slash) is surrounded by spaces.
    """
    replacement = rf"\1{delimiter}"
    return re.sub(r"(( /?[.!?])+ )", replacement, text)
def git_errors_message(git_info):
    """Format a list of any git errors to send as slack message"""
    blocks = [
        {
            "type": "divider"
        },
        {
            "type": "section", "text": {"type": "mrkdwn", "text": ":github: *Git errors*"}
        },
    ]
    # Append one section per failed branch.
    for item in git_info:
        branch = item['branch']
        error = item['error']
        blocks.append({
            "type": "section",
            "text": {"type": "mrkdwn", "text": f"error pushing branch: {branch} ```{error}```"},
        })
    return [{"color": "#f2c744", "blocks": blocks}]
def DecodePublic(curve, bb):
    """
    Decode a public key from bytes. Invalid points are rejected. The
    neutral element is NOT accepted as a public key.
    """
    point = curve.Decode(bb)
    if point.is_neutral():
        raise Exception('Invalid public key (neutral point)')
    return point
def compact(it):
    """Yield only the truthy elements of the iterable *it*."""
    return (element for element in it if element)
import ast
def _parse_mock_imports(mod_ast, expanded_imports):
    """Parses a module AST node for import statements and resolves them against
    expanded_imports (such as you might get from _expand_mock_imports).
    If an import is not recognized, it is omitted from the returned dictionary.
    Returns a dictionary suitable for eval'ing a statement in mod_ast, with
    symbols from mod_ast's imports resolved to real objects, as per
    expanded_imports.
    """
    ret = {}
    # Only top-level statements are scanned; imports nested inside functions
    # or conditionals are ignored.
    for node in mod_ast.body:
        if isinstance(node, ast.Import):
            # "import a.b [as c]": bind the alias (or the dotted name itself)
            # when the dotted name is known.
            for alias in node.names:
                if alias.name in expanded_imports:
                    ret[alias.asname or alias.name] = expanded_imports[alias.name]
        elif isinstance(node, ast.ImportFrom):
            # "from x import y [as z]": only absolute imports (level == 0) are
            # resolved; relative imports are skipped entirely.
            if node.level == 0:
                for alias in node.names:
                    fullname ='%s.%s' % (node.module, alias.name)
                    if fullname in expanded_imports:
                        ret[alias.asname or alias.name] = expanded_imports[fullname]
    return ret
def SortByName(list_response):
    """Return the list_response sorted by each element's name attribute."""
    def name_key(entry):
        return entry.name
    return sorted(list_response, key=name_key)
import re
def unindent(string):
    """Remove the initial part of whitespace from string.
    >>> unindent("1 + 2 + 3\\n")
    '1 + 2 + 3'
    >>> unindent(" def fun():\\n     return 42\\n")
    'def fun():\\n    return 42'
    >>> unindent("\\n def fun():\\n     return 42\\n")
    'def fun():\\n    return 42'
    >>> unindent(" def fun():\\n     return 42\\n\\n")
    'def fun():\\n    return 42'
    """
    string = re.sub(r'^\n*', '', string.rstrip()) # ignore leading and trailing newlines
    # The first line's leading tabs/spaces become the candidate common prefix.
    match = re.match(r'^([\t ]+)', string)
    if not match:
        return string
    whitespace = match.group(1)
    lines = []
    for line in string.splitlines(True):
        if line.startswith(whitespace):
            lines.append(line[len(whitespace):])
        else:
            # A line that does not share the prefix means the indent is not
            # common to all lines; return the input unchanged.
            return string
    return ''.join(lines)
def is_tag_exists(
    lf_client,
    account_id: str,
    key: str,
) -> bool:
    """
    Check if an Lake Formation tag exists or not
    Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lakeformation.html#LakeFormation.Client.get_lf_tag
    """
    try:
        lf_client.get_lf_tag(CatalogId=account_id, TagKey=key)
    except Exception as error:
        # The service signals a missing tag via this message suffix; any other
        # failure is re-raised to the caller.
        if str(error).endswith("GetLFTag operation: Tag key does not exist"):
            return False
        raise
    return True
import collections
def build_node_statistics(nodes, images):
    """Build a dictionary of cache statistics about a group of nodes."""
    provisioned_count = sum(1 for node in nodes if node.provisioned)
    node_statistics = {
        'provisioned': provisioned_count,
        'not provisioned': len(nodes) - provisioned_count,
        'available (not cached)': sum(1 for node in nodes if node.can_cache()),
        'cached (includes \'caching\')':
            sum(1 for node in nodes if node.cached and not node.provisioned),
        'total': len(nodes),
        'images': collections.defaultdict(lambda: 0)
    }
    uuid_to_name = {image.uuid: image.name for image in images}
    # Tally which images the cached, unprovisioned nodes are holding.
    for node in nodes:
        if (node.cached and
                not node.provisioned and
                node.cached_image_uuid is not None):
            # If we don't know the name of the image, just use the UUID.
            label = uuid_to_name.get(node.cached_image_uuid,
                                     node.cached_image_uuid)
            node_statistics['images'][label] += 1
    return node_statistics
def render_instruction(name, content):
    """Render an arbitrary in-line instruction with the given name and
    content."""
    return f':{name}: {content}'
def parse_agent_req_file(contents):
    """
    Return a dictionary mapping {check-package-name --> pinned_version} parsed
    from the given file contents. Lines are assumed to look like:
    datadog-active-directory==1.1.1; sys_platform == 'win32'
    """
    catalog = {}
    for line in contents.splitlines():
        name, separator, remainder = line.partition('==')
        # Skip garbled lines (no '==', or empty name/version) so a broken
        # requirements file doesn't abort the whole parse.
        if not separator or not name or not remainder:
            continue
        # Drop the environment-marker part after ';', keeping the version.
        catalog[name] = remainder.split(';')[0]
    return catalog
def make_etag(hasher):
    """Build etag function based on `hasher` algorithm."""
    def etag(buf):
        digest = hasher()
        for chunk in buf:
            digest.update(chunk)
        return '"{}"'.format(digest.hexdigest())
    return etag
def indexof(ilist, item):
    """Return the index of *item* in the list, comparing stripped entries.

    Exits the process with code -4 when the item cannot be found
    (treated as a config-parse failure).
    """
    for position, raw_entry in enumerate(ilist):
        if raw_entry.strip() == item:
            return position
    print("ERROR failed to parse config, can't find item:", item)
    exit(-4)
def operate_status_template(open_now: bool) -> tuple:
    """Build the Flex Message fragment describing a restaurant's operating status.

    Args:
        open_now (bool): whether the restaurant is currently open.
    Returns:
        tuple: (status text component, status colour hex string)
    """
    status_text, operate_color = ("營業中", "#9ACD32") if open_now else ("休息中", "#FF6347")
    operate_status = {
        "type": "text",
        "text": status_text,
        "size": "xs",
        "color": "#ffffff",
        "align": "center",
        "gravity": "center",
    }
    return (operate_status, operate_color)
def paramset_to_rootnames(paramset):
    """
    Generate the parameter name(s) for a parameter set as ROOT would.

    Mapping:
        * ``"lumi"`` -> ``"Lumi"``
        * unconstrained scalar parameter ``"foo"`` -> ``"foo"``
        * constrained scalar parameter ``"foo"`` -> ``"alpha_foo"``
        * non-scalar parameters ``"foo"`` -> ``["gamma_foo_0", ...]``

    Args:
        paramset (:obj:`pyhf.paramsets.paramset`): The parameter set.
    Returns:
        :obj:`List[str]` or :obj:`str`: The generated parameter names
        (for the non-scalar/scalar case) respectively.
    """
    if paramset.name == 'lumi':
        return 'Lumi'
    if not paramset.is_scalar:
        return [
            f'gamma_{paramset.name}_{index}'
            for index in range(paramset.n_parameters)
        ]
    return f'alpha_{paramset.name}' if paramset.constrained else f'{paramset.name}'
from typing import Optional
def int_to_comma_hex(n: int, blength: Optional[int] = None) -> str:
    """
    int_to_comma_hex
    Translates an integer into its colon-separated hex-byte string.
    :type n: ``int``
    :param n: Input integer
    :type blength: ``Optional[int]``
    :param blength: Add padding to reach length
    :return: Translated hex string
    :rtype: ``str``
    """
    digits = format(n, 'x')
    # Left-pad to a whole number of hex-digit pairs.
    if len(digits) % 2:
        digits = '0' + digits
    if blength is not None:
        digits = '00' * max(blength - len(digits), 0) + digits
    pairs = [digits[pos:pos + 2] for pos in range(0, len(digits), 2)]
    return ':'.join(pairs)
def adcm_api_credentials() -> dict:
    """ADCM credentials for use in tests"""
    credentials = dict(user="admin", password="admin")
    return credentials
import re
def Language_req(description):
    """Capture the language requirement mentioned in a job description.

    Returns the first matched requirement (lower-cased), or None when no
    English/German requirement is mentioned.
    """
    description = description.lower()
    # Bug fix: the original pattern was missing a '|' between the
    # "german and english" and "englisch und deutsch" alternatives
    # (\b\b were fused together), which made both of them unmatchable.
    matches = re.findall(
        r"\benglish\sand\sgerman\b|\bgerman\sand\senglish\b|\benglisch\sund\sdeutsch\b"
        r"|\benglish\b|\benglisch\b|\bgerman\b|\bdeutsch\b",
        description,
    )
    return matches[0] if matches else None
def str2bool(val):
    """Convert string to boolean value

    Raises TypeError when *val* is not a string.
    """
    try:
        lowered = val.lower()
    except AttributeError:
        raise TypeError('value {0} was not a string'.format(type(val)))
    # Only a small set of strings count as falsy; everything else is True.
    return lowered not in ('false', 'off', '0')
def get_layers(net_param):
    """Get layers information.

    Parameters
    ----------
    net_param : caffe_pb2.NetParameter
        A pretrained network description.
    Returns
    -------
    layers : list
        description of the layers.
    version : string
        version information of the pretrained model.
    """
    # V1 protos populate `layers`; V2 protos populate `layer`.
    if net_param.layers:
        return net_param.layers[:], "V1"
    if net_param.layer:
        return net_param.layer[:], "V2"
    raise Exception("Couldn't find layers!")
def evaluations(ty, pv):
    """
    evaluations(ty, pv) -> ACC
    Calculate accuracy (percentage) using the true values (ty) and
    predicted values (pv).
    """
    if len(ty) != len(pv):
        raise ValueError("len(ty) must equal to len(pv)")
    correct = sum(1 for predicted, truth in zip(pv, ty) if truth == predicted)
    return 100.0 * correct / len(ty)
def _GetSheriffForTest(test):
    """Gets the Sheriff for a test, or None if no sheriff."""
    sheriff_key = test.sheriff
    return sheriff_key.get() if sheriff_key else None
def booth(args):
    """Booth function

    Global minimum: f(1.0,3.0) = 0.0
    Search domain: -10.0 <= x, y <= 10.0
    """
    x, y = args[0], args[1]
    first_term = (x + 2 * y - 7) ** 2
    second_term = (2 * x + y - 5) ** 2
    return first_term + second_term
def _int_to_riff(i: int, length: int) -> bytes:
    """Convert an int to its byte representation in a RIFF file.

    RIFF stores integers as unsigned values encoded little-endian.

    Args:
        i (int):
            The integer to represent.
        length (int):
            The number of bytes used to represent the integer.
    Returns:
        bytes:
            The bytes representation of the specified integer.
    """
    return int.to_bytes(i, length, "little", signed=False)
def reproject(link, node, epsg):
    """
    Reproject the link and node geodataframes to *epsg*, refreshing the
    node X and Y columns from the reprojected geometry.
    """
    projected_link = link.to_crs(epsg=epsg)
    projected_node = node.to_crs(epsg=epsg)
    projected_node["X"] = projected_node["geometry"].apply(lambda point: point.x)
    projected_node["Y"] = projected_node["geometry"].apply(lambda point: point.y)
    return projected_link, projected_node
def StrContains(input_string, substring):
    """
    Return True if the concrete value of *substring* is contained in the
    concrete value of *input_string*, otherwise False.

    :param input_string: the string we want to check
    :param substring: the string we want to check if it's contained inside the input_string
    :return: True if substring is contained in input_string else False
    """
    haystack = input_string.value
    needle = substring.value
    return needle in haystack
from typing import Iterable
def get_closest(iterable: Iterable, target):
    """Return the item in iterable that is closest to the target.

    Returns None when the iterable is empty/falsy or the target is None.
    """
    if not iterable or target is None:
        return None
    def distance(item):
        return abs(item - target)
    return min(iterable, key=distance)
def normalise(matrix):
    """Normalises the agents' cumulative utilities.

    Parameters
    ----------
    matrix : list of list of int
        The cumulative utilities obtained by the agents throughout.

    Returns
    -------
    list of list of int
        The normalised cumulative utilities (i.e., utility shares) obtained by
        the agents throughout.
    """
    normalised = []
    for row in matrix:
        row_total = sum(row)
        normalised.append([value / row_total for value in row])
    return normalised
def none_if_empty(tup):
    """Returns None if passed an empty tuple

    This is helpful since a SimpleVar is actually an IndexedVar with
    a single index of None rather than the more intuitive empty tuple.
    """
    # Bug fix: the original used ``tup is ()``, an identity test that relies
    # on CPython interning the empty tuple (and is a SyntaxWarning on 3.8+).
    # Equality is the portable, intended check.
    if tup == ():
        return None
    return tup
def _attributes_equal(new_attributes, old_attributes):
    """
    Compare attributes (dict) by value to determine if a state is changed
    :param new_attributes: dict containing attributes
    :param old_attributes: dict containing attributes
    :return bool: result of the comparison between new_attributes and
    old attributes
    """
    # NOTE(review): the comparison is one-directional -- only keys present in
    # new_attributes are checked, so keys that exist solely in old_attributes
    # do not count as a difference. Confirm this asymmetry is intended.
    for key in new_attributes:
        if key not in old_attributes:
            return False
        elif new_attributes[key] != old_attributes[key]:
            return False
    return True
from datetime import datetime
def detect_resolution(d1: datetime, d2: datetime) -> int:
    """Return the time difference d1 - d2 in whole milliseconds.

    :param d1: minuend datetime
    :param d2: subtrahend datetime
    :return: time difference in milliseconds (truncated toward zero)
    """
    elapsed_seconds = (d1 - d2).total_seconds()
    return int(elapsed_seconds * 1e3)
def total_distance(solution, distanceMap):
    """
    Calculate the total tour distance over a solution of cities.
    Uses a dictionary to look up each pairwise distance.

    :param solution: A list of cities in random order.
    :distanceMap: The dictionary lookup tool.
    :return: The total distance between all the cities.
    """
    total = 0
    # Sum consecutive legs of the tour.
    for origin, destination in zip(solution, solution[1:]):
        total += distanceMap[(origin, destination)]
    # Close the loop: travel from the last city back to the first.
    total += distanceMap[(solution[-1], solution[0])]
    return total
import struct
def py_float2float(val, be=False):
    """
    Convert a Python float to 4 bytes (IEEE 754 single precision).

    :param val: the float to pack
    :param be: big-endian when True, little-endian otherwise
    """
    if be:
        return struct.pack('>f', val)
    return struct.pack('<f', val)
def arg_name(name):
    """Convert snake case argument name to a command line name.

    :param str name: The argument parameter name.
    :returns: str
    """
    return f"--{name.replace('_', '-')}"
def gcd_fast(a: int, b: int) -> tuple:
    """
    GCD using Euclid's Extended Algorithm generalized for all integers of the
    set Z, including negative values.

    :param a: The first number.
    :param b: The second number.
    :return: (gcd, x, y) where x and y are Bezout coefficients satisfying
             a*x + b*y == gcd.
    """
    # Base cases. gcd(0, b) = b with 0*0 + b*1 == b.
    if a == 0:
        return b, 0, 1
    if b == 0:
        # Bug fix: the original returned (a, 0, 1), which violates the Bezout
        # identity (a*0 + 0*1 == 0 != a); the coefficients must be (1, 0).
        return a, 1, 0
    # Recurse on (b mod a, a): if (b%a)*x + a*y == g, then rewriting
    # b % a = b - (b//a)*a gives a*(y - (b//a)*x) + b*x == g.
    gcd, x, y = gcd_fast(b % a, a)
    return gcd, y - (b // a) * x, x
def get_option_name(flags):
    """
    Get the option name from the user defined arguments.

    Parameters
    ----------
    flags : list
        List of user defined arguments
    Returns
    -------
    str
        The first long-form flag with its "--" prefix removed, or the first
        flag (with any "--" removed) when no long-form flag exists.
    """
    for flag in flags:
        if flag.startswith("--"):
            return flag.replace("--", "")
    return flags[0].replace("--", "")
def first(*objects):
    """
    Return the first non-None object in objects (None when all are None).
    """
    return next((obj for obj in objects if obj is not None), None)
def init_list_with_values(list_length: int, init_value):
    """Return a pre-initialized list.

    Returns a list of length list_length with each element equal to
    init_value. init_value must be immutable (e.g. an int is okay; a
    dictionary is not), or the resulting list will be a list of
    references to the same object (e.g. retlist[0] and retlist[1] would
    point to the same object and manipulating
    one would change it for the other).

    Args:
        list_length (int): The number of elements in the resulting
            list.
        init_value: An immutable value to initialize each list
            element to.

    Returns:
        list: A list of length list_length with each element
        initialized to init_value

    Examples:
        >>> init_list_with_values(3, 0)
        [0, 0, 0]
        >>> init_list_with_values(5, "Cookies")
        ['Cookies', 'Cookies', 'Cookies', 'Cookies', 'Cookies']
        >>> init_list_with_values(2, (1, 2, 3))
        [(1, 2, 3), (1, 2, 3)]
        >>> init_list_with_values(2, {"foo": "bar"})
        [{'foo': 'bar'}, {'foo': 'bar'}]
    """
    # Note: like [init_value] * n, this stores n references to the SAME object.
    return [init_value for _ in range(list_length)]
import re
def decode_sigma(ds, sigma_v):
    """
    Compute the z coordinate implied by a sigma coordinate variable.

    ds: Dataset
    sigma_v: sigma coordinate variable (must carry a 'formula_terms' attr).
    return DataArray of z coordinate implied by sigma_v
    """
    formula = sigma_v.attrs['formula_terms']
    # formula_terms looks like "sigma: s eta: e bedlevel: b"; map each term
    # name to the referenced dataset variable.
    terms = {
        term_name: ds[var_name]
        for term_name, var_name in re.findall(r'\s*(\w+)\s*:\s*(\w+)', formula)
    }
    # xarray promotes z to the correct dimensions automatically, by name.
    # This multiplication order puts laydim last, which other (fragile) code
    # assumes -- the ordering here is intentional.
    z = (terms['eta'] - terms['bedlevel']) * terms['sigma'] + terms['bedlevel']
    return z
def seqlib_type(cfg):
    """
    Determine which :py:class:`~enrich2.seqlib.SeqLib` subclass a
    configuration object describes.

    Args:
        cfg (dict): decoded JSON object

    Returns:
        str: The class name of the :py:class:`~seqlib.seqlib.SeqLib` derived
        object specified by `cfg`.

    Raises:
        ValueError: If the class name cannot be determined.
    """
    has_variants = "variants" in cfg
    has_identifiers = "identifiers" in cfg
    if "barcodes" in cfg:
        if "map file" not in cfg["barcodes"]:
            return "BarcodeSeqLib"
        # Mapped barcodes must resolve to exactly one of variants/identifiers.
        if has_variants and has_identifiers:
            raise ValueError("Unable to determine SeqLib type.")
        if has_variants:
            return "BcvSeqLib"
        if has_identifiers:
            return "BcidSeqLib"
        raise ValueError("Unable to determine SeqLib type.")
    if "overlap" in cfg and has_variants:
        return "OverlapSeqLib"
    if has_variants:
        return "BasicSeqLib"
    if has_identifiers:
        return "IdOnlySeqLib"
    raise ValueError("Unable to determine SeqLib type for configuration object.")
def many2many_dicts(m2mlist):
    """
    Build lookup dictionaries mapping elements of each side of a many-to-many
    relation to the opposite side's group.

    Args:
        m2mlist: list of 2lists [list1i, list2i] where list1i, list2i represent
            a many to many mapping

    Returns:
        (one2two, two2one) : dictionaries from elements of list1i to list2i
            and vice versa
    """
    one2two = {elem: two for one, two in m2mlist for elem in one}
    two2one = {elem: one for one, two in m2mlist for elem in two}
    return (one2two, two2one)
def get_words_label(words_data: list) -> list:
    """
    Build the sorted vocabulary for the current data set.

    :param words_data: parsed word data; each entry's second element is an
        iterable of words
    :return: sorted, de-duplicated vocabulary list
    """
    vocabulary = set()
    for entry in words_data:
        # entry[1] holds the words for this record; the set removes duplicates.
        vocabulary.update(entry[1])
    return sorted(vocabulary)
def read_story_file(filename):
    """Read a story file and return three parallel lists: titles, keywords,
    and stories. Each line is formatted ``title<EOT>keywords<EOL>story``."""
    titles, keywords, stories = [], [], []
    with open(filename, 'r') as infile:
        for line in infile:
            title, remainder = line.strip().split('<EOT>')
            keyword, story = remainder.split('<EOL>')
            titles.append(title)
            keywords.append(keyword)
            stories.append(story)
    return titles, keywords, stories
def rotate_points(points, width=600, height=300):
    """180 degree rotation of points of bbox

    Parameters
    ----------
    points: list or array
        Coordinates of top-left, top-right, bottom-left and bottom-right points
    width, height: int
        Width/height of perspective transformed module image

    Returns
    -------
    rotated_points: list or array
        180 degree rotation
    """
    (x0, y0), (x1, y1) = points[0], points[1]
    # Rotating 180 degrees mirrors each coordinate about the image centre
    # and swaps the order of the two corner points.
    return [[width - x1, height - y1], [width - x0, height - y0]]
def inSkipTokens(start, end, skip_tokens: list) -> bool:
    """
    Return True if the [start, end] index range (python) lies entirely
    inside any one of the skip tokens.
    """
    return any(start >= token[0] and end <= token[1] for token in skip_tokens)
def shape(parameter):
    """
    Get the shape of a ``Parameter``.

    Parameters
    ----------
    parameter: Parameter
        ``Parameter`` object to get the shape of

    Returns
    -------
    tuple:
        shape of the ``Parameter`` object
    """
    parameter_shape = parameter.shape
    return parameter_shape
def get_maj_answer(votes_per_answer):
    """
    :param votes_per_answer: dictionary with {'1': int, '2': int}, where the
        ints add up to NBR_ANNOTATORS
    :return: the majority answer; raises ValueError if the answers are tied
    """
    n_answers = len(votes_per_answer)
    # Answer '1' wins if it is the only answer present, or strictly outvotes '2'.
    if (n_answers == 1 and '1' in votes_per_answer) or \
            (n_answers == 2 and votes_per_answer['1'] > votes_per_answer['2']):
        return '1'
    # Symmetric check for answer '2'.
    if (n_answers == 1 and '2' in votes_per_answer) or \
            (n_answers == 2 and votes_per_answer['2'] > votes_per_answer['1']):
        return '2'
    raise ValueError("The answers are equally often selected."
                     " This should have been impossible with the setup of the study.")
import base64
def invoke_lambda_and_get_duration(lambda_client, payload, function_name):
    """
    Invokes Lambda synchronously and returns the duration reported in its log.

    :param lambda_client: Lambda client.
    :param payload: payload to send.
    :param function_name: function name.
    :return: duration (float, as reported on the REPORT log line).
    """
    response = lambda_client.invoke(
        FunctionName=function_name,
        InvocationType='RequestResponse',
        LogType='Tail',
        Payload=payload,
    )
    # The tail of the execution log comes back base64-encoded.
    log_text = base64.b64decode(response['LogResult']).decode('utf-8')
    report_line = [line for line in log_text.split('\n')
                   if line.startswith('REPORT')][0]
    # The REPORT line is tab-separated; the Duration column looks like
    # "Duration: 123.45 ms".
    duration_column = [column for column in report_line.split('\t')
                       if column.startswith('Duration')][0]
    return float(duration_column.split()[1])
from typing import Union
from pathlib import Path
import base64
def read_as_base64(path: Union[str, Path]) -> str:
    """
    Convert file contents into a base64 string.

    Args:
        path: File path

    Returns:
        Base64 string of the file's UTF-8 encoded text
    """
    raw_bytes = Path(path).read_text().encode("utf-8")
    return base64.b64encode(raw_bytes).decode("utf-8")
def set_active_design(oProject, designname):
    """
    Set the active design.

    Parameters
    ----------
    oProject : pywin32 COMObject
        The HFSS design upon which to operate.
    designname : str
        Name of the design to set as active.

    Returns
    -------
    oDesign : pywin32 COMObject
        The HFSS Design object.
    """
    oDesign = oProject.SetActiveDesign(designname)
    return oDesign
def write_tags(tag, content='', attrs=None, cls_attr=None, uid=None, new_lines=False, indent=0,
               **kwargs):
    """
    Write an HTML element enclosed in tags.

    Parameters
    ----------
    tag : str
        Name of the tag.
    content : str or list(str)
        This goes into the body of the element.
    attrs : dict or None
        Attributes of the element.
        Defaults to None.
    cls_attr : str or None
        The "class" attribute of the element.
    uid : str or None
        The "id" attribute of the element.
    new_lines : bool
        Make new line after tags.
    indent : int
        Indentation expressed in spaces.
        Defaults to 0.
    **kwargs : dict
        Alternative way to add element attributes. Use with attention, can overwrite some in-built
        python names as "class" or "id" if misused.

    Returns
    -------
    str
        HTML element enclosed in tags.
    """
    # Writes an HTML tag with element content and element attributes (given as a dictionary)
    line_sep = '\n' if new_lines else ''
    spaces = ' ' * indent
    template = '{spaces}<{tag} {attributes}>{ls}{content}{ls}</{tag}>\n'
    # Copy the caller's dict before merging kwargs; the original updated
    # `attrs` in place, mutating the caller's dictionary as a side effect.
    attrs = dict(attrs) if attrs is not None else {}
    attrs.update(kwargs)
    if cls_attr is not None:
        attrs['class'] = cls_attr
    if uid is not None:
        attrs['id'] = uid
    attrs = ' '.join(['{}="{}"'.format(k, v) for k, v in attrs.items()])
    if isinstance(content, list):  # Convert iterable to string
        content = '\n'.join(content)
    return template.format(tag=tag, content=content, attributes=attrs, ls=line_sep, spaces=spaces)
def calc_BMI(w, h):
    """Calculate the body mass index.

    Arguments:
        w {[float]} -- [weight; presumably kilograms -- units not enforced]
        h {[float]} -- [height; presumably metres -- units not enforced]

    Returns:
        [float] -- [calculated BMI = w / (h*h)]
    """
    height_squared = h * h
    return w / height_squared
def mb_to_human(num):
    """Translate a number of megabytes into a human readable string."""
    if num == 0:
        return '0 B'
    # Walk up the unit ladder, dividing by 1024 each step; anything that
    # survives past terabytes is reported in petabytes.
    for unit in ('M', 'G', 'T'):
        if num < 1024:
            return "{:.2f} {}".format(num, unit)
        num /= 1024
    return "{:.2f} {}".format(num, 'P')
def success(message, data=None, code=200):
    """Build a custom success REST response.

    Args:
        message(string): message to return to the user
        data(dict): response data
        code(number): status code of the response

    Returns:
        tuple: (body dict with falsy entries removed, status code)
    """
    payload = {'status': 'success', 'data': data, 'message': message}
    # Drop falsy entries (e.g. data=None or an empty message) from the body.
    body = {key: value for key, value in payload.items() if value}
    return body, code
def past_days(next_day_to_be_planned):
    """
    Return the indices of the days that precede ``next_day_to_be_planned``,
    i.e. the range 1 .. next_day_to_be_planned - 1 (days are 1-indexed).
    """
    return range(1, next_day_to_be_planned)
def import_obj(obj_path, hard=False):
    """
    import_obj imports an object by uri, example::

        >>> import_obj("module:main")
        <function main at x>

    :param obj_path: a string represents the object uri.
    :param hard: a boolean value indicates whether to raise an exception on
                 import failures.
    """
    try:
        # ``__import__`` of Python 2.x could not resolve unicode, so we need
        # to ensure the type of ``module`` and ``obj`` is native str.
        module_name, attr_name = str(obj_path).rsplit(':', 1)
        module = __import__(module_name, globals(), locals(), [attr_name], 0)
        return getattr(module, attr_name)
    except (ValueError, AttributeError, ImportError):
        # Swallow the failure and return None unless the caller asked for
        # hard failures.
        if hard:
            raise
import random
def create_contig_and_fragments(contig, overlap_size, fragment_size):
    """
    Creates a contig and shuffled overlapping fragments.

    :param str contig: original sequence to create test data from
    :param int overlap_size: number of bases fragments should overlap
    :param int fragment_size: length of bases

    note:: returned contig is probably going to be smaller than the input
        contig so that the last fragment isn't too short
    """
    assert overlap_size < fragment_size
    assert fragment_size < len(contig)

    step_size = fragment_size - overlap_size
    # All valid fragment start positions, stepping by (fragment - overlap).
    starts = range(0, len(contig) - fragment_size + 1, step_size)
    fragments = [contig[start:start + fragment_size] for start in starts]
    random.shuffle(fragments)
    # Trim the contig so it ends exactly where the last fragment ends.
    trimmed_end = starts[-1] + fragment_size
    return contig[:trimmed_end], fragments
def _generate_spaxel_list(spectrum):
"""
Generates a list wuth tuples, each one addressing the (x,y)
coordinates of a spaxel in a 3-D spectrum cube.
Parameters
----------
spectrum : :class:`specutils.spectrum.Spectrum1D`
The spectrum that stores the cube in its 'flux' attribute.
Returns
-------
:list: list with spaxels
"""
spx = [[(x, y) for x in range(spectrum.flux.shape[0])]
for y in range(spectrum.flux.shape[1])]
spaxels = [item for sublist in spx for item in sublist]
return spaxels | 9d4b5339f18022607f349c326dc83e319a690a26 | 8,488 |
def convert_null_to_zero(event, field_or_field_list):
    """Convert the value in a field or field list from None to 0.

    :param event: a dict with the event
    :param field_or_field_list: A single field or list of fields to convert
        to 0 if null
    :return: the updated event (mutated in place)

    Examples:
        .. code-block:: python

            event = {'a_field': None}
            event = convert_null_to_zero(event, field_or_field_list='a_field')
            event = {'a_field': 0}
    """
    # Normalize a single field name into a one-element list.
    fields = [field_or_field_list] if type(field_or_field_list) is str else field_or_field_list
    for name in fields:
        # `.get(name, 0)` is only None when the key exists with value None.
        if event.get(name, 0) is None:
            event[name] = 0
    return event
from typing import Dict
from typing import Any
def get_xml_config_gui_settings(xml_dict: Dict[Any, Any]) -> Dict[Any, Any]:
    """
    Get the tool configuration from the config XML.

    Parameters
    ----------
    xml_dict: OrderedDictionary
        Parsed XML Tool configuration

    Returns
    -------
    OrderedDict
        GUI settings extracted from the parsed XML
    """
    plugin_section = xml_dict["AlteryxJavaScriptPlugin"]
    return plugin_section["GuiSettings"]
def deduplicate_list(list_with_dups):
    """
    Remove duplicate entries from a list.

    Note: element order is not preserved (set-based de-duplication).

    :param list_with_dups: list to be purged
    :type list_with_dups: list
    :returns: a list without duplicates
    :rtype: list
    """
    unique_items = set(list_with_dups)
    return list(unique_items)
def is_empty_element_tag(tag):
    """
    Determines if an element is an empty HTML element, will not have closing tag

    :param tag: HTML tag
    :return: True if empty element, false if not
    """
    # Set membership replaces the original linear scan with a manual flag.
    empty_elements = {'area', 'base', 'br', 'col', 'colgroup', 'command', 'embed', 'hr',
                      'img', 'input', 'keygen', 'link', 'meta', 'param', 'source',
                      'track', 'wbr', 'html'}
    return tag in empty_elements
def get_incident_message_ids(client, incident_id):
    """
    Return the message ids for all the events of the given incident,
    covering both regular and abuse events.
    """
    detail_response = client.get_incident_details(incident_id)
    message_ids = []
    # Regular events first, then abuse events, matching the API's grouping.
    for section in ('events', 'abuse_events'):
        for event in detail_response.get(section, []):
            message_ids.append(event['message_id'])
    return message_ids
def _Dirname(path):
"""Returns the parent directory of path."""
i = path.rfind("/") + 1
head = path[:i]
if head and head != "/" * len(head):
head = head.rstrip("/")
return head | 075063a6dd29456f4adb969621d9727aa187d53b | 8,500 |
def IsLoopExit(op):
    """Return true if `op` is an Exit."""
    return op.type in ("Exit", "RefExit")
def _sample_ncols(col_limits, random_state):
""" Sample a valid number of columns from the column limits. """
integer_limits = []
for lim in col_limits:
try:
integer_lim = sum(lim)
except TypeError:
integer_lim = lim
integer_limits.append(integer_lim)
return random_state.randint(integer_limits[0], integer_limits[1] + 1) | e31f2e290910b9e7376750b18a6ca6436a82a0cb | 8,506 |
import re
def get_index_of_tone_vowel(syllable):
    """
    Returns the index of the vowel that should be marked with a tone accent
    in a given syllable. The tone marks are assigned with the following
    priority:
        - A and E first
        - O is accented in OU
        - otherwise, the *final* vowel
    Returns -1 if no vowels found.

    ARGS:
        syllable (str)
    """
    vowels = "AaEeIiOoUuÜü"
    # Highest-priority targets first; index() of 'ou' points at the 'o'.
    for target in ('a', 'e', 'ou'):
        if target in syllable:
            return syllable.index(target)
    # Fall back to the last vowel of the final vowel cluster.
    match = re.search('[{vowels}]+'.format(vowels=vowels), syllable)
    return match.end() - 1 if match else -1
def statement_block(evaluator, ast, state):
    """Evaluates statement block "{ ... }"."""
    state.new_local_scope()
    # Declarations are evaluated for their side effects only.
    for decl in ast["decls"]:
        evaluator.eval_ast(decl, state)
    result = (None, False)
    for stmt in ast["stmts"]:
        value, do_return = evaluator.eval_ast(stmt, state)
        if do_return:
            # A return statement short-circuits the rest of the block.
            result = (value, True)
            break
    state.remove_local_scope()
    return result
def _create_query_dict(query_text):
"""
Create a dictionary with query key:value definitions
query_text is a comma delimited key:value sequence
"""
query_dict = dict()
if query_text:
for arg_value_str in query_text.split(','):
if ':' in arg_value_str:
arg_value_list = arg_value_str.split(':')
query_dict[arg_value_list[0].strip()] = arg_value_list[1].strip()
return query_dict | 2e4478bdf110911d4ca9fcc6c409aab3504a0b8a | 8,510 |
def _getVersionString(value):
"""Encodes string for version information string tables.
Arguments:
value - string to encode
Returns:
bytes - value encoded as utf-16le
"""
return value.encode("utf-16le") | 36646a686c17f2c69d71a0cdeede56f0a1e514e2 | 8,511 |
from typing import List
from typing import Optional
def get_shift_of_one_to_one_match(matches: List[List[bool]]) -> Optional[int]:
    """
    Matches is an n x n matrix representing a directed bipartite graph.
    Item i is connected to item j if matches[i][j] = True

    We try to find a shift k such that each item i is matched to an item j + shift

    usecase:
    for other_relations a shift 'shift' of the greenyellow intervals must exist such that other_relation is satisfied
    for each pair {(id_from, index), (id_to, index + shift)} of greenyellow intervals of signal groups id_from and
    id_to.

    :param matches: n x n matrix
    :return: shift or None if no such shift can be found
    :raises ValueError when matches is not an nxn boolean matrix
    """
    value_error_message = "matches should be an nxn boolean matrix"
    n = len(matches)
    if not isinstance(matches, list):
        raise ValueError(value_error_message)
    for row in matches:
        # Bug fix: the original tested isinstance(matches, list) here, which
        # is always True at this point, making the per-row type check a no-op.
        if not isinstance(row, list) or len(row) != n:
            raise ValueError(value_error_message)
        if not all(isinstance(item, bool) for item in row):
            raise ValueError(value_error_message)

    for shift in range(n):
        # example:
        # suppose matches equals:
        # [[False, True, False], [False, False, True],[True, False, False]]
        # then a shift of 1 to the left would give
        # np.array([[True, False, False], [False, True, False],[False, False, True]])
        # this has all diagonal elements
        # below we do this check more efficiently for a shift of 'shift' to the left.
        if all(matches[row][(row + shift) % n] for row in range(n)):
            return shift
    return None
def plurality(l):
    """
    Take the most common label from all labels with the same rev_id.
    """
    def most_common_label(group):
        # value_counts sorts by frequency, so the first index entry wins.
        return group.value_counts().index[0]

    result = l.groupby(l.index).apply(most_common_label)
    result.name = 'y'
    return result
def _handle_text_outputs(snippet: dict, results: str) -> dict:
"""
Parse the results string as a text blob into a single variable.
- name: system_info
path: /api/?type=op&cmd=<show><system><info></info></system></show>&key={{ api_key }}
output_type: text
outputs:
- name: system_info_as_xml
:param snippet: snippet definition from the Skillet
:param results: results string from the action
:return: dict of outputs, in this case a single entry
"""
snippet_name = snippet['name']
outputs = dict()
if 'outputs' not in snippet:
print('No outputs defined in this snippet')
return outputs
outputs_config = snippet.get('outputs', [])
first_output = outputs_config[0]
output_name = first_output.get('name', snippet_name)
outputs[output_name] = results
return outputs | 693a3e5cba6d72d09b2adb3745abb4fcf07f92d3 | 8,515 |
from datetime import datetime
def parse_timestamp(datetime_repr: str) -> datetime:
    """Construct a datetime object from a string like ``Jun 1 2005 1:33PM``."""
    timestamp_format = '%b %d %Y %I:%M%p'
    return datetime.strptime(datetime_repr, timestamp_format)
def assemble_result_str(ref_snp, alt_snp, flanking_5, flanking_3):
    """
    (str, str, str, str) -> str

    ref_snp : str
        1 character (A, T, G or C), the reference SNP.
    alt_snp : str
        1 character (A, T, G or C), the variant SNP.
    flanking_5 : str
        50 characters (A, T, G or C), the 50 bp upstream from ref_snp.
    flanking_3 : str
        50 characters (A, T, G or C), the 50 bp downstream from ref_snp.

    Returns the reference SNP and its variant, bracketed and slash-separated,
    between the two flanking sequences, e.g.::

        'XXXXXXXXXXXXXXX[T/C]XXXXXXXXXXXXXXX'
    """
    return "{}[{}/{}]{}".format(flanking_5, ref_snp, alt_snp, flanking_3)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.