content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def convert_into_ascii(dec):
    """Return the character for the given ASCII/Unicode code point.

    Args:
        dec: integer code point (e.g. 65).

    Returns:
        str: the single-character string for that code point.
    """
    # chr() already returns a str; the previous str() wrapper was redundant.
    return chr(dec)
def deepmap(func, seq):
    """Apply `func` to every element of arbitrarily nested lists,
    preserving the nesting structure.

    >>> inc = lambda x: x + 1
    >>> deepmap(inc, [[1, 2], [3, 4]])
    [[2, 3], [4, 5]]
    """
    if not isinstance(seq, list):
        return func(seq)
    return [deepmap(func, element) for element in seq]
def get_endpoint(nautobot, term):
    """
    get_endpoint(nautobot, term)
        nautobot: a predefined pynautobot.api() pointing to a valid instance
                  of Nautobot
        term: the term passed to the lookup function upon which the api
              call will be identified

    Returns the pynautobot endpoint object for `term`.

    Raises:
        KeyError: when `term` does not name a known endpoint.
    """
    # Flat term -> endpoint mapping; the previous nested {"endpoint": ...}
    # dicts added a level of indirection without carrying any extra data.
    endpoint_map = {
        "aggregates": nautobot.ipam.aggregates,
        "circuit-terminations": nautobot.circuits.circuit_terminations,
        "circuit-types": nautobot.circuits.circuit_types,
        "circuits": nautobot.circuits.circuits,
        "circuit-providers": nautobot.circuits.providers,
        "cables": nautobot.dcim.cables,
        "cluster-groups": nautobot.virtualization.cluster_groups,
        "cluster-types": nautobot.virtualization.cluster_types,
        "clusters": nautobot.virtualization.clusters,
        "config-contexts": nautobot.extras.config_contexts,
        "console-connections": nautobot.dcim.console_connections,
        "console-ports": nautobot.dcim.console_ports,
        "console-server-port-templates": nautobot.dcim.console_server_port_templates,
        "console-server-ports": nautobot.dcim.console_server_ports,
        "device-bay-templates": nautobot.dcim.device_bay_templates,
        "device-bays": nautobot.dcim.device_bays,
        "device-roles": nautobot.dcim.device_roles,
        "device-types": nautobot.dcim.device_types,
        "devices": nautobot.dcim.devices,
        "export-templates": nautobot.dcim.export_templates,
        "front-port-templates": nautobot.dcim.front_port_templates,
        "front-ports": nautobot.dcim.front_ports,
        "graphs": nautobot.extras.graphs,
        "image-attachments": nautobot.extras.image_attachments,
        "interface-connections": nautobot.dcim.interface_connections,
        "interface-templates": nautobot.dcim.interface_templates,
        "interfaces": nautobot.dcim.interfaces,
        "inventory-items": nautobot.dcim.inventory_items,
        "ip-addresses": nautobot.ipam.ip_addresses,
        "manufacturers": nautobot.dcim.manufacturers,
        "object-changes": nautobot.extras.object_changes,
        "platforms": nautobot.dcim.platforms,
        "power-connections": nautobot.dcim.power_connections,
        "power-outlet-templates": nautobot.dcim.power_outlet_templates,
        "power-outlets": nautobot.dcim.power_outlets,
        "power-port-templates": nautobot.dcim.power_port_templates,
        "power-ports": nautobot.dcim.power_ports,
        "prefixes": nautobot.ipam.prefixes,
        "rack-groups": nautobot.dcim.rack_groups,
        "rack-reservations": nautobot.dcim.rack_reservations,
        "rack-roles": nautobot.dcim.rack_roles,
        "racks": nautobot.dcim.racks,
        "rear-port-templates": nautobot.dcim.rear_port_templates,
        "rear-ports": nautobot.dcim.rear_ports,
        "regions": nautobot.dcim.regions,
        "reports": nautobot.extras.reports,
        "rirs": nautobot.ipam.rirs,
        "roles": nautobot.ipam.roles,
        "services": nautobot.ipam.services,
        "sites": nautobot.dcim.sites,
        "tags": nautobot.extras.tags,
        "tenant-groups": nautobot.tenancy.tenant_groups,
        "tenants": nautobot.tenancy.tenants,
        "topology-maps": nautobot.extras.topology_maps,
        "virtual-chassis": nautobot.dcim.virtual_chassis,
        "virtual-machines": nautobot.virtualization.virtual_machines,
        "virtualization-interfaces": nautobot.virtualization.interfaces,
        "vlan-groups": nautobot.ipam.vlan_groups,
        "vlans": nautobot.ipam.vlans,
        "vrfs": nautobot.ipam.vrfs,
    }
    try:
        return endpoint_map[term]
    except KeyError:
        # Still a KeyError (backward compatible) but with an actionable message.
        raise KeyError(f"unknown endpoint term: {term!r}") from None
def center_id_from_filename(filename):
    """Return the center_id embedded in a rollgen PDF output filename.

    Filenames look like NNNNN_XXXXXX.pdf: the center id is everything
    before the first underscore.
    """
    underscore_at = filename.index('_')
    return int(filename[:underscore_at])
def transcript(cloud_response):
    """Get the text transcription with the highest confidence
    from a Google Cloud speech-to-text response.

    Args:
        cloud_response: response from the speech-to-text service
    Returns:
        (transcription, confidence): string value of transcription
        with corresponding confidence score; (None, 0.0) when the
        response has no usable results.
    """
    transcription = None
    confidence = 0.0
    try:
        for result in cloud_response.results:
            for alt in result.alternatives:
                if confidence < alt.confidence:
                    confidence = alt.confidence
                    transcription = alt.transcript
    except Exception:
        # Best-effort: a malformed response yields (None, 0.0). The previous
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        pass
    return (transcription, confidence)
from typing import Any
def _default_key_func(item: Any):
    """Sort key ordering items by their "timestampMs" value (None if absent)."""
    timestamp = item.get("timestampMs")
    return timestamp
def scalar_floordiv(x, y):
    """Implementation of `scalar_floordiv`: floor division of x by y.

    Uses the `//` operator so Python can also try y.__rfloordiv__ when x
    does not implement the operation; calling x.__floordiv__ directly
    would return NotImplemented in that case.
    """
    return x // y
def volt2temp(volt):
    """
    Convert a sensor voltage (float, volts) to a temperature (float).
    Transfer function: T = (Vout - 0.5 V) / (10 mV per degree).
    Eq at: http://ww1.microchip.com/downloads/en/DeviceDoc/20001942G.pdf
    """
    offset_voltage = volt - 0.5
    return offset_voltage / 0.010
import json
def read_from_file(file_name):
    """Read and deserialize JSON data from the given file."""
    with open(file_name, 'r') as load_file:
        parsed = json.load(load_file)
    return parsed
def mean_photon_v(pk1, pk2, pk3, mu1, mu2, mu3):
    """
    Mean photon number for a signal sent by Alice (individual values).

    Parameters
    ----------
    pk1, pk2, pk3 : float
        Probabilities that Alice prepares a signal with intensity 1, 2, 3.
    mu1, mu2, mu3 : float
        Intensities 1, 2, 3.

    Returns
    -------
    float
        Mean signal photon number: sum of pk * mu over the three settings.
    """
    probabilities = (pk1, pk2, pk3)
    intensities = (mu1, mu2, mu3)
    return sum(p * mu for p, mu in zip(probabilities, intensities))
def CsvEscape(text):
    """Escapes a data entry for consistency with the CSV format.

    The CSV format rules:
      - Fields with embedded commas must be enclosed within double-quote
        characters.
      - Fields with embedded double-quote characters must be enclosed within
        double-quote characters, and each embedded double-quote must be
        represented by a pair of double-quote characters.
      - Fields with embedded line breaks must be enclosed within double-quote
        characters.
      - Fields with leading or trailing spaces must be enclosed within
        double-quote characters.

    Args:
        text: str Data entry.

    Returns:
        str CSV encoded data entry ('' for falsy input).
    """
    if not text:
        return ''
    if '"' in text:
        text = text.replace('"', '""')
    # The previous version also tested `text == ''` here, which was dead code:
    # empty input already returned above.
    needs_quoting = (',' in text or '"' in text or '\n' in text or
                     '\r' in text or text[0] == ' ' or text[-1] == ' ')
    if needs_quoting:
        text = '"%s"' % text
    return text
from typing import Union
def string_to_number(string: str) -> Union[int, float]:
    """Convert a numeric string to int or float, keeping its original type
    (a decimal point marks a float)."""
    if "." in string:
        return float(string)
    return int(string)
def fib_recursivo(n):
    """
    Recursively compute term n of the Fibonacci sequence.

    @param n: position to return (0-based)
    @return: the n-th term of the sequence
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib_recursivo(n - 1) + fib_recursivo(n - 2)
def avg(first_num, second_num):
    """Compute the arithmetic mean of two numbers (always a float)."""
    total = first_num + second_num
    return total / 2.0
def list_workers(input_data, workerlimit):
    """
    Number of worker threads to use: the length of the iterable,
    capped at the provided limit.

    :param input_data: Input data, some sized iterable.
    :type input_data: list
    :param workerlimit: Maximum number of workers.
    :type workerlimit: int
    :return: the worker count
    :rtype: int
    """
    # min() replaces the hand-rolled conditional comparison.
    return min(len(input_data), workerlimit)
def upper(input_string: str) -> str:
    """Return a copy of `input_string` with every character uppercased."""
    result = input_string.upper()
    return result
def auth_set(hashed_sks, auth_set_indices, height, hashfun):
    """Return the authentication set defined by the given indices.

    Keyword arguments:
    hashed_sks       -- the hashed secret key components that form the
                        leaves of the tree
    auth_set_indices -- list of (h, i) tuples giving the height and index of
                        the nodes that should end up in the authentication set
    height           -- the height of the binary tree
    hashfun          -- a 2n -> n hash function used to produce a parent
                        node from its two child nodes

    Returns:
    A list of ((h, i), hash) tuples — height, index, and hash of each node in
    the authentication set, in the same order as auth_set_indices.
    """
    # levels[0] are the leaves; level k holds 2**(height - k) nodes.
    # The root (level == height) is never needed, so it is not computed.
    levels = [hashed_sks]
    for level in range(1, height):
        below = levels[level - 1]
        node_count = 2 ** (height - level)
        levels.append([hashfun(below[2 * j], below[2 * j + 1])
                       for j in range(node_count)])
    return [((h, i), levels[h][i]) for h, i in auth_set_indices]
def read_credentials(filepath='.yelp/credentials'):
    """Read a credentials file and return the important fields as a dict.

    Lines of the form `client_id=...` and `api_key=...` are collected;
    other lines are ignored.

    :param filepath: path to the credentials file
    :return: dict mapping 'client_id'/'api_key' to their stripped values
    """
    with open(filepath, 'r') as f:
        contents = f.readlines()
    credentials = {}
    key_items = ['client_id', 'api_key']
    for line in contents:
        for item in key_items:
            if item in line:
                # split('=', 1): only the FIRST '=' separates key and value,
                # so values that themselves contain '=' are kept intact.
                credentials[item] = line.split('=', 1)[1].strip()
    return credentials
def get_ns(tag):
    """
    Extract the namespace from a tag.

    This is the opposite of get_local_name: it returns the first part of
    the tag (the text between '{' and '}'), or '' when there is none.

    Parameters
    ----------
    tag : str
        Tag to process.
    """
    p_open = tag.find('{')
    p_close = tag.find('}')
    if p_open == -1 or p_close == -1:
        return ''
    return tag[p_open + 1:p_close]
import ast
def convert_types_in_dict(xml_dict):
    """
    Evaluate every dictionary entry as a Python literal (dicts read from XML
    are always strings). Values that cannot be converted are passed through
    unchanged.

    :param xml_dict: Dict - Dictionary of XML entries
    :return: Dict - Dictionary with converted values
    """
    out = {}
    for key, raw in xml_dict.items():
        try:
            out[key] = ast.literal_eval(raw)
        except (ValueError, SyntaxError):
            # literal_eval raises SyntaxError (not just ValueError) for many
            # ordinary strings, e.g. "foo bar"; keep the original value.
            out[key] = raw
    return out
def get_parameters(parameters):
    """
    Return (min_confidence, min_support) from the parameters dict,
    falling back to defaults 0.75 and 2 when they are not given.
    """
    return (parameters.get("min_confidence", 0.75),
            parameters.get("min_support", 2))
import math
def ZScore_GeneGo(n, N, n1, n2):
    """Z-score ranking subnetworks by saturation with objects from the initial gene list.

    A high Z-score means the subnetwork is highly saturated with genes from
    the experiment.

    Parameters (all counts):
        n  -- white in picked (list genes found in the subnetwork)
        N  -- total
        n1 -- white (genes in the list)
        n2 -- picked (genes in the subnetwork / my gene list)

    Z = (n - n1*n2/N) / Sqrt(n1*n2/N*(1 - n2/N)*(1 - (n1-1)/(N-1)))
      = (n*N - n1*n2) / Sqrt(n1*n2*(N-n1)*(N-n2)/(N-1))
    Notice this formula is symmetrical for n1 and n2.
    """
    # Denominator of the z-score; zero when n1 or n2 is 0 or equals N.
    r=math.sqrt(n1*n2*(N-n1)*(N-n2)/(N-1))
    # Guard against division by a (near-)zero denominator.
    if r<1e-100: return 0.0
    # `*1.0` keeps the numerator in float arithmetic.
    return (n*N*1.0-n1*n2)/r
def read_acl_file(filename):
    """Read ACL entries from the given file.

    Lines starting with "#" are comments and are skipped; every other line
    is stripped of surrounding whitespace.

    Args:
        filename: name of file to be read for acl information

    Returns:
        list: list containing ACL entries
    """
    # `with` guarantees the handle is closed even if iteration fails;
    # the previous version used explicit open()/close() without a finally.
    with open(filename, 'r') as f:
        return [line.strip() for line in f if not line.startswith("#")]
def countDigits(n):
    """Count the number of digits in the integer `n` (sign ignored)."""
    return len(str(abs(n)))
def safe_issubclass(cls, classinfo):
    """As the builtin issubclass, but returns False instead of raising
    TypeError when the first argument is not actually a class."""
    try:
        result = issubclass(cls, classinfo)
    except TypeError:
        return False
    return result
from typing import Optional
def doc_to_doc_string(doc: Optional[str], indent_level: int = 0) -> str:
    """Generate a documentation string from a schema salad doc field,
    prefixing each line with an indented '* ' bullet."""
    if not doc:
        return ""
    lead = " " + " " * indent_level + "* "
    return "\n".join(lead + line for line in doc.split("\n"))
def unzip(zipped):
    """
    Disaggregate a list of tuples into a list of tuples of corresponding
    positions (the inverse of zip).

    :param list-of-tuple zipped: list of tuples to be disaggregated
    :returns: list of tuples

    Example
    _______
    >>> unzip([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
    [(1, 4, 7), (2, 5, 8), (3, 6, 9)]
    """
    transposed = zip(*zipped)
    return list(transposed)
def can_manage_content(user, content):
    """
    Return True if `user` is staff or the instructor (owner) of the
    content's course; False otherwise (including anonymous users).
    """
    if not user.is_authenticated():
        return False
    is_owner = user == content.course.owner
    return bool(user.is_staff or is_owner)
from typing import List
from typing import Tuple
def extract_node_waypt(way_field: List[Tuple[str, str]]) -> int:
    """
    Extract the lane ID from a list holding a reference node such as [('ref', '0')].

    Args:
        way_field: key and node id pair to extract
    Returns:
        node_id: unique ID for a node waypoint
    """
    key, node_id = way_field[0]
    assert key == "ref"
    return int(node_id)
from typing import List
def create_subdict(data: List):
    """Wrap `data` in a dictionary under the 'data' key — the starting
    point to which model parameters can later be appended.

    Parameters
    -----------
    data: List
        Data (potentially pca component output in 2d form).

    Return
    -------
    Dict:
        Dictionary with a 'data' key.
    """
    return dict(data=data)
def get_suggestion_string(sugg):
    """Return the suggestions formatted as a '. Did you mean ...?' string,
    or '' when there are none."""
    suggestions = list(sugg)
    if not suggestions:
        return ""
    return ". Did you mean " + ", ".join(suggestions) + "?"
def get_key(text):
    """Return the keyword: the first word of `text`, treating '(' and ':'
    as additional word boundaries."""
    separated = text.replace('(', ' (').replace(':', ' :').strip()
    first_word = separated.split(' ', 1)[0]
    return first_word.strip()
from typing import Union
from typing import List
def read_file(file_path: str, split: bool = False) -> Union[str, List[str]]:
    """
    Read a text file.

    >>> from snakypy import helpers
    >>> file = '/tmp/my_file.txt'
    >>> helpers.files.create_file('My content file', file, force=True)
    True
    >>> helpers.files.read_file(file)
    'My content file'

    Args:
        file_path (str): You must receive the full/absolute file path.
        split (bool): If this option is True, a list will be returned where
            the breaks will be made using line skips. (default: {False})

    Returns:
        [str|list]: By default it returns a string. If the option split=True,
        a list of line breaks will be returned.
    """
    try:
        with open(file_path) as f:
            content = f.read()
    except FileNotFoundError as err:
        raise FileNotFoundError(f'>>> File "{file_path}" does not exist. {err}')
    return content.split("\n") if split else content
def max_sublist(a_list):
    """Kadane's Algorithm: maximum-sum contiguous sublist.

    >>> max_sublist([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    (6, 3, 6)
    >>> max_sublist([0, -1, 2,- 3, 5, 9, -5, 10])
    (19, 4, 7)

    :param a_list: The list to get the maximum sub-list for.
    :return: The sum from the sublist, the start index, and the end index.
        The last two are for testing. For an empty or all-negative list the
        best sum is 0 (the empty sublist).
    """
    # Seed both accumulators with 0 (the empty prefix). The previous version
    # seeded them with a_list[0] and then iterated over EVERY element,
    # counting the first element twice (e.g. [2, -1] returned sum 4, and an
    # empty list crashed on a_list[0]).
    max_ending_here = 0
    max_so_far = 0
    start_index = 0
    end_index = 0
    for current_index, num in enumerate(a_list):
        max_ending_here = max(0, max_ending_here + num)
        if max_ending_here >= max_so_far:
            end_index = current_index
        if max_ending_here == 0:
            # The running sum was clamped: the next candidate sublist starts
            # after this element.
            start_index = current_index + 1
        max_so_far = max(max_so_far, max_ending_here)
    return max_so_far, start_index, end_index
def get_fold(dict_actors_per_fold, actor_id):
    """
    Get the fold that the actor belongs to.

    :param dict_actors_per_fold: [dict] Map with the fold number as key
        (ranging from 1 to 5) and the list of actors in that test fold as
        the value.
    :param actor_id: [int] Id of the actor whose fold we want.
    :return: the fold key, or None when the actor appears in no fold.
    """
    # items() avoids the needless list(...) copy of the keys and a second
    # lookup per fold; the final None (previously implicit) is now explicit.
    for fold_key, actors in dict_actors_per_fold.items():
        if actor_id in actors:
            return fold_key
    return None
def intToBinaryString(integer : int) -> str:
    """Convert an integer to a string representing a big endian binary number.

    Parameters
    ----------
    integer : int
        A non-negative integer.

    Returns
    -------
    str
        A string representing a big endian binary number ("0" for zero).
    """
    # The previous version returned "" for 0; "0" is the correct rendering.
    if integer == 0:
        return "0"
    bits = []
    while integer > 0:
        integer, remainder = divmod(integer, 2)
        bits.append(str(remainder))
    # Bits were produced least-significant first; reverse for big-endian.
    return "".join(reversed(bits))
def expanding_mean(new_value, old_value, time):
    """
    Incrementally update a running mean.

    Args:
        new_value: this time step's value
        old_value: aggregate (mean) from the last time step
        time: current time, zero-indexed
    Returns:
        mean from t = 0 : time
    """
    count = time + 1
    new_weight = 1. / count
    old_weight = (count - 1.) / count
    return new_weight * new_value + old_weight * old_value
from typing import Callable
from typing import Iterable
def sort_by_reversed(key: Callable):
    """Build a function returning a new list with all items from an iterable
    in descending order, sorted by `key`.

    >>> sort_by_reversed(lambda x: x % 10)([2231, 47, 19, 100])
    [19, 47, 2231, 100]
    """
    # Inner function renamed so it no longer shadows the outer one.
    def _descending_sort(seq: Iterable):
        return sorted(seq, key=key, reverse=True)
    return _descending_sort
import itertools
def is_sorted(iterable, *, key=None, reverse=False, strict=False):
    """Return True if `iterable` is sorted.

    Parameters
    ----------
    iterable : iterable
        The iterable to check for sorted-ness.
    key : x -> y, optional
        Mapping applied to the items before comparison, as for `sorted`.
    reverse : bool, optional
        Check for descending order instead of ascending.
    strict : bool, optional
        Require strictly monotone order (no equal neighbours).
    """
    values = iterable if key is None else map(key, iterable)
    first, second = itertools.tee(values)
    next(second, None)  # empty/singleton input trivially counts as sorted
    pairs = zip(first, second)
    if reverse:
        if strict:
            return all(a > b for a, b in pairs)
        return all(a >= b for a, b in pairs)
    if strict:
        return all(a < b for a, b in pairs)
    return all(a <= b for a, b in pairs)
from typing import Dict
from typing import Tuple
def _has_all_valid_descriptors(peak_descriptors: Dict[str, float],
                               filters: Dict[str, Tuple[float, float]]) -> bool:
    """
    Check that the descriptors of a peak are in a valid range.
    aux function of get_peak_descriptors.

    Parameters
    ----------
    peak_descriptors : dict
        Dictionary from descriptors names to values.
    filters : dict
        Dictionary from descriptors names to minimum and maximum acceptable
        values.

    Returns
    -------
    is_valid : bool
        True if all descriptors are inside the valid ranges.
    """
    # all() short-circuits on the first out-of-range descriptor, exactly like
    # the previous explicit flag-and-break loop; chained comparison replaces
    # the two separate bound checks.
    return all(lb <= peak_descriptors[name] <= ub
               for name, (lb, ub) in filters.items())
import re
def rlist_to_int(string):
    """Convert a register-list string (e.g. "r0-r3,r5") into the
    corresponding bitmask integer, one bit per register."""
    mask = 0
    cleaned = re.sub(r"r|R", "", string)
    for token in re.finditer(r"[^,]+", cleaned):
        bounds = token.group().split("-")
        # A lone register is treated as a one-element range.
        lo, hi = bounds if len(bounds) == 2 else bounds * 2
        mask |= 2 ** (int(hi) + 1) - 2 ** int(lo)
    return mask
def interpolate_template(template, context):
    """Interpolate context into a template by plain-text replacement of
    `{key}` placeholders (no %-formatting involved).

    Args:
        template (str) : String to fill with context
        context (dict) : Dictionary of variables
    Returns:
        str : Populated template
    """
    populated = template
    for key, value in context.items():
        populated = populated.replace('{%s}' % key, str(value))
    return populated
def make_enum_pair(ele, till, delimiter=", "):
    """
    Create a delimited list of pair strings with consecutive 1-based numbering:
    make_enum_pair('P%d p%d', 3) -> 'P1 p1, P2 p2, P3 p3'
    """
    pairs = (ele % (idx, idx) for idx in range(1, till + 1))
    return delimiter.join(pairs)
def average(keyword, values):
    """
    Calculate the average of values[i][keyword] over all dicts in `values`.

    :param keyword: the key of the value you care about
    :param values: [{key: value},...] array of dicts, values assumed numeric
    :return: the mean as a float, or 0.0 for an empty list
    """
    # Guard the empty case first, then let sum() do the accumulation; the
    # previous version also shadowed the function's own name with a local.
    if not values:
        return 0.0
    return sum(float(entry[keyword]) for entry in values) / len(values)
def get_count(inputStr):
    """
    Return the number (count) of vowels in the given string.

    We consider a, e, i, o, and u as vowels. The input string will only
    consist of lower case letters and/or spaces.

    :param inputStr: input string value.
    :return: the number (count) of vowels in the given string.
    """
    # Membership test against a string constant; the previous version
    # rebuilt a list of vowels for every character.
    return sum(1 for ch in inputStr if ch in "aeiou")
def subDict(somedict, somekeys, default=None):
    """
    Return the subset of `somedict` restricted to `somekeys`;
    keys missing from `somedict` map to `default`.
    """
    return {key: somedict.get(key, default) for key in somekeys}
def qset_cmb_box(cmb_box, string, data=False):
    """
    Select the combobox entry matching `string` in a text field (data=False)
    or in a data field (data=True). When `string` is not found among the
    entries, the first entry is selected instead. Signals are blocked while
    the index is changed.

    Returns: the index of the found entry (-1 when not found).
    """
    lookup = cmb_box.findData if data else cmb_box.findText
    found = lookup(str(string))
    cmb_box.blockSignals(True)
    # Fall back to the first entry when the string does not exist.
    cmb_box.setCurrentIndex(found if found != -1 else 0)
    cmb_box.blockSignals(False)
    return found
from typing import Iterator
import itertools
def decimal(start: int = 1) -> Iterator[int]:
    """
    Yield integers incrementing from `start`.
    e.g. 1, 2, 3, .. 9, 10, 11, etc.

    Args:
        start: The first value to start with.
    """
    value = start
    while True:
        yield value
        value += 1
def get_rendered(font, text, color, cache):
    """Simple font renderer that memoizes renders per text in `cache`.

    Note: the cache is keyed on text only, so a cached image is reused
    even when `color` differs from the original render.
    """
    if text not in cache:
        cache[text] = font.render(text, 0, color)
    return cache[text]
import math
def angle_to(tup1, tup2):
    """The angle to tup2 from tup1, measured clockwise against 'straight up',
    in radians. Coordinates: (0,0) is top-left, (1,0) is one step right,
    (0,1) is one step down, and (1,1) is down-right.

    >>> angle_to((1,1),(1,0))/math.pi
    0.0
    >>> angle_to((1,1),(2,0))/math.pi
    0.25
    >>> angle_to((1,1),(1,2))/math.pi
    1.0
    >>> angle_to((1,1),(-1,-1))/math.pi
    1.75
    """
    dx = tup2[0] - tup1[0]
    # y axis points down on screen, so flip it for standard math orientation.
    dy = tup1[1] - tup2[1]
    # Convert from the positive-x-axis convention to clockwise-from-up,
    # then normalise into [0, 2*pi).
    return ((math.pi / 2) - math.atan2(dy, dx)) % (2 * math.pi)
def OkErrMiss(o):
    """
    Compute ok, errors, missed from a ResultStructure-like object and
    return them as a triple (ok, err, miss):
      ok   = true positives
      err  = reported results that were not true positives
      miss = ground-truth items that were not found
    """
    true_pos = o.truePos
    return true_pos, o.resTotal - true_pos, o.gtTotal - true_pos
import logging
def featurize_data(model, array):
    """
    Given a model and an array, perform error checking and return the
    prediction of the full feature array.

    Parameters:
    ----------
        model : keras.models.Model
            The featurizer model performing predictions
        array : np.ndarray
            The vectorized array of images being converted into features

    Returns:
    --------
        full_feature_array : np.ndarray
            A numpy array containing the featurized images
    """
    if len(array.shape) != 4:
        raise ValueError('Image array must be a 4D tensor, with dimensions: '
                         '[batch, height, width, channel]')
    logging.info('Creating feature array.')
    # Recompile to work around a Keras model-loading issue:
    # https://github.com/keras-team/keras/issues/9394
    model.compile('sgd', 'mse')
    features = model.predict(array, verbose=1)
    logging.info('Feature array created successfully.')
    return features
def column2list(matrix: list, column_idx: int) -> list:
    """
    Convert a column of a 2D features matrix into a 1D list of that feature.

    :param matrix: 2D array of numbers (list of rows)
    :param column_idx: column index in the matrix
    :return: the column as a flat list; [] for an empty matrix; [-1] when
        column_idx is out of range.
    """
    if not matrix:
        return []
    if column_idx >= len(matrix[0]):
        # Out-of-range sentinel kept for backward compatibility.
        return [-1]
    # The previous version returned the whole 2D matrix for single-row
    # input instead of extracting the column.
    return [row[column_idx] for row in matrix]
def __join_with_or(times):
    """Format a list as 'a', 'a or b', or 'a, b, or c' (Oxford comma)."""
    if not times:
        return ""
    if len(times) == 1:
        return times[0]
    if len(times) == 2:
        return times[0] + " or " + times[1]
    return ", ".join(times[:-1]) + ", or " + times[-1]
def parseBool(txt):
    """
    Parser for boolean options.

    :param str txt: String from config file to parse.
    :returns: ``True`` only for the exact string 'True', ``False`` otherwise.
    :rtype: boolean
    """
    return 'True' == txt
def seq_to_subset(seq_sol, m, n):
    """Convert a sequence solution (e.g.: ['r2','c3']) into a subset solution
    (e.g.: [[0,1],[0,0,1]]): each 'rK'/'cK' token toggles row/column K (1-based)."""
    assert isinstance(seq_sol, list)
    subset_sol = [[0] * m, [0] * n]
    axis_of = {'r': 0, 'c': 1}
    for token in seq_sol:
        kind = token[0]
        if kind not in axis_of:
            raise RuntimeError(f'This seq_sol is bad written: {seq_sol}')
        position = int(token[1:]) - 1
        # XOR with 1 toggles the 0/1 membership flag.
        subset_sol[axis_of[kind]][position] ^= 1
    return subset_sol
def fit_FC(model, data):
    """Convenience function to fit a flow curve.

    Args:
        model: rheology model (e.g. HB_model)
        data: pandas DataFrame with columns 'Shear rate' and 'Stress'
    Returns:
        lmfit.fitresult
    """
    stress = data["Stress"]
    # Weight each point by the inverse of its stress value.
    return model.fit(stress, x=data["Shear rate"], weights=1 / stress)
def GenerateWIUpdateMsgString(membership, issuer_url, resource_name,
                              cluster_name):
  """Generates the user message about enabling/disabling Workload Identity.

  We do not allow updating issuer url from one non-empty value to another.

  Args:
    membership: membership resource.
    issuer_url: The discovery URL for the cluster's service account token
      issuer.
    resource_name: The full membership resource name.
    cluster_name: User supplied cluster_name.

  Returns:
    A string describing the effect on Workload Identity when the issuer url
    changes from empty to non-empty or vice versa; '' in all other cases.
  """
  had_wi = bool(membership.authority)
  wants_wi = bool(issuer_url)
  if had_wi == wants_wi:
    # No empty/non-empty transition: nothing to warn about.
    return ''
  if had_wi:
    # Issuer cleared: the user is disabling WI on this membership.
    return (f'A membership [{resource_name}] for the cluster [{cluster_name}]'
            ' already exists. The cluster was previously registered with'
            ' Workload Identity enabled. Continuing will disable Workload'
            ' Identity on your membership, and will reinstall the Connect'
            ' agent deployment.')
  # Issuer newly set: the user is enabling WI on this membership.
  return (f'A membership [{resource_name}] for the cluster [{cluster_name}]'
          ' already exists. The cluster was previously registered without'
          ' Workload Identity. Continuing will enable Workload Identity on'
          ' your membership, and will reinstall the Connect agent deployment.')
import random
def choice_optional(lst):
    """Return random.choice(lst) when the list is non-empty, None otherwise."""
    if not lst:
        return None
    return random.choice(lst)
import json
def read_json_file(path=r'web_crawler\scrapped_data\all_scraped_data.json'):
    """Read and deserialize a JSON file.

    Args:
        path: the path to the json file
    """
    with open(path, 'r') as data_file:
        parsed = json.load(data_file)
    return parsed
def grep(*matches):
    """Returns a generator function that operates on an iterable:
    filters items in the iterable that match any of the patterns.

    match: a callable returning a True value if it matches the item

    >>> import re
    >>> input = ["alpha\n", "beta\n", "gamma\n", "delta\n"]
    >>> list(grep(re.compile('b').match)(input))
    ['beta\n']
    """
    # The previous version wrapped this closure in a second, pointless
    # factory (_do_grep_wrapper) that merely forwarded *matches.
    def _do_grep(lines):
        for line in lines:
            # any() short-circuits like the original break.
            if any(match(line) for match in matches):
                yield line
    return _do_grep
from unicodedata import normalize
def unicodify(s, encoding='utf-8', norm=None):
    """Ensure string is Unicode.

    .. versionadded:: 1.31

    Decode encoded strings using ``encoding`` and normalise Unicode
    to form ``norm`` if specified.

    Args:
        s (str): String to decode. May also be Unicode.
        encoding (str, optional): Encoding to use on bytestrings.
        norm (None, optional): Normalisation form to apply to Unicode string.

    Returns:
        unicode: Decoded, optionally normalised, Unicode string.
    """
    result = s if isinstance(s, str) else str(s, encoding)
    if norm:
        result = normalize(norm, result)
    return result
import collections
def eval_step(test_batch, snlds_model, num_samples, temperature):
    """Runs evaluation of model on the test set and returns evaluation metrics.
    Args:
        test_batch: a batch of the test data.
        snlds_model: tf.keras.Model, SNLDS model to be evaluated.
        num_samples: int, number of samples per trajectories to use at eval time.
        temperature: float, annealing temperature to use on the model.
    Returns:
        Dictionary of metrics, str -> list[tf.Tensor],
        aggregates the result dictionaries returned by the model.
    """
    aggregated = collections.defaultdict(list)
    # Run the model ten times and collect each metric into a list.
    for _ in range(10):
        step_result = snlds_model(
            test_batch, temperature, num_samples=num_samples)
        for metric_name, metric_value in step_result.items():
            aggregated[metric_name].append(metric_value)
    return aggregated
def exception_to_dict(error):
    """Serialize an exception's details (without the stacktrace) into a dict.

    Args:
        error (Exception): The exception to serialize

    Returns:
        dict: keys ``type`` (exception class name) and ``message`` (str form).
    """
    # __name__ is already a str; keeping str() around both values for parity
    # with arbitrary exception messages.
    exc_type = type(error).__name__
    return {"type": str(exc_type), "message": str(error)}
def transform_play_to_column(play):
    """Return position of the column where the play was made.

    Parameters:
        play (int): bit board representation of a piece — exactly one set
            bit among the 49 board cells (7 cells per column).
    Returns:
        int : column position (0-6)
    Raises:
        ValueError: if *play* is not a single set bit below 2**49
            (the original list lookup raised ValueError in the same cases).
    """
    # Validate "exactly one bit set, within the 49-cell board" explicitly;
    # the original did this implicitly via list .index() failure.
    if play <= 0 or play & (play - 1) or play >= 2 ** 49:
        raise ValueError(f"{play} is not a single-bit board position")
    # bit_length()-1 == log2(play) for a power of two; avoids rebuilding a
    # 49-element list on every call.
    return (play.bit_length() - 1) // 7
import binascii
def get_internal_name(cache_key):
    """Converts a cache key into an internal space module name.

    Args:
        cache_key: ``(package, version)`` tuple where *version* is bytes.

    Returns:
        str: ``multiversion.space.<package>___<hex(version)>``.
    """
    package, version = cache_key
    # binascii.hexlify returns *bytes* on Python 3; without .decode() the
    # %s formatting embedded a literal "b'...'" in the module name.
    return 'multiversion.space.%s___%s' % (
        package, binascii.hexlify(version).decode('ascii'))
def counting_sort(a, k, reverse=False):
    """Counting sort.

    For every record, count how many records precede it in the desired
    order; that count directly determines the record's final slot. Equal
    records are handled by decrementing the counter as slots are filled.
    All values must lie in the range 0..k.

    :param a: sequence of non-negative integers to sort
    :param k: maximum possible value occurring in *a*
    :param reverse: sort in descending order when True
    :return: a new sorted list
    """
    output = [0] * len(a)      # sorted output buffer
    counts = [0] * (k + 1)     # per-value occurrence counters
    # Tally occurrences of each value.
    for value in a:
        counts[value] += 1
    if reverse:
        # Suffix sums: counts[i] becomes the number of records >= i.
        for i in range(len(counts) - 2, -1, -1):
            counts[i] += counts[i + 1]
    else:
        # Prefix sums: counts[i] becomes the number of records <= i.
        for i in range(1, len(counts)):
            counts[i] += counts[i - 1]
    for value in a:
        # The cumulative count is the 1-based slot for this record.
        output[counts[value] - 1] = value
        # Next record with the same value takes the preceding slot.
        counts[value] -= 1
    return output
def build_index(data: list) -> tuple:
    """
    Build two lookup indexes over the raw data for faster queries. Format -
    {
        'product1': 'shops': set([1, 3, 5]),
        ...
    }
    {
        shop1: [
            (price1, [item1, item2, ...]),
            ...
        ],
        ...
    }
    :param data: List of tuples containg data about products and shops
    :return: An index based on products and shops
    """
    item_index = {}
    shop_index = {}
    for entry in data:
        shop_id, price = entry[0], entry[1]
        # Map every product to the set of shops carrying it.
        for product in entry[2]:
            item_index.setdefault(product, set()).add(shop_id)
        # Group (price, remaining fields) offers per shop.
        shop_index.setdefault(shop_id, []).append((price, entry[2:]))
    return item_index, shop_index
def bst_contains(node, value):
    """
    Return whether tree rooted at node contains value.
    Assume node is the root of a Binary Search Tree
    @param BinaryTree|None node: node of a Binary Search Tree
    @param object value: value to search for
    @rtype: bool
    >>> bst_contains(None, 5)
    False
    >>> bst_contains(BinaryTree(7, BinaryTree(5), BinaryTree(9)), 5)
    True
    """
    if node is None:
        return False
    if value < node.data:
        # BST invariant: smaller values live in the left subtree.
        return bst_contains(node.left, value)
    if value > node.data:
        return bst_contains(node.right, value)
    # Neither smaller nor larger: found it. (The original ended with an
    # unreachable `assert False, "WTF!"` dead branch — removed.)
    return True
def invert_dict(d):
    """Return an 'inverted' dictionary, swapping keys against values.
    Parameters
    ----------
    d : dict-like
        The dictionary to invert
    Returns
    --------
    inv_d : dict()
        The inverted dictionary.
    Notes
    ------
    If the key-mapping is not one-to-one, then the dictionary is not
    invertible and a ValueError is thrown.
    """
    inverted = {value: key for key, value in d.items()}
    # Duplicate values collapse into one key, shrinking the result.
    if len(inverted) != len(d):
        raise ValueError('Key-value mapping is not one-to-one.')
    return inverted
def c_uchar(i):
    """
    Convert arbitrary integer to c unsigned char type range as if casted in c.
    >>> c_uchar(0x12345678)
    120
    >>> (c_uchar(-123), c_uchar(-1), c_uchar(255), c_uchar(256))
    (133, 255, 255, 0)
    """
    # Python's modulo is always non-negative for a positive divisor,
    # so i % 256 is identical to i & 0xFF for every int.
    return i % 256
def is_same_shape(T1, T2):
    """
    Two partial latin squares T1, T2 have the same shape if T1[r, c] =
    0 if and only if T2[r, c] = 0.
    EXAMPLES::
        sage: from sage.combinat.matrices.latin import *
        sage: is_same_shape(elementary_abelian_2group(2), back_circulant(4))
        True
        sage: is_same_shape(LatinSquare(5), LatinSquare(5))
        True
        sage: is_same_shape(forward_circulant(5), LatinSquare(5))
        False
    """
    # Empty cells are encoded as negative entries; shapes agree when the
    # sign (empty vs filled) matches cell-for-cell.
    return all(
        (T1[r, c] < 0) == (T2[r, c] < 0)
        for r in range(T1.nrows())
        for c in range(T1.ncols())
    )
def condition_str(condition, phase, component):
    """
    Returns a string representation of a condition for a specific phase and
    component, e.g. ``cond(phase,component)`` or ``cond(component)`` when no
    phase is given.
    """
    # PEP 8: identity comparison for None (was `phase != None`).
    if phase is not None:
        return f'{condition}({phase},{component})'
    return f'{condition}({component})'
def list_to_string(l: list, s: str = "\n") -> str:
    """Transforms a list into a string.
    Every entry of the list is followed by the separator (including the
    last one, matching the original accumulation loop).
    Args:
        l (list): the list
        s (str, optional): the seperator. Defaults to "\\n".
    Returns:
        str: the list representation with seperator
    """
    # join avoids the quadratic repeated string concatenation of `r += e + s`.
    return "".join(e + s for e in l)
def _parse_idd_type(epbunch, name):
"""Parse the fieldvalue type into a python type.
Possible types are:
- integer -> int
- real -> float
- alpha -> str (arbitrary string),
- choice -> str (alpha with specific list of choices, see \key)
- object-list -> str (link to a list of objects defined elsewhere, see \object-list and \reference)
- external-list -> str (uses a special list from an external source, see \external-list)
- node -> str (name used in connecting HVAC components)
"""
_type = next(iter(epbunch.getfieldidd_item(name, "type")), "").lower()
if _type == "real":
return float
elif _type == "alpha":
return str
elif _type == "integer":
return int
else:
return str | bbf86c41ad685c2fb44f24671d22cf6601794cc1 | 29,929 |
def dot_in_stripe(dot, stripe):
    """True if dot.y is in horizontal stripe (exclusive lower bound,
    inclusive upper bound)."""
    lower, upper = stripe
    return lower < dot.y <= upper
def parse_agents(args):
    """
    Each element is a class name like "Peer", with an optional
    count appended after a comma. So either "Peer", or "Peer,3".
    Returns an array with a list of class names, each repeated the
    specified number of times.
    """
    expanded = []
    for spec in args:
        parts = spec.split(',')
        if len(parts) == 1:
            # Bare class name: appears once.
            expanded.append(parts[0])
        elif len(parts) == 2:
            name, count = parts
            expanded.extend([name] * int(count))
        else:
            raise ValueError("Bad argument: %s\n" % spec)
    return expanded
import math
def A000010(n: int) -> int:
    """Euler totient function phi(n): count numbers <= n and prime to n."""
    # Count i in [0, n) with gcd(i, n) == 1 directly instead of building a
    # throwaway list. Note gcd(0, n) == n, so 0 is counted only when n == 1,
    # matching the original range(n) behaviour.
    return sum(1 for i in range(n) if math.gcd(i, n) == 1)
import math
def CalcDistance(p1, p2):
    """
    Function to calculate Euclidean distance in space between two points (p)
    p1, p2: (f) lists of coordinates for point1 and point2
    """
    # Bug fix: the z-term previously read (p2[2]-p2[2])**2, which is always 0,
    # so the z axis was silently ignored.
    dist = math.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2 + (p2[2]-p1[2])**2)
    return(dist)
def getlambda(pixel, lo, hi):
    #-----------------------------------------------------------------------------
    """
    Small utility to calculate lambda on a line for given position
    in pixels. Returns 0.5 when the position is unknown or the span is empty.
    """
    #-----------------------------------------------------------------------------
    if pixel is None:
        return 0.5
    span = hi - lo
    if span == 0.0:
        # Degenerate interval: fall back to the midpoint.
        return 0.5
    return (pixel - lo) / span
def sort_by_name(dicts):
    """
    Sorting of a list of dicts. The sorting is based on the name field,
    case-insensitively; dicts without a name sort as the empty string.
    Args:
        list: The list of dicts to sort.
    Returns:
        Sorted list.
    """
    def name_key(entry):
        return entry.get("name", "").lower()
    return sorted(dicts, key=name_key)
import re

# Compiled once at import time: matches runs of non-alphanumerics plus '_'
# (\W alone would keep underscores, which are "word" characters).
_NON_ALPHANUMERIC = re.compile(r'[\W_]+')


def alphanum_string(input_string):
    """
    Removes all non-alphanumeric characters from the given string.
    """
    return _NON_ALPHANUMERIC.sub('', input_string)
def preprocess_doi(line):
    """
    Removes doi.org prefix if full URL was pasted, then strips unnecessary slashes
    """
    # rpartition yields ('', '', line) when the marker is absent, so a bare
    # DOI passes through unchanged.
    tail = line.rpartition('doi.org')[2]
    return tail.strip('/')
def point_in_rectangle(point, rect_top_left, rect_sides):
    """
    Checks if point lies strictly inside a rectangle (borders excluded).
    Parameters
    ----------
    point : (float, float)
        (x,y) coordinates of point
    rect_top_left : (float, float)
        (x,y) coordinates of rectangle top left corner
    rect_sides : (float, float)
        (x,y) lengths of rectangle sides
    Returns
    -------
    bool
        True if point is in rectangle, otherwise False.
    """
    x, y = point
    left, top = rect_top_left
    width, height = rect_sides
    # Strict inequalities: points on the border are *not* inside.
    return (left < x < left + width) and (top < y < top + height)
def prefix_dash_dash(params):
    """ Add -- for keys in gdmix tfjob params. """
    # Guard clause first, then a dict comprehension for the prefixing.
    if not isinstance(params, dict):
        raise ValueError("job params can only be dict")
    return {"--{}".format(key): value for key, value in params.items()}
import importlib
def get_from_module(module, attr_name):
    """ Return a reference from a module.
    The attribute name must exist in the module, it could be a variable,
    a callable or a class.
    If reference name doesn't exists in the module it will return None.
    Example:
        >>> get_from_module("my.module", "my_object")
        >>> get_from_module("my.module", "my_function")
        >>> get_from_module("my.module", "MyClass")
    :param basestring module: The module name.
    :param basestring attr_name: What should be returned from the module.
    :return: The value resolved by the module and attr name provided or None.
    """
    mod = importlib.import_module(module)
    # getattr's default replaces the try/except AttributeError dance.
    return getattr(mod, attr_name, None)
import torch
def get_nmse(x_hat, x):
    """
    Calculate ||x_hat - x|| / ||x||
    """
    # Per-image squared error and reference energy, reduced over all
    # non-batch axes (dims 1..3).
    err_energy = ((x_hat - x) ** 2).sum(dim=[1, 2, 3])
    ref_energy = (x ** 2).sum(dim=[1, 2, 3])
    nmse_val = err_energy / ref_energy
    return nmse_val.cpu().numpy().flatten()
def convert_to_table(header, rows):
    """
    Create an HTML table out of the sample data.
    Args:
        header (str): The table header
        rows (List[str]): A list of rows as strings
    Returns:
        A dataset sample in the form of an HTML <table>, or None when there
        are no rows.
    """
    if not rows:
        return None
    head_cells = '<tr><th>{}</th></tr>'.format('</th><th>'.join(header))
    body = '\n'.join(
        '<tr><td>{}</td></tr>'.format('</td><td>'.join(row)) for row in rows
    )
    return '<table>{header}\n{rows}</table>'.format(header=head_cells, rows=body)
def looksLikeVestLutFile(path):
    """Returns ``True`` if the given ``path`` looks like a VEST LUT file,
    ``False`` otherwise. Only the first line is decisive; at most 10 lines
    are read.
    """
    validHeaders = ('%!VEST-LUT', '%BeginInstance', '%%BeginInstance')
    with open(path, 'rt') as f:
        lines = []
        for _ in range(10):
            line = f.readline()
            # readline() returns '' at EOF — it never returns None, so the
            # original `if line is None` check was dead code.
            if not line:
                break
            lines.append(line.strip())
    return len(lines) > 0 and lines[0] in validHeaders
from typing import Dict
import csv
def load_subjects(csvfile: str) -> Dict:
    """Load a list of subjects from a csv file along with metadata
    Subject,Age,Gender,Acquisition,Release
    195041,31-35,F,Q07,S500
    ...
    Return a dictionary keyed by Subject; values hold age/gender when the
    Age column is present, otherwise an empty dict.
    """
    subjects: Dict = {}
    # utf-8-sig transparently drops a BOM if the file was exported by Excel.
    with open(csvfile, 'r', encoding='utf-8-sig') as handle:
        for row in csv.DictReader(handle):
            if 'Age' in row:
                subjects[row['Subject']] = {'age': row['Age'], 'gender': row['Gender']}
            else:
                subjects[row['Subject']] = {}
    return subjects
def _mai(a: int, n: int) -> int:
"""
Modular Additive Inverse (MAI) of a mod n.
"""
return (n - a) % n | 634af47425e9bd3afec12742782755a3f0f4ac1f | 29,987 |
import re
def get_model_presets(config, model):
    """Collect all the parameters model from the UI and return them as a dict.
    Args:
        config (dict): Recipe config dictionary obtained with dataiku.customrecipe.get_recipe_config().
        model (str): Model name found in the UI.
    Returns:
        Dictionary of model parameters to be used as kwargs in gluonts Predictor.
    """
    # Compile once; keys look like "<model>_model_<param>" (case-insensitive).
    pattern = re.compile(f"{model}_model_(.*)", re.IGNORECASE)
    presets = {}
    for key, value in config.items():
        matched = pattern.match(key)
        if matched:
            presets[matched.group(1)] = value
    return presets
def _get_name(index, hdf5_data):
"""Retrieves the image file name from hdf5 data for a specific index.
Args:
index (int): Index of image.
hdf5_data (obj): h5py file containing bounding box information.
Returns:
(str): Image file name.
"""
ref = hdf5_data['/digitStruct/name'][index, 0]
file_name = ''.join([chr(item) for item in hdf5_data[ref][:]])
return file_name | 7b65d6f6aede25265865734dae6a94def8b3524f | 29,995 |
import re
from datetime import datetime
import time
def extract_date(str_date):
    """Find the first %Y-%m-%d string
    and return a (datetime, remainder-of-string) tuple; (None, str_date)
    when no date is present.
    """
    # Raw string for the regex: '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning, SyntaxError in future Pythons).
    o_match = re.search(r'\d{4}-\d{2}-\d{2}', str_date)
    if o_match is None:
        return (None, str_date)
    i_start = o_match.start()
    i_end = i_start + 10
    parsed = datetime(
        *(time.strptime(str_date[i_start:i_end], "%Y-%m-%d")[0:6]))
    return (parsed, str_date[:i_start] + str_date[i_end:])
import operator
def freeze_dict(dict_):
    """Freezes ``dict`` into ``tuple``.
    A typical usage is packing ``dict`` into hashable.
    e.g.::
        >>> freeze_dict({'a': 1, 'b': 2})
        (('a', 1), ('b', 2))
    """
    # Sort by key only; values need not be comparable.
    return tuple(sorted(dict_.items(), key=operator.itemgetter(0)))
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for comment line like:
    # distutils: language = c++
    Returns True if such a line is present in the file, else False.
    """
    # Context manager: the original leaked the file handle (open() with no
    # close), which also keeps the file locked on Windows.
    with open(path, 'rt') as source:
        for line in source:
            if line.startswith('#') and '=' in line:
                splitted = line.split('=')
                if len(splitted) != 2:
                    continue
                lhs, rhs = splitted
                if (lhs.strip().split()[-1].lower() == 'language'
                        and rhs.strip().split()[0].lower() == 'c++'):
                    return True
    return False
from typing import Dict
from typing import Union
from typing import List
import json
def load_connections() -> Dict[str, Dict[str, Union[int, str, List[str]]]]:
    """Load and return the static set of connections from the bundled
    ``data/connections.json`` file."""
    with open('data/connections.json') as conn_file:
        return json.load(conn_file)
def is_float(num):
    """
    given a string variable, returns True if the string can be directly converted to a float, otherwise returns False
    :param num: string to check whether or not it can be converted to a float
    :type num: string
    :return: True: the string can be converted to a float (via the float(<string>) function). False: it cannot.
    :rtype: boolean
    """
    # EAFP: attempt the conversion and report whether it raised.
    try:
        float(num)
    except ValueError:
        return False
    return True
def get_descendant_ids(node):
    """
    This filter returns the ids of all the node's descendants.
    :param node: The requested node
    :type node: ~integreat_cms.cms.models.abstract_tree_node.AbstractTreeNode
    :return: The list of all the node's descendants' ids
    :rtype: list [ int ]
    """
    descendants = node.get_cached_descendants(include_self=True)
    return [descendant.id for descendant in descendants]
def kinetic_energy(momentum, mass):
    """Compute the kinetic energy of moving particles

    Arguments:
        momentum (array-like of float): momentum of the particles
        mass (array-like of float): mass of the particles
    """
    # NOTE(review): this evaluates 0.5 * sum(momentum / mass), not the
    # textbook 0.5 * sum(momentum**2 / mass); confirm upstream whether
    # `momentum` already holds squared momenta (or this is intentional).
    if momentum.ndim == 3:
        # assumes a 3-D momentum carries a leading batch axis, so mass gains
        # a matching leading axis for broadcasting — TODO confirm shapes.
        mass = mass[None, :]
    # Divide per-particle (mass broadcast over the last axis), then reduce
    # over the final two axes (particles x components).
    return 0.5 * (momentum / mass[..., None]).sum(axis=(-2, -1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.