content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---|
from typing import Tuple
def get_happiest_and_saddest(emotions: list) -> Tuple[int, int]:
"""
Get happiest and saddest index
:param emotions: list of lists containing emotions likelihood
:return: happiest_tweet_index, saddest_tweet_index
"""
happiest_item = max(emotions, key=lambda e: e[2]) # Based on joy
saddest_item = max(emotions, key=lambda e: e[4]) # Based on sadness
return emotions.index(happiest_item), emotions.index(saddest_item) | c31b9d4d8908f49a03c909f6ba1716c430bbd30f | 23,583 |
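A quick usage sketch (the emotion vectors below are made up; per the function above, index 2 is joy and index 4 is sadness):
emotions = [
    [0.1, 0.0, 0.9, 0.0, 0.05],  # high joy
    [0.2, 0.1, 0.1, 0.0, 0.80],  # high sadness
]
happiest, saddest = get_happiest_and_saddest(emotions)
assert (happiest, saddest) == (0, 1)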
def has_gendered_nouns(doc):
"""
Doc-level spaCy attribute getter, which returns True if
any Token with a NOUN or PROPN pos_ tag is of "m" or "f" gender.
"""
noun_genders = [token._.gender for token in doc if token.pos_ in ["NOUN", "PROPN"]]
has_gendered_noun = any([g in ["m", "f"] for g in noun_genders])
return has_gendered_noun | dbf28f9d43ce5bcd5cb1b391f14f35076d193975 | 23,595 |
def retrieveSampleAnnotationsFromCondensedSdrf(condensedSdrfStr):
"""
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("")
>>> len(diseases)
0
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tcell line\\t5637\\thttp://www.ebi.ac.uk/efo/EFO_0002096")
>>> len(diseases) + len(tissues) + len(crossRefs)
0
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tdisease\\tbladder carcinoma\\thttp://www.ebi.ac.uk/efo/EFO_0000292")
>>> "bladder carcinoma" in diseases
True
>>> "EFO_0000292" in crossRefs
True
>>> tissues
set()
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-513\\t\\tERR030881\\tfactor\\torganism part\\tadrenal\\thttp://purl.obolibrary.org/obo/UBERON_0002369")
>>> "adrenal" in tissues
True
>>> "UBERON_0002369" in crossRefs
True
>>> diseases
set()
"""
    diseases, tissues, crossRefs = (set(), set(), set())
for row in condensedSdrfStr.split("\n"):
arr = row.strip().split("\t")
if len(arr) > 4 and arr[3] == "factor":
if arr[4].lower() == "organism part":
tissues.add(arr[5].strip())
if len(arr) > 6:
crossRefs.add(arr[6].split("/")[-1].strip())
elif arr[4].lower() == "disease":
diseases.add(arr[5].strip())
if len(arr) > 6:
crossRefs.add(arr[6].split("/")[-1].strip())
return (diseases, tissues, crossRefs) | cfada3e870e0ec1b6790381aa7bb966e6ecf8f41 | 23,601 |
def _strip_prefix(s, prefix):
"""A helper to strip the prefix from the string if present"""
return s[len(prefix):] if s and s.startswith(prefix) else s | 8438b6e8c3b7e478fe93ede3433adaa9a22a739e | 23,605 |
def in_cksum_done(s):
"""Fold and return Internet checksum."""
while (s >> 16):
s = (s >> 16) + (s & 0xffff)
return (~s & 0xffff) | 25011c254e89179fe4232ad0ecfa0a847bf0b30b | 23,609 |
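A worked example, assuming an accumulated 16-bit one's-complement sum with carry bits, as in RFC 1071 checksums:
s = 0x1F5C3                        # running sum with overflow above 16 bits
assert in_cksum_done(s) == 0x0A3B  # fold the carry, then take one's complement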
import ast
def empty_list(lineno=None, col=None):
"""Creates the AST node for an empty list."""
return ast.List(elts=[], ctx=ast.Load(), lineno=lineno, col_offset=col) | 2def486baf5537d2754312c6234a7908c4aa46dd | 23,612 |
def flip_transpose(arr):
"""
Flip a 2D-list (i.e. transpose).
"""
    m = len(arr)      # rows in the input
    n = len(arr[0])   # columns in the input
    # The transpose of an m-by-n list is n-by-m
    res = [[-1 for _ in range(m)] for _ in range(n)]
    for i in range(n):
        for j in range(m):
            res[i][j] = arr[j][i]
return res | ad0bb8b1e3a67cc0323425b5cba26d66665336e0 | 23,618 |
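A usage sketch on a rectangular input, showing a 2x3 list becoming 3x2:
grid = [[1, 2, 3],
        [4, 5, 6]]
assert flip_transpose(grid) == [[1, 4], [2, 5], [3, 6]]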
def _mean(items):
"""Return average (aka mean) for sequence of items."""
return sum(items) / len(items) | 49fd8e1b42e3b454103074b512e32c1c443f974f | 23,620 |
import random
def rand_pl(m: int) -> int:
"""
Random integer distributed by a power law in the limit of the parameter m
E.g.:
With m = 2
returns 1 80% of the time
returns 2 20% of the time
With m = 3
returns 1 73.47% of the time
returns 2 18.37% of the time
returns 3 8.16% of the time
"""
weight = (i**-2 for i in range(1, m+1))
chs = random.choices(range(1, m+1), tuple(weight))
return chs[0] | ab48cf84ba3cf1d62ffcac0d2e702e7936d573b2 | 23,623 |
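A minimal, seeded sanity check of the sampling weights (exact counts depend on the RNG stream, so only the ordering is asserted):
from collections import Counter
random.seed(0)
counts = Counter(rand_pl(2) for _ in range(10_000))
assert counts[1] > counts[2]  # roughly 80% vs 20%, per the docstring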
def format_duration(secs):
"""
>>> format_duration(0)
'00:00'
>>> format_duration(1)
'00:01'
>>> format_duration(100)
'01:40'
>>> format_duration(10000)
'02:46:40'
>>> format_duration(1000000)
'277:46:40'
>>> format_duration(0.0)
'00:00.000'
>>> format_duration(0.5)
'00:00.500'
>>> format_duration(12345.6789)
'03:25:45.679'
>>> format_duration(-1)
'-00:01'
>>> format_duration(-10000)
'-02:46:40'
"""
if secs < 0:
return '-' + format_duration(-secs)
else:
s = int(secs) % 60
m = int(secs) // 60 % 60
h = int(secs) // 60 // 60
res = ':'.join('%02.0f' % x for x in (
[m, s] if h == 0 else [h, m, s]
))
if isinstance(secs, float):
ms = round(secs % 1, 3)
res += ('%.3f' % ms)[1:]
return res | 444a65d11f54b090f9d03252c3720ef2dabb3062 | 23,625 |
from typing import Optional
def fmac_cisco(mac: str) -> Optional[str]:
"""
Given a string representation of a MAC address in a common format, return it in Cisco format.
"""
# Fast-like remove ":", ".", and "-" in one go
mac = mac.translate({58: None, 45: None, 46: None}).lower()
if len(mac) != 12:
return None
return f"{mac[:4]}.{mac[4:8]}.{mac[8:12]}" | 4c1eb0a0f2b5dcf715c2653a224a18ecf979ac4b | 23,627 |
def plusminus(n):
"""Get a number of +s or -s corresponding to n's value.
If n == 0, returns "".
Arg:
n: An int
Returns:
A str, possibly empty.
"""
return ("-", "+")[n > 0] * abs(n) | 26ddf2f90fff6ad3a24aca5ff8e3d3e26c2a7a2e | 23,628 |
def get_console_domain(region: str) -> str:
"""
Get the domain for the AWS management console based on the region
"""
if region.startswith('us-gov'):
return "console.amazonaws-us-gov.com"
if region.startswith('cn'):
return "console.amazonaws.cn"
if region.startswith('us-iso'):
raise ValueError("AWS ISO regions are not supported")
return "console.aws.amazon.com" | 05c3f313616b4d71a59e4d0bdf5151e509939d87 | 23,629 |
import re
def build_doc_index(doc):
"""
Given a document string, construct index.
Args:
doc - a string to be indexed
Returns:
a dictionary with key being each distinct word in the doc string
and value being a list of positions where this word occurs
"""
doc = doc.lower()
index = {}
overallIndex = 0
    splitRegex = r'(\W+)'
for word in re.split(splitRegex, doc):
if (len(word) >= 2 and not re.match(splitRegex, word)):
if word in index:
index[word].append(overallIndex)
else:
index[word] = [overallIndex]
overallIndex += len(word)
return index | 5fa34a3b9978b40d846e4a6d3da60c5a7da0633d | 23,636 |
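A short usage sketch; positions are character offsets into the lowercased document:
index = build_doc_index("The cat saw the dog")
assert index["the"] == [0, 12]
assert index["dog"] == [16]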
from dateutil import tz
from datetime import datetime
def tofrom_utc(timestamp, parseformat, from_utc=True):
"""
Convert a timestamp to/from UTC time
:param str timestamp: Date/time to modify
:param str parseformat: Format of the timestamp to parse
:param bool from_utc: True if source stamp is UTC; otherwise False
:return: Converted timestamp
:rtype: str
"""
utc_zone = tz.tzutc()
local_zone = tz.tzlocal()
time_obj = datetime.strptime(timestamp, parseformat)
new_time = time_obj.replace(tzinfo=(local_zone, utc_zone)[from_utc])
new_time = new_time.astimezone((utc_zone, local_zone)[from_utc])
return new_time.strftime(parseformat) | feaf28653500bf9df58f73e86d19690098f1951d | 23,637 |
def reorder_array(array, old_ids, new_ids):
"""Reorders a numpy array based on two lists of ids.
The old_ids contains the ids of the elements currently in array.
new_ids is the desired order. Elements and sizes must match!
"""
if type(old_ids) not in [list, tuple]:
old_ids = list(old_ids) # Needs to have the index method
permut = [old_ids.index(ni) for ni in new_ids]
return array[permut] | 365789b959a785985c0e4d071c674069a2221949 | 23,640 |
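A usage sketch with NumPy (the function indexes with a permutation list, so any array supporting fancy indexing works):
import numpy as np
arr = np.array([10, 20, 30])
reordered = reorder_array(arr, old_ids=["a", "b", "c"], new_ids=["c", "a", "b"])
assert reordered.tolist() == [30, 10, 20]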
from unittest.mock import Mock
def fake_oauth_token(token, token_secret):
"""Return a mock OAuth token object."""
return Mock(key=token, secret=token_secret) | 608d2aaf1163f35091b9f221043fab2d16af7fb3 | 23,643 |
def calc_sum_mem_dict(mem_dict):
""" Calculates sum of values stored in memory.
:param mem_dict: dictionary with memory address as key and stored value as value
:return: int
"""
return sum(mem_dict.values()) | de0ac4f2fc5f04d7e2e1bd8edd73fef2fd5f0b50 | 23,644 |
def get_weights(cbf):
"""Retrieve the latest gain corrections and their corresponding update times."""
weights, times = {}, {}
for sensor_name in cbf.sensor:
if sensor_name.endswith('_gain_correction_per_channel'):
sensor = cbf.sensor[sensor_name]
input_name = sensor_name.split('_')[1]
reading = sensor.get_reading()
weights[input_name] = reading.value
times[input_name] = reading.timestamp
return weights, times | 84887af7eda90fccb46242051a0f5b1d91a8e983 | 23,645 |
def allowed_image(filename):
"""
Check each uploaded image to ensure a permissible filetype and filename
Takes full filename as input, returns boolean if filename passes the check
"""
allowed_img_ext = ["JPEG", "JPG", "HEIC"]
    # Ensure the file has a "." in the name
    if "." not in filename:
        return False
    # Split the extension from the file name
    exten = filename.rsplit(".", 1)[1]
    # Check if the extension is in the allowed list
    return exten.upper() in allowed_img_ext | 6dfdd37587ffa7abd98209c2cb965e78d808c229 | 23,646 |
import logging
import yaml
def get_yaml_config(config_file):
"""Return configuration from YAML file.
:param config_file: Configuration file name
:type config_file: string
:returns: Dictionary of configuration
:rtype: dict
"""
    # Note: in its original form (get_mojo_config) this would search through
    # mojo stage directories. This version assumes the yaml file is in the pwd.
    logging.info('Using config %s', config_file)
    with open(config_file, 'r') as f:
        return yaml.safe_load(f) | 07d90588b753a8ddecbca681a94f7ef5ca25fc27 | 23,650 |
def avg(vals, count=None):
""" Returns the average value
Args:
vals: List of numbers to calculate average from.
count: Int of total count that vals was part of.
Returns:
Float average value throughout a count.
"""
    total = 0
    for v in vals:
        total += v
    if count is None:
        count = len(vals)
    return float(total) / count | dbb7d7d9cacb635b702c842aeaeb55194f7fcb50 | 23,656 |
def format_file_name(input_file, output_file = None):
"""
Determine the name of the file to write to disk.
If the user has specified an output file name and extension, use this to
write the file. If they haven't, append "_no_grave" onto the name of the
input file and add the .h5m extension.
Input:
______
input_file: str
User supplied data file location.
output_file: str
Optional user supplied output file name and extension.
Returns:
________
    output_file: str
        The name of the file to write to disk.
"""
if output_file is None:
input_list = input_file.split("/")
file_name = '.'.join(input_list[-1].split(".")[:-1])
output_file = file_name + "_no_grave.h5m"
return output_file | bd13b2d3b5957df67f1a7d14a6189a12fe4ba3e8 | 23,661 |
def part1_and_2(lines, draw_diagonal=False):
"""
Part1: Consider only horizontal and vertical lines.
Part2: Consider horizontal, vertical, *and* diagonal lines.
All diagonal lines will be exactly 45 degrees
At how many points do at least two lines overlap?
"""
# create the empty graph
graph = dict()
for y in range(0,1000):
graph[y] = [0 for x in range(1000)]
# draw lines:
for line in lines:
x1, y1, x2, y2 = line[0], line[1], line[2], line[3]
# vertical line:
if x1 == x2:
for i in range(min(y1, y2), max(y1, y2)+1):
graph[i][x1] += 1
# horizontal line:
elif y1 == y2:
for i in range(min(x1, x2), max(x1, x2)+1):
graph[y1][i] += 1
# everything else must be a diagonal line:
elif draw_diagonal:
if x1 > x2:
# ensure x increases from x1 to x2
x1, y1, x2, y2 = line[2], line[3], line[0], line[1]
while x1 <= x2:
graph[y1][x1] += 1
x1 += 1
if y1 < y2: # downhill slope
y1 += 1
else: # uphill slope
y1 -= 1
# count the number of crossing lines
crossing_lines = 0
for y in graph:
for spot in graph[y]:
if spot > 1:
crossing_lines += 1
return crossing_lines | 92501f82ab84a0b7dcbbc13b74ac5a8189fdf518 | 23,666 |
from typing import Optional
from typing import Union
from typing import Tuple
def _normalize_keep(keep: Optional[Union[str, Tuple[Optional[str], Optional[str]]]]) -> Tuple[Optional[str], Optional[str]]:
"""Convert a value passed for the 'keep' parameter to a normalized form."""
if keep is None:
return (None, None)
elif isinstance(keep, (str, bytes)):
if keep == 'first' or keep == 'last':
return (keep, keep)
else:
raise ValueError(f"Unsupported value '{keep}' passed for the `keep` parameter.")
elif isinstance(keep, tuple):
if len(keep) == 2:
return keep
else:
raise ValueError(f"Invalid tuple (length={len(keep)}) passed for the `keep` parameter.")
else:
raise ValueError(f"Invalid argument value passed for the `keep` parameter.") | 4e91c1c25ffab6b0488e37105d1a2ca5d5b3f849 | 23,667 |
import copy
def copy_face_features(feats: list):
"""
Performs deep copy of feats
:param feats: list of features
:return: deep-copied features
"""
return copy.deepcopy(feats) | 47c37b528bbb63fe8d123bb8ebcf619d033ee310 | 23,671 |
def _get_dict_from_longtolongmap(proto_map):
"""
Convert the ProtoLongToLongMap_pb2 type to a simple dict.
"""
if len(proto_map.keys.elements) != len(proto_map.values.elements):
raise IndexError('array length mismatch')
new_dict = {}
for key, value in zip(proto_map.keys.elements, proto_map.values.elements):
new_dict[key] = value
return new_dict | 07502282c5d000a74d0b24eace4dffcbe3dd81ae | 23,672 |
import torch
def gram_matrix(x):
""" Calculates the Gram matrix for the
feature maps contained in x.
Parameters:
x: feature maps
Returns:
G: gram matrix
"""
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G | 69789d925fcd84d3d9dff93f70e41c1f8ae9d3e6 | 23,673 |
def search(d, key, default=None):
"""Return a dict containing to the specified key in the (possibly
nested) within dictionary d. If there is no item with that key, return
default.
"""
    stack = [d]
    while stack:
        cur_d = stack.pop()
for k, v in cur_d.items():
if k == key:
return cur_d
elif isinstance(v, dict):
stack.append(v)
return default | cd96fda8462b4fe6904f138cc3fc25a83faca802 | 23,674 |
def compute_route_cost(dist, route):
"""Compute the cost of a route."""
N = len(route)
assert N == len(dist) + 1
assert route[0] == route[-1]
cost = 0
for i in range(1, len(route)):
u = route[i - 1]
v = route[i]
c = dist[u][v]
assert c != 0
cost += c
return cost | 8a1f8ce83ac0f2f990dbed08019fa04a97b1b725 | 23,678 |
def is_hello_message(message: str) -> bool:
"""Checks if a message is a hello message."""
if "Hello" in message:
return True
return False | 1acebc9ee74d05e3e1bb9913f68a6aaf6b48faa2 | 23,681 |
def find_output_value(name, outputs):
""" Finds a specific output within a collection. """
return next(
output['value'] for output in outputs if output['name'] == name
) | 29ab594f969757ce9e8aab79ced58b285b9e49c2 | 23,686 |
import hashlib
def get_sha256_hash(key, size=None):
"""
Provide a SHA256 hash based on the supplied key values.
:param key: An iterable of key values.
:param size: The size of the returned hash. Defaults to full hash. If
size provided is greater than the hash size the full hash is returned.
:returns: a SHA256 hash for the key values supplied.
"""
partition_hash = hashlib.sha256()
for part in key:
partition_hash.update(str(part).encode('utf-8'))
sha256_hash = partition_hash.hexdigest()
if not size or size > len(sha256_hash):
size = len(sha256_hash)
return sha256_hash[:size] | 311c751d5c64eb9bef3a297760922654958d58cc | 23,689 |
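A usage sketch; the hash depends only on the stringified key parts, so equal keys give equal prefixes:
h = get_sha256_hash(["user", 42], size=8)
assert len(h) == 8
assert h == get_sha256_hash(("user", 42), size=8)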
def _queue_number_order_priority(v):
"""Returns the number to be used as a comparison for priority.
Lower values are more important. The queue priority is the lowest 31 bits,
of which the top 9 bits are the task priority, and the rest is the timestamp
which may overflow in the task priority.
"""
return v.queue_number & 0x7FFFFFFF | fd32678eb1984d2fcf9392467722cc43f72f64d9 | 23,690 |
def create_segment_allele_counts(segment_data, allele_data):
""" Create a table of total and allele specific segment counts
Args:
segment_data (pandas.DataFrame): counts of reads in segments
allele_data (pandas.DataFrame): counts of reads in segment haplotype blocks with phasing
Returns:
pandas.DataFrame: output segment data
Input segment_counts table is expected to have columns 'chromosome', 'start', 'end', 'readcount'.
Input phased_allele_counts table is expected to have columns 'chromosome', 'start', 'end',
'hap_label', 'is_allele_a', 'readcount'.
Output table will have columns 'chromosome', 'start', 'end', 'readcount', 'major_readcount',
'minor_readcount', 'major_is_allele_a'
"""
# Calculate allele a/b readcounts
allele_data = (
allele_data
.set_index(['chromosome', 'start', 'end', 'hap_label', 'is_allele_a'])['readcount']
.unstack(fill_value=0)
.reindex(columns=[0, 1])
.fillna(0.0)
.astype(int)
.rename(columns={0: 'allele_b_readcount', 1: 'allele_a_readcount'})
)
# Merge haplotype blocks contained within the same segment
allele_data = allele_data.groupby(level=[0, 1, 2])[['allele_a_readcount', 'allele_b_readcount']].sum()
# Reindex and fill with 0
allele_data = allele_data.reindex(segment_data.set_index(['chromosome', 'start', 'end']).index, fill_value=0)
# Calculate major and minor readcounts, and relationship to allele a/b
allele_data['major_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(max, axis=1)
allele_data['minor_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(min, axis=1)
allele_data['major_is_allele_a'] = (allele_data['major_readcount'] == allele_data['allele_a_readcount']) * 1
# Merge allele data with segment data
segment_data = segment_data.merge(allele_data, left_on=['chromosome', 'start', 'end'], right_index=True)
return segment_data | f27b8e925d58ea70806c90ad2d3d5144e7690812 | 23,691 |
def parse_state(value):
"""
Parse state from LEA code.
"""
return value[0:2] | 8814cc94785674f411afe7ba54802891babb20a7 | 23,694 |
import math
def get_ue_sig_power(ue_ap_distance):
"""
Function to calculate signal power between the UE and AP
"""
# To avoid ZeroDivisionError
if ue_ap_distance:
distance = (10 * math.log10(1 / math.pow(ue_ap_distance, 2)))
# discretizing the distance
distance /= 10
return round(distance) | 1239e60153c397871e7a8b37d1b48bba39f41bee | 23,695 |
import re
def extract_timestamp(line):
"""Extract timestamp from log item.
:param line: log item.
:type line: str
:return: timestamp or empty string
:rtype: str
"""
rex = r"(\d{4}\-\d\d\-\d\d\s\d\d:\d\d:\d\d[\,\d]*[\s\w]*)"
match = re.search(rex, line)
if match:
return match.group(1)
return "" | 2f8efdb9bdc95bf511d2f225ab42e3e489a61677 | 23,698 |
def is_unique(sentence):
""" 1.1 Is Unique: Implement an algorithm to determine if a string has
all unique characters. What if you cannot use additional data structures?
Complexity: O(n) time, O(n) space
"""
    h = set()
for c in sentence:
if c in h:
return False
h.add(c)
return True | 5c69a4217803c7ab88bb20b641dcd8726d29cb1e | 23,702 |
def get_most_similar_factors(n):
"""Factorize n into two numbers.
Returns the best pair, in the sense that the numbers are the closest to each other."""
i = int(n**0.5 + 0.5)
while n % i != 0:
i -= 1
    return i, n // i | ea45874901031a95ba103b5f6bf89c743d8f65c3 | 23,705 |
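A couple of worked values; the search starts near the square root and walks down to the nearest divisor:
assert get_most_similar_factors(12) == (3, 4)
assert get_most_similar_factors(36) == (6, 6)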
import click
def get_short_help_str(command, limit=45):
"""
Gets short help for the command or makes it by shortening the long help string.
"""
return command.short_help or command.help and click.utils.make_default_short_help(command.help, limit) or '' | e123db3a912f1da13b7afd94fb8759a18237c36b | 23,707 |
def countMorphemes(morphlist):
""" Cuenta el número de ocurrencias de cada label
:param morphlist: Lista de bio-labels
:return: Diccionario con las labesl como llave y el número de
ocurrencias como valor
"""
counts = {}
for morpheme in morphlist:
label = morpheme[0][2:]
counts[label] = counts.get(label, 0) + 1
return counts | 6cd4aa59b7c41cc416693c3287297570b94197fe | 23,710 |
def sort_file(fh):
""" sort the contents of a file handle. """
lst = list(fh.readlines())
lst.sort()
return lst | 8a8bf189e4294414024285187c66cd303dad2768 | 23,711 |
from typing import Union
from typing import Dict
from typing import Any
def dict_to_txt(dict_val: Union[str, Dict[str, Any]]) -> str:
"""
Return string as "key:val; key2:val2" pairs from `dict_val`.
Parameters
----------
dict_val : Union[str, Dict[str, Any]]
Dict of key/val pairs
or string of single key/value
Returns
-------
    str
        str formatted as "key:val" pairs, one per line
"""
if isinstance(dict_val, str):
if not dict_val:
return ""
if ":" in dict_val:
key, val = dict_val.split(":", maxsplit=1)
else:
key, val = dict_val, ""
return f"{key}:{val}"
if isinstance(dict_val, dict):
return "\n".join(f"{key}:{val}" for key, val in dict_val.items())
return "" | 116aeb9236466e71db5f84651d4cb36d3da05422 | 23,716 |
from pathlib import Path
from typing import TextIO
import gzip
def tsv_opener(path: Path) -> TextIO:
"""
Open a TSV (either text file or gzip-compressed text file).
Args:
path : The path to the TSV file.
"""
if path.suffix == ".gz":
fh = gzip.open(path, "rt")
else:
fh = open(path, "r")
return fh | 7e5186138f9331e27b35458dc0f33b268dc48582 | 23,717 |
def gpsWeekCheck(t):
"""Makes sure the time is in the interval [-302400 302400] seconds, which
corresponds to number of seconds in the GPS week"""
if t > 302400.:
t = t - 604800.
elif t < -302400.:
t = t + 604800.
return t | acec8cff009f8dac53363a4686d869f4d5054b8d | 23,719 |
import six
import binascii
def _uvarint(buf):
"""Reads a varint from a bytes buffer and returns the value and # bytes"""
x = 0
s = 0
for i, b_str in enumerate(buf):
if six.PY3:
b = b_str
else:
b = int(binascii.b2a_hex(b_str), 16)
if b < 0x80:
if i > 9 or (i == 9 and b > 1):
raise ValueError("Overflow")
return (x | b << s, i + 1)
x |= (b & 0x7f) << s
s += 7
return 0, 0 | 825921b72501436ca52dff498c76c43c0f5f48ca | 23,720 |
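A sketch decoding a two-byte varint (0xAC 0x02 is the standard encoding of 300):
value, nbytes = _uvarint(b"\xac\x02")
assert (value, nbytes) == (300, 2)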
def binarize_ic50(ic50, ic50_threshold):
"""
Binarize ic50 based on a threshold
"""
if ic50 <= ic50_threshold:
return 1
return 0 | d1512f790dfad4fb3f85f4757184ceb7d21fc56a | 23,725 |
def integer(number, *args):
    """Strict int(): rejects the underscore digit separators that the
    Python 3.6+ builtin silently accepts.
    >>> integer('1_0')
    Traceback (most recent call last):
    ...
    ValueError
    """
    num = int(number, *args)
    if isinstance(number, str) and '_' in number or isinstance(number, (bytes, bytearray)) and b'_' in number:
        raise ValueError()
    return num | e24f208db97be51ee535ad93cb795958848dd18f | 23,728 |
def middle(t):
"""Returns all but the first and last elements of t.
t: list
returns: new list
"""
return t[1:-1] | 99fd08614830e3b6d932289f95e6d38b95175fc4 | 23,730 |
def make_type_entity_id(type_id=None, entity_id=None):
"""
Assemble a type_id and entity_id and return a composite identifier.
If the entity Id is blank, ignore the supplied type id
>>> make_type_entity_id(type_id="type_id", entity_id="entity_id") == "type_id/entity_id"
True
>>> make_type_entity_id(type_id="type_id", entity_id="") == ""
True
"""
assert type_id is not None, "make_type_entity_id: no type id (%s, %s)"%(type_id, entity_id)
assert entity_id is not None, "make_type_entity_id: no entity id (%s, %s)"%(type_id, entity_id)
if entity_id != "":
return type_id + "/" + entity_id
return "" | 8de40843e2bc35431333a2ea21947e5cd91d2db2 | 23,732 |
def sublist_generator(input_list: list) -> list:
"""
    Given a list, generates all contiguous sublists (slices)
    :param input_list: input list
    :return: list of all contiguous sublists
"""
list_of_lists = []
list_max_pos = len(input_list)+1
for initial_sublist_pos in range(list_max_pos):
for final_sublist_pos in range(initial_sublist_pos+1, list_max_pos):
list_of_lists.append(input_list[initial_sublist_pos:final_sublist_pos])
return list_of_lists | 5a2ab952ce7cae55433f364371abc1951caa29ce | 23,735 |
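A usage sketch; note these are contiguous slices, not arbitrary subsets:
assert sublist_generator([1, 2, 3]) == [[1], [1, 2], [1, 2, 3], [2], [2, 3], [3]]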
from typing import List
def all_one_aligned(digits: List[int]) -> bool:
"""
Checks if all the 1 are aligned from the beginning of the list.
"""
to_check: List[int] = digits[0:digits.count(1)]
return all(x == 1 for x in to_check) | 2c9a3d7e094f97a40048aa83561ff0ff5c31f40f | 23,743 |
import pathlib
import yaml
import json
def read_file(file: pathlib.Path) -> dict:
"""Read a json/yaml file without the znjson.Decoder
Parameters
----------
file: pathlib.Path
The file to read
Returns
-------
dict:
Content of the json/yaml file
"""
if file.suffix in [".yaml", ".yml"]:
with file.open("r") as f:
file_content = yaml.safe_load(f)
elif file.suffix == ".json":
file_content = json.loads(file.read_text())
else:
raise ValueError(f"File with suffix {file.suffix} is not supported")
return file_content | 9fce2b658ce26bb9c65b1f54de2d9d362fa06f57 | 23,744 |
def make_conditional(req, response, last_modified=None, etag=None):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param req: OpenERP request
:type req: web.common.http.WebRequest
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = 0
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(req.httprequest) | d858e4e42f10e364617c8675ce8126fb94784c97 | 23,745 |
def validate_issue_year(iyr):
"""
Validate issue year - four digits; at least 2010 and at most 2020.
"""
    if len(iyr) != 4 or not iyr.isdigit():
        return False
    return 2010 <= int(iyr) <= 2020 | ef86d9a6bdda3372b83cf2ab960fa43ac17016ec | 23,748 |
def get_permutation_tuple(src, dst):
"""get_permtation_tuple(src, dst)
Parameters:
src (list): The original ordering of the axes in the tiff.
dst (list): The desired ordering of the axes in the tiff.
Returns:
result (tuple): The required permutation so the axes are ordered as desired.
"""
result = []
for i in dst:
result.append(src.index(i))
result = tuple(result)
return result | 2bdd06162b41a10e7f92e1b25f1b9e4f821670a3 | 23,752 |
import io
def latexify_results(eval_data, id_column='run_id'):
"""Take a data frame produced by `EvaluationProtocol.eval_data()` and
produce a LaTeX table of results for this method. Will use the `run_id`
column as an algorithm name (or to get algorithm names, if there's more
than one algorithm present in the given data). You can override that by
specifying the `id_column` keyword argument."""
# Each column of the LaTeX table corresponds to a particular evaluation
# environment, while each row corresponds to an algorithm. In contrast,
    # each row of the given Pandas frame represents a series of rollouts by
# one particular algorithm on one particular test configuration.
test_envs = eval_data['test_env'].unique()
col_names = [r'\textbf{%s}' % e for e in test_envs]
alg_names = eval_data[id_column].unique()
# write to buffer so we can use print()
fp = io.StringIO()
# prefix is just LaTeX table setup
print(r"\centering", file=fp)
print(r"\begin{tabular}{l@{\hspace{1em}}%s}" % ("c" * len(col_names)),
file=fp)
print(r"\toprule", file=fp)
# first line: env names
print(r'\textbf{Randomisation} & ', end='', file=fp)
print(' & '.join(col_names), end='', file=fp)
print('\\\\', file=fp)
print(r'\midrule', file=fp)
# next lines: actual results
for alg_name in alg_names:
alg_mask = eval_data[id_column] == alg_name
stat_parts = []
for env_name in test_envs:
full_mask = alg_mask & (eval_data['test_env'] == env_name)
relevant_rows = list(eval_data[full_mask].iterrows())
if len(relevant_rows) != 1:
raise ValueError(
f"got {len(relevant_rows)} rows corresponding to "
f"{id_column}={alg_name} and test_env={env_name}, but "
f"expected one (maybe IDs in column {id_column} aren't "
f"unique?)")
(_, row), = relevant_rows
std = row['std_score']
stat_parts.append(f'{row["mean_score"]:.2f} ($\\pm$ {std:.2f})')
print(r'\textbf{%s} & ' % alg_name, end='', file=fp)
print(' & '.join(stat_parts), end='', file=fp)
print('\\\\', file=fp)
print(r'\bottomrule', file=fp)
print(r'\end{tabular}', file=fp)
return fp.getvalue() | f2bb66f0a97392414bbbf3b4e449da3bd37c1954 | 23,756 |
import re
def ireplace(text, old, new, count=None):
"""
A case-insensitive replace() clone. Return a copy of text with all occurrences of substring
old replaced by new. If the optional argument count is given, only the first count
occurrences are replaced.
"""
pattern = re.compile(re.escape(old), re.IGNORECASE)
if count:
return pattern.sub(new, text, count=count)
else:
return pattern.sub(new, text) | d2ec2e4ea0a7393f79ea9223df666f1ea58730d5 | 23,757 |
def backtested_periods(backtest_statistics):
"""
Helper function for organizing column labels and their corresponding index in backtest_statistics.
:param backtest_statistics: (pd.DataFrame) Ex-post performance of efficient frontier portfolios.
:return: (dict) Dictionary of column labels as keys and their corresponding index in backtest_statistics.
"""
periods = list(backtest_statistics.keys())
columns = ['1WK', '1MO', '3MO', '6MO', '1YR', '3YR', '5YR', '10YR']
dictionary = dict(zip(columns, periods))
return dictionary | 45fabfa0ec5ccbcc6ad2f802a40ac97d94d81a80 | 23,759 |
def operatingCost(fuel, oilLube, tires, maint, H):
"""
fuel = annual fuel cost
oilLube = annual oil and lubricant costs
tires = tire cost/hour inc. maintenance
maint = maintenance and repair costs
H = Productive hours
"""
hMaint = maint/H
return {'Hourly maintenance and repair': [hMaint],
'Fuel': [fuel],
'Oil & lubricants': [oilLube],
'Tires': [tires],
'Operating cost': [fuel+hMaint+oilLube+tires]} | 559c85de88436b170b9c3a8d5ad8226323dcd562 | 23,760 |
def strategy(history, memory):
"""
If opponent defected, respond with defection. *UNLESS* we defected the turn before.
"""
opponents_last_move = history[1, -1] if history.shape[1] >= 1 else 1
our_second_last_move = history[0, -2] if history.shape[1] >= 2 else 1
choice = 1 if (opponents_last_move == 1 or our_second_last_move == 0) else 0
return choice, None | 56af54fe2ae78b389da440c10b6bc9f4560141c9 | 23,761 |
def update_position_avg_price_2way(cma_price, position_qty, trade_action, trade_direction, trade_price, trade_qty):
"""
Update position quantity and calculate average prices with a new trade.
Long/short positions are updated together, i.e. sell long == buy short.
Moving average price of current position is only updated when the position direction flips.
:param cma_price: Cumulative moving average prices of current position, either long or short.
:param position_qty: Position qty. Positive: long, negative: short.
:param trade_action: 0 - buy, 1 - sell
:param trade_direction: 0 - long, 1 -short
:param trade_price: float
:param trade_qty: int
:return: int, float, float. New position qty, average price and realized gain.
**Note**: Returned realized gain is not scaled with contract unit.
"""
if trade_action != trade_direction: # short
trade_qty *= -1
position_qty_new = position_qty + trade_qty
if position_qty_new == 0:
cma_price_new = 0.0
elif position_qty == 0 or (position_qty > 0) != (position_qty_new > 0):
cma_price_new = float(trade_price)
elif (position_qty > 0) == (trade_qty > 0):
cma_price_new = float(cma_price * position_qty + trade_price * trade_qty) / position_qty_new
else:
cma_price_new = cma_price
if position_qty != 0 and ((position_qty > 0) != (trade_qty > 0)):
realized_gain = (trade_price - cma_price) * (
2 * int(position_qty > 0) - 1) * min(abs(position_qty), abs(trade_qty))
else:
realized_gain = 0
return cma_price_new, position_qty_new, realized_gain | b4352da4a20ede5bb178d185d40fc3de43dc720d | 23,769 |
def celsius_to_fahrenheit(degrees):
"""
Given a temperature in celsius, return it in fahrenheit
:param degrees:
:return:
"""
return (degrees * 1.8) + 32. | 548d4c9a67e353c54ee8bdc17291b2e881db154e | 23,770 |
def simple_table(row, col, cell_factory):
"""
Create and return a simple table, like: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""
return [
[cell_factory(i, j) for j in range(col)]
for i in range(row)
] | 3133bc3178ab9ac53b64a4a1ef2289f1d46d2f67 | 23,771 |
def uses_requirement(requirement, field):
"""
Check if a given database field uses the specified requirement
(IS_IN_SET, IS_INT_IN_RANGE, etc)
"""
if hasattr(field.requires, "other") or requirement in str(field.requires):
if hasattr(field.requires, "other"):
if requirement in str(field.requires.other):
return True
elif requirement in str(field.requires):
return True
return False | 99286b46992de285c0b202240530f44d0143e30d | 23,778 |
from typing import Counter
def sum_all_counts(counters):
"""Sum up all the counter objects into a single one ignoring the courts.
:param counters: A dict of name-counter pairs.
:return: A counter object with counts for every year across all courts.
"""
c = Counter()
for court in counters.values():
c.update(court)
return c | 39ddf048df859ca6a99ca374074aa66cf968af17 | 23,780 |
def list_methods(interface):
"""
    List all explicit methods of `interface`, hiding methods that start with '_'
Parameters:
----------
interface : interface of CST Studio
Returns:
----------
methods : array of string
Explict methods of interface
"""
methods = []
for key in dir(interface):
method = str(key)
if not method.startswith('_'):
methods.append(method)
return methods | 34d96913cc31eeb837de3bcc4ad1a276d86ebfc7 | 23,784 |
import re
def words(text: str) -> list[str]:
"""
Splits text into an array of its words. Splits at all spaces and trims any punctuation.
Parameters
------------
text: str
The text to be split up.
Returns
------------
str[]
An array of strings (words)
"""
list_words = re.split(' |\n|\t', text) #Splits along spaces
for i in range(0, len(list_words)):
list_words[i] = re.sub(r'\.|\?|!|\,|\;|\"|\(|\)|\:|\/|\“|\”', '', list_words[i])
list_words = filter(str.strip, list_words) #Strips leading/trailing whitespaces
#and filters out 'sentences' that are only whitespace
return list(list_words) | d7a58575b7c4a7bbf802cc4e24628206e3c6b19c | 23,788 |
def build_service_catalog_parameters(parameters: dict) -> list:
"""Updates the format of the parameters to allow Service Catalog to consume them
Args:
parameters (dict): List of parameters in the format of
{"key1":"value1", "key2":"value2"}
Returns:
list: Parameters in the format of {"Key":"string", "Value":"string"}
"""
new_parameters = list()
for key, value in parameters.items():
y = dict()
y['Key'] = key
y['Value'] = value
new_parameters.append(y)
return new_parameters | 7529be4639300a88ad2f7409dd5355fb6791ba4c | 23,789 |
import math
def get_distance_charge(delivery_distance: int) -> float:
"""
This function calculates distance fee and adds it to delivery fee.
---
Args:
delivery_distance (int): distance of the delivery
Returns:
distance_charge (float): fee for additional distance
"""
first_kilometer = 1000
additional_distance = 500
additional_distance_fee = 100
minimum_fee = 200
    if delivery_distance > first_kilometer:
additional_distance = math.ceil((delivery_distance - first_kilometer) / additional_distance)
return additional_distance * additional_distance_fee + minimum_fee
return minimum_fee | bc433a2a2dead852d2d5b63723319afffc1ce97e | 23,791 |
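A few worked values, assuming distances in metres and fees in cents as the constants suggest:
assert get_distance_charge(999) == 200   # within the first kilometre: minimum fee only
assert get_distance_charge(1499) == 300  # one extra 500 m block: 100 + 200
assert get_distance_charge(1501) == 400  # two extra blocks: 200 + 200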
import re
def standardize_name(name):
"""Replace invalid characters with underscores."""
return re.sub("[^0-9a-zA-Z]+", "_", name) | f7e207ed109cdc893d1ed299b2b60b187c2ffafa | 23,793 |
def _get_tile_translation(tile_grid_dict, img_size, img_overlap):
"""Calculates translation for each tile depending on their position
in a grid, their size and overlap.
Translations are returned as values in a dictionary with tile positions as keys.
Args:
tile_grid_dict (dict): Tile numbers and their respective positions in the grid.
img_size (tuple): Size of images (pixel in y- and x-direction).
img_overlap (float): Overlap between tiles in percentage.
Returns:
(dict): Tiles and their translation (x and y direction).
"""
assert len(img_size) == 2, f"img_size should be a tuple with two integers for y- and x-dimensions of a tile image."
assert (img_overlap < 1) and (img_overlap > 0), ("img_overlap should be a float thats represents tile overlap"
f"in percentage, instead got: {img_overlap}")
y_size, x_size = img_size
tile_trans = {}
# iterate over tiles and find translations in x and y direction
for tile, (row, col) in tile_grid_dict.items():
x_trans = ((col - 1) * x_size) - ((col - 1) * (x_size * img_overlap))
y_trans = ((row - 1) * y_size) - ((row - 1) * (y_size * img_overlap))
tile_trans[tile] = (x_trans, y_trans)
return tile_trans | 64d661454833776e895453c821bc6b8c5b1e835a | 23,794 |
def PF_op_pw(u, df, inverses, x):
"""
PERRON-FROBENIUS OPERATOR POINTWISE
Arguments:
- <u> a function with one argument
- <df> a function: the derivative of the dynamical system function f
(should take one arg)
- <inverses> a list of functions, each taking one argument, that find the
inverse of x under each branch of f
- <x> a float
Returns:
- a float, which is the value of PF(u) at the point x -- where PF is the
PF-operator associated to the system f.
NOTES:
- Uses a formula for the PF-operator that only works if f is piecewise
monotonic.
"""
y = 0
for inv in inverses:
z = inv(x)
y += u(z) / abs(df(z))
return y | 58c3870ef6d09d0153e1e3ea403562781280dda7 | 23,797 |
def _gzipped(stream):
"""Return True if stream is a gzip file."""
initial_pos = stream.tell()
gzip_magic = b"\x1f\x8b"
file_magic = stream.read(2)
stream.seek(initial_pos) # rewind back 2 bytes
return file_magic == gzip_magic | 1b837da396b16d10d3382cba1c804b89c5026dc4 | 23,802 |
def get_epoch_catalogues(epochs_file):
"""
Read a file which contains a list of the catalogues to be read
parameters
----------
epochs_files : str
A file which has a list of catalogues, one per line.
returns
-------
files : list
A list of filenames
"""
    with open(epochs_file) as f:
        files = list(map(str.strip, f.readlines()))
    return files | b64d96e823cff1ee2b5d43780e7b96ff36397784 | 23,803 |
from typing import List
def domain_param_list_from_dict(dp_dict: dict, dp_mapping: dict) -> List:
"""
Map a dict of domain parameter keys and its values to a list which has the same ordering as `dp_mapping`.
:param dp_dict: A dict assigning a value to a domain-parameter name.
    :param dp_mapping: A dict assigning a domain-parameter name to an integer. The integer specifies the order in which
the domain parameter should be listed
:return: Ordered list of the `dp_dict` values based on the dp_mapping
"""
return [dp_dict[dp_mapping[val]] for val in sorted(dp_mapping.keys())] | b9666aa681229d76404b153b807ddd7bc439e0dd | 23,806 |
def repr_attributes(obj: object, *anonymous_elements: object, **named_elements: object) -> str:
"""
A simple helper function that constructs a :func:`repr` form of an object. Used widely across the library.
>>> class Aa: pass
>>> assert repr_attributes(Aa()) == 'Aa()'
>>> assert repr_attributes(Aa(), 123) == 'Aa(123)'
>>> assert repr_attributes(Aa(), foo=123) == 'Aa(foo=123)'
>>> assert repr_attributes(Aa(), 456, foo=123, bar='abc') == "Aa(456, foo=123, bar='abc')"
"""
fld = list(map(repr, anonymous_elements)) + list(f'{name}={value!r}' for name, value in named_elements.items())
return f'{type(obj).__name__}(' + ', '.join(fld) + ')' | 251efd2086a3d5ec179317cbe6d88c456be8498e | 23,807 |
import six
import string
def name_for_filesystem(input_name):
"""Generate a name 'safe' and 'clean' for filesystem usage (no spaces,
only lowercase, etc.)
:param input_name: name to use as input
:type input_name: str
:raises TypeError: if the input name is not a valid string
:return: the safe name
:rtype: str
"""
if not isinstance(input_name, six.string_types):
raise TypeError("Please provide a valid string. "
"Received: %s, %s" % (input_name, type(input_name)))
input_name = input_name.replace(" ", "_")
input_name = input_name.replace("-", "_")
letters = [
a for a in input_name
if a in string.ascii_letters or a in string.digits or a == "_"
]
return "".join(letters).lower() | 7691ed2835b18a4d9efd0380ed0d20d5a6c6bfa4 | 23,808 |
def is_probably_graphql(location: str) -> bool:
"""Detect whether it is likely that the given location is a GraphQL endpoint."""
return location.endswith(("/graphql", "/graphql/")) | 195c3bb14cc5129531cd92218f90177fbb08a900 | 23,810 |
def strip_subreddit_url(permalink):
"""
Strip a subreddit name from the subreddit's permalink.
    This is used to avoid submission.subreddit.url making a separate API call.
"""
subreddit = permalink.split('/')[4]
return '/r/{}'.format(subreddit) | e1bbc64a0607518bc8bb8f12a27439377a2fc6b7 | 23,811 |
def load_smiles_from_txt(file):
"""Load SMILES from a txt file.
Parameters
----------
file : str
Path to a txt file where each line has a SMILES string.
Returns
-------
list of str
List of SMILES
"""
smiles = []
with open(file, 'r') as f:
for line in f.readlines():
smiles.append(line.strip())
return smiles | 32fc375cddc244a750ad86b009d1019b6f131365 | 23,812 |
import math
def pack_size(value):
"""Returns the number of bytes required to represent a given value.
Args:
value (int): the natural number whose size to get
Returns:
The minimal number of bytes required to represent the given integer.
Raises:
ValueError: if ``value < 0``.
TypeError: if ``value`` is not a number.
"""
if value == 0:
return 1
elif value < 0:
raise ValueError('Expected non-negative integer.')
return int(math.log(value, 256)) + 1 | cf0a83259ce7d76d0ec1b7ac4a69b5d2a6099d92 | 23,816 |
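A few boundary values showing the byte-length logic:
assert pack_size(0) == 1
assert pack_size(255) == 1  # fits in one byte
assert pack_size(256) == 2  # needs a second byte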
def inr(r,s,t):
"""r is in range of s and t left inclusive"""
return (r < t) and (r >= s) | 7a75021a9b0e22c0a580fc658aafe349d34cb2a0 | 23,817 |
import inspect
def _is_class(module, member, clazz):
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
:rtype: bool
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True | bdfc269e28c08d72ad683b76e143a09c86da9d6e | 23,830 |
def verifyIsCloseEnough(number1, number2, margin = 0.05):
"""
Return true if number1 is within margin of number 2.
"""
max_diff = number2 * margin
return (abs(number1 - number2) < max_diff) | 70ea34f182f77508a7b1b10dfaeccbed09c0c239 | 23,832 |
def process_text(raw_text: str) -> list[str]:
"""
Parses each line in `raw_text` and adjusts chars as needed to get a csv format
:param raw_text: string to parse lines from.
:return: List of rows (strings) with tabular data in csv format.
"""
results = list()
text = raw_text
for c in ('\r', '-', '—'):
text = text.replace(c, '')
text = text.replace(',', '.')
lines = text.split('\n')
for line in lines:
words = line.split('\t')
csv_string = ", ".join(words)
results.append(csv_string + '\n')
return results | e6abb52f2581fedd9b69bbfd642bd26db06b466e | 23,834 |
def call_once(f):
"""Cache the result of the function, so that it's called only once"""
result = []
def wrapper(*args, **kwargs):
if len(result) == 0:
ret = f(*args, **kwargs)
result.append(ret)
return result[0]
return wrapper | 1713445b4dbc1837ed76a33aa93aeab07bedd693 | 23,836 |
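A usage sketch as a decorator; the wrapped function body runs once and the result is reused:
@call_once
def expensive_setup():
    print("initializing")  # printed only on the first call
    return {"ready": True}

first = expensive_setup()
second = expensive_setup()  # cached; nothing printed
assert first is second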
import torch
from typing import Sequence
def crop(data: torch.Tensor, corner: Sequence[int], size: Sequence[int]) -> torch.Tensor:
"""
Extract crop from last dimensions of data
Args:
data: input tensor
corner: top left corner point
size: size of patch
Returns:
torch.Tensor: cropped data
"""
_slices = []
if len(corner) < data.ndim:
for i in range(data.ndim - len(corner)):
_slices.append(slice(0, data.shape[i]))
    _slices = _slices + [slice(c, c + s) for c, s in zip(corner, size)]
    # Index with a tuple: indexing tensors with a list of slices is deprecated
    return data[tuple(_slices)] | 2f596db499e3b1d59475477e71a95e8a170242da | 23,838 |
def find_subtree_indices(doc, dependency_type):
"""
This function finds and returns the indices of the entire clause
(each token) in the subtree to be removed.
Args:
doc: spaCy Doc of the clean sentence
dependency_type:str Options are "appos", "acl", "relcl", "advcl"
Return:
indices_to_remove_subtree: list of indices of the subtree
"""
# List of indices of clause tokens to be removed in the sentence
indices_to_remove_subtree = []
# List of unique spaCy hashes for string tokens in the doc
# Position remains the same from original doc
hash_ids_of_tokens = [token.orth for token in doc]
# Iterate through the doc to get the dep clause subtree
for index, token in enumerate(doc):
# Check for dependency label
if token.dep_ == dependency_type:
# Get the indices of subtree- all tokens of the clause
for subtree_token in token.subtree:
# Get the unique hash id for the subtree token
subtree_token_id = subtree_token.orth
# Look up the token's index in the doc
subtree_token_index_in_doc = hash_ids_of_tokens.index(subtree_token_id)
# Add to list of indices to be removed
indices_to_remove_subtree.append(subtree_token_index_in_doc)
# Return list of indices
return indices_to_remove_subtree | 31c6b3c7120075f5abf2ddada2113c73177b031b | 23,840 |
def check_table_exists(connection, table_name):
"""
Returns a Boolean to tell if a certain table exists already.
"""
data = None
with connection.cursor() as cursor:
        # Use a query parameter for table_name to avoid SQL injection
        cursor.execute(
            'SELECT * '
            'FROM information_schema.tables '
            "WHERE table_schema = 'public' AND table_name = %s "
            'LIMIT 1;', (table_name,))
data = cursor.fetchone()
return data is not None | 3580dbe9b84d521fb9f16da7b82f400525852e23 | 23,841 |
def to_callable(obj):
"""Turn an object into a callable.
Args:
obj: This can be
* **a symbolic expression**, in which case the output callable
evaluates the expression with symbols taking values from the
callable's arguments (listed arguments named according to their
numerical index, keyword arguments named according to their
string keys),
* **a callable**, in which case the output callable is just the
input object, or
* **anything else**, in which case the output callable is a
constant function which always returns the input object.
Returns:
callable
Examples:
>>> to_callable(Symbol(0) + Symbol('x'))(3, x=4)
7
>>> to_callable(lambda x: x + 1)(10)
11
>>> to_callable(12)(3, x=4)
12
"""
if hasattr(obj, '_eval'):
return lambda *args, **kwargs: obj._eval(dict(enumerate(args), **kwargs))
elif callable(obj):
return obj
else:
return lambda *args, **kwargs: obj | 37e305726cdf409623bf764864ab1ec5989b4690 | 23,842 |
from datetime import datetime
def datetime_unserializer(d_str):
"""Convert a string representation to a datetime object for JSON unserialization.
:param d_str: the datetime string we want to unserialize
:type d_str: str
:return: the datetime unserialized to a datetime
:rtype: datetime
"""
return datetime.fromisoformat(d_str) | 5555425063a73cedff6a87b3e70ba7ea8429b01e | 23,845 |
def get_shuffle_together(config):
"""Get which parameters need to be shuffled together, including multi-step values."""
shuffle_together = config.get('shuffle_together', [])
data_multistep = config.get('rl_multistep', [])
for steps in data_multistep:
group = []
for s in range(1, steps + 1):
group.append(f'multistep_{steps}_obs_{s}')
for s in range(1, steps):
group.append(f'multistep_{steps}_act_{s}')
group.append(f'multistep_{steps}_rew_{s}')
group.append(f'multistep_{steps}_done_{s}')
shuffle_together.append(group)
return shuffle_together | 2279bcc762d99d8642f6841b7a80c3a2cb053bb3 | 23,847 |
def strip_action(action: str) -> str:
"""
removes whitespace and changes all characters to lower case
:param action: the name of the action taken on a position
:return: the input string minus the above mentioned
"""
action = action.replace(" ", "")
action = action.casefold()
return action | ef85ba082043f5f25cb3f33c73504de48e3cf530 | 23,853 |
import configparser
def get_param_value_from_conf_ini_file(conf_ini_file_path: str, section: str, param: str) -> str:
"""
Returns the value of the specified param from the specified .ini configuration file.
:param conf_ini_file_path: Path to the .ini configuration file.
:param section: Name of the section in the .ini file. For example: '[MALLET]'.
:param param: Name of the param inside that section. For example: 'SOURCE_CODE_PATH'.
:return: A str with the value specified in the .ini file for that param.
Example:
; demo-conf.ini
[MALLET]
SOURCE_CODE_PATH = /path/to/mallet
To access that value, execute:
        >>> get_param_value_from_conf_ini_file('demo-conf.ini', 'MALLET', 'SOURCE_CODE_PATH')
"""
config = configparser.ConfigParser()
config.read(conf_ini_file_path)
return config[section][param] | aa141586ca97250c0c80b12dab0eb37cb7a0be9b | 23,859 |
def check_answer(user_guess, start_a_followers, start_b_followers):
"""Take user guess and follwer count and return it they guess right"""
if start_a_followers > start_b_followers:
return user_guess == 'a'
else:
return user_guess == "b" | 023e7c9b3430608e858ce780a828e26623f93664 | 23,863 |
def _get_color(request):
"""
Get color from request
Args:
request (Request): contains info about the conversation up to this point (e.g. domain,
intent, entities, etc)
Returns:
string: resolved location entity
"""
color_entity = next((e for e in request.entities if e['type'] == 'color'), None)
return color_entity['text'] if color_entity else None | 64a48c91b01d658b905b7aa5e47c06898ff2ff0a | 23,865 |
def _npairs(n_items):
"""Return the number of pairs given n_items; corresponds to the length
of a triu matrix (diagonal included)"""
if n_items < 2:
raise ValueError("More than two items required, "
"passed {0}".format(n_items))
n_pairs = int(n_items * (n_items - 1) / 2. + n_items)
return n_pairs | fe8698a025e78d959dcaa93d440ac79cc3460245 | 23,874 |
def adjust_release_version(release_name):
"""
Adjust release_name to match the build version from the executable.
executable: 1.8.0_212-b04 release_name: jdk8u212-b04
executable: 11.0.3+7 release_name: jdk-11.0.3+7
executable: 12.0.1+12 release_name: jdk-12.0.1+12
"""
if release_name.startswith('jdk8u'):
return release_name.replace('jdk8u', '1.8.0_')
else:
return release_name[4:] | f225eef29a67b2e4bf7869ff673a53cc7c7d7869 | 23,877 |