content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---|
import re
def break_down(compound, forgiving=True):
"""
Breaks a string representing a chemical formula down into constituent parts.
Things in parentheses are considered one part.
Any string of a capital letter followed by lower case letters is considered to be
an irriducible element.
Any number is considered to quantify the element imediately proceeding it.
Space is ignored
Other characters raise a ValueError unless forgiving=True
Example:
>>> break_down('CH3(CH2)5CHO')
>>> {'C':2, 'H':4', 'CH2':5, 'O':1}
This function is called recursively by get_elements.
"""
parts = {}
number = ""
element = ""
subcompound = ""
nest = 0
N = len(compound)
addit = False
for i, char in enumerate(compound):
if char == "(":
if nest == 0:
addit = True
else:
subcompound += char
nest += 1
elif nest > 0:
if char == ")":
nest -= 1
if nest == 0:
element = subcompound
subcompound = ""
else:
subcompound += char
else:
if re.search("[/.0-9]", char):
number += char
elif re.search("[a-z]", char):
element += char
elif re.search("[A-Z\-\+]", char):
addit = True
elif re.search("\S", char):
print(
"Not quite sure what you're talking about, mate, when you say ",
char,
)
if not forgiving:
raise ValueError
if i == N - 1:
addit = True
# print('char = ' + char + '\nelement = ' + element + '\naddit = ' + str(addit))
if addit:
if len(number) > 0:
try:
n = int(number)
except ValueError:
n = float(number)
number = ""
else:
n = 1
if len(element) > 0:
if element in parts:
parts[element] += n
else:
parts[element] = n
if nest == 0:
element = char
if i == N - 1 and re.search("[A-Z\-\+]", char):
if element in parts:
parts[element] += 1
else:
parts[element] = 1
addit = False
return parts | d09c038423f5a89d75499c1395e31c3328c9df17 | 15,479 |
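A minimal usage sketch (illustrative, not part of the original record), assuming the break_down function above is in scope; the formula string is an arbitrary example.

formula = "CH3(CH2)5CHO"
print(break_down(formula))
# expected: {'C': 2, 'H': 4, 'CH2': 5, 'O': 1}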
def read_row(width):
"""Reads a grid row of the specific width."""
row = list(input().strip())
if len(row) != width:
raise ValueError('wrong row width')
return row | 5b55f0a5df2c1d966b970705d5b1bbbc8a1aec12 | 15,484 |
def sum_abs_of_all_without_range(sequence):
"""
Same specification as sum_abs_of_all above,
but with a different implementation.
"""
# ------------------------------------------------------------------
# EXAMPLE 2. Iterates through a sequence of numbers, summing them.
# Same as Example 1 above, but uses the "no range" form.
# ------------------------------------------------------------------
total = 0
for number in sequence:
total = total + abs(number)
return total
# ------------------------------------------------------------------
# The above example shows how you can iterate through a sequence
# WITHOUT using a RANGE expression. This works ONLY
# ** IF you do NOT need the index variable. **
#
# You can ALWAYS use the form in Example 1 that uses RANGE;
# this form in Example 2 is just "syntactic sugar."
# Use this form if you like, but:
# -- Don't let it keep you from understanding the critical
# concept of an INDEX.
# -- Be aware of the limitation of this form.
# -- Don't confuse the two forms!
# ------------------------------------------------------------------ | 21fdb3d5d4edf6a920a3e0523b8af5948c6244f4 | 15,487 |
def get_progress_string(tag, epoch, minibatch, nbatches, cost, time,
blockchar=u'\u2588'):
"""
Generate a progress bar string.
Arguments:
tag (string): Label to print before the bar (i.e. Train, Valid, Test )
epoch (int): current epoch to display
minibatch (int): current minibatch to display
nbatches (int): total number of minibatches, used to display relative progress
cost (float): current cost value
time (float): time elapsed so far in epoch
blockchar (str, optional): character to display for each step of
progress in the bar. Defaults to u2588
(solid block)
"""
max_bar_width = 20
bar_width = int(float(minibatch) / nbatches * max_bar_width)
s = u'Epoch {:<3} [{} |{:<%s}| {:4}/{:<4} batches, {:.2f} cost, {:.2f}s]' % max_bar_width
return s.format(epoch, tag, blockchar * bar_width, minibatch, nbatches, cost, time) | 9c5a927433795426fe6466708d24b925ae37e4a5 | 15,493 |
def cell_has_code(lines):
"""Is there any code in this cell?"""
for i, line in enumerate(lines):
stripped_line = line.strip()
if stripped_line.startswith('#'):
continue
# Two consecutive blank lines?
if not stripped_line:
if i > 0 and not lines[i - 1].strip():
return False
continue
return True
return False | c7094accae4d7c1a9f8eae3aa33eda2e980993bb | 15,498 |
def clip(number, start, end):
"""Returns `number`, but makes sure it's in the range of [start..end]"""
return max(start, min(number, end)) | c2fed329f855e36c08a05cc5e72d33e743c183c4 | 15,504 |
def get_nested_value(d: dict, path: dict):
"""
Retrieves value from nested dictionary
    :param d: {key: {key_2: {key_2_1: hey}, key_1: 1234}}
:param path: {key: {key_2: key_2_1}} path to take through the dictionary d
:return: d[key][key_2][key_2_1]
"""
if isinstance(path, dict):
for k, v in path.items():
if k in d:
return get_nested_value(d[k], v)
else:
raise KeyError(f'Dictionary {d} does not have key {k}')
else:
return d[path] | a9eaf03cd549b6794b4ea7ffd9bb3ab853a7d35d | 15,508 |
import math
def entropia(p):
"""Entropía de una Bernoulli de parámetro p."""
if p == 0 or p == 1:
return 0
else:
return - p * math.log(p, 2) - (1 - p) * math.log(1 - p, 2) | 2ded93b09f5199fbd9d7564d45d1937735fb73eb | 15,509 |
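A quick illustrative check (assuming the entropia function above is in scope): a fair coin has the maximum entropy of one bit, while a heavily biased coin has much less.

print(entropia(0.5))  # 1.0
print(entropia(0.1))  # ~0.469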
def getIdForMember(member):
"""
Returns the member's unique id as a string.
"""
return member['@Member_Id'] | 5aca8b6413581313fffee8ca21761a23c9315e38 | 15,515 |
def binary_search(array, x):
"""
Vanilla BS algorithm
Look for x in the sorted array
return the index if found
    or else return -1
[8, 9, 10]
"""
# A: init the low and high values
low, high = 0, len(array) - 1
# B: search the array
while low <= high:
# find the middle value
middle = (low + high) // 2
mid_elem = array[middle]
# if found, return the index
if mid_elem == x:
return middle
# if not, divide the array into two subproblems
elif mid_elem < x: # go right
low = middle + 1
elif mid_elem > x: # go left
high = middle - 1
# C: if not found, return None
return -1 | 99fb659774efabbbd5f53c91fd47d6e106542a4c | 15,519 |
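A small usage sketch (assuming binary_search above is in scope); the input list must already be sorted.

data = [2, 4, 8, 9, 10, 15]
print(binary_search(data, 9))  # 3 (index of the match)
print(binary_search(data, 7))  # -1 (not found)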
from typing import List
from typing import Dict
import json
def load_jsonl(filepath: str) -> List[Dict[str, str]]:
"""
Load data from a `.jsonl` file, i.e., a file with one dictionary per line
Args:
filepath (str): Path to `.jsonl` file.
Returns:
List[Dict[str, str]]: A list of dictionaries, one per paper.
"""
with open(filepath, "r") as f:
data = [json.loads(line) for line in f.readlines()]
return data | defed6436b4e6dd17aae7163db19a3558446fb34 | 15,521 |
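A usage sketch (assuming load_jsonl above is in scope); it first writes a tiny hypothetical .jsonl file to a temporary location.

import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as tmp:
    tmp.write(json.dumps({"id": "1", "title": "First paper"}) + "\n")
    tmp.write(json.dumps({"id": "2", "title": "Second paper"}) + "\n")
    path = tmp.name

records = load_jsonl(path)
print(len(records), records[0]["title"])  # 2 First paper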
def get_select_options(form):
"""
    Return a dict of tags and descriptions based on the SELECT field in the
given form.
"""
tags = {}
selects = []
for input in form.inputs:
        if 'type' not in input.attrib:
selects.append(input)
for select in selects:
tags[select.attrib['name']] = []
for child in select.getchildren():
tags[select.attrib['name']].append(child.attrib['value'])
return tags | 15773b1fd7483a71e6bae7d5d00647af788b8c8a | 15,524 |
def write_kpts_vasp(kpts):
"""
Write a list of kpoints to a vasp-formatted KPOINTS file.
"""
output = "Supercell_k-points_from_primitive_cell\n"
output += str(len(kpts))+"\n"
output += "reciprocal\n"
for i in range(len(kpts)):
for j in range(3):
output += str(kpts[i,j])+" "
output += "1.0\n"
return output | 66a0f5b5128172d64c1e3d0af55695ffb10653b3 | 15,525 |
def prettyprint_binding(binding, indent_level=0):
"""Pretty print a binding with variable id and data."""
indent = " " * indent_level
if not binding:
return indent + "<>"
return "%s<v%d : %r>" % (indent, binding.variable.id, binding.data) | e1e196afd53027bbab076585dd8b6b6464b578bc | 15,531 |
def dist(p,q):
""" Calculates the distance between the points p and q.
Parameters
---------
p : Point
the first point
q : Point
the second point
Returns
----------
Float
The distance between the points p and q.
"""
return (sum((p.array()-q.array())**2))**0.5 | 97c0beb7f8c22e59da9551e35e66a7d8e39bf667 | 15,535 |
def try_int(text, default=0):
"""Try to parse an integer but return a default if it fails."""
try:
return int(text)
except ValueError:
return default | b699bbd7209a88e1df3d2b941d581904bf30e737 | 15,536 |
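A usage sketch (assuming try_int above is in scope):

print(try_int("42"))        # 42
print(try_int("oops"))      # 0 (default)
print(try_int("oops", -1))  # -1 (explicit default)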
from typing import IO
from typing import Any
from typing import Sequence
import struct
def read_bytes_as_tuple(f: IO[Any], fmt: str) -> Sequence[Any]:
"""Read bytes using a `struct` format string and return the unpacked data values.
Parameters
----------
f : IO[Any]
The IO stream to read bytes from.
fmt : str
A Python `struct` format string.
Returns
-------
Sequence[Any]
The unpacked data values read from the stream.
"""
data = f.read(struct.calcsize(fmt))
return struct.Struct(fmt).unpack(data) | b2622d5aad02c528163cb3318b5c70ea0510ea01 | 15,537 |
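A usage sketch (assuming read_bytes_as_tuple above is in scope); an in-memory io.BytesIO stream stands in for a real file.

import io
import struct

stream = io.BytesIO(struct.pack("<HHf", 3, 7, 1.5))
print(read_bytes_as_tuple(stream, "<HHf"))  # (3, 7, 1.5)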
def clean_record(rec: list, field_names: list) -> dict:
"""
Parses the record supplied by the calling function and returns a dictionary
that can be transformed into a database record.
:param rec: record extracted from file
    :type rec: list
:param field_names: field names for the record
    :type field_names: list
:rtype: Dictionary of cleaned elements.
"""
return dict(zip(field_names, [elem.strip('~') for elem in rec])) | a76974cb8f7e12aec80404df85b90001df2a69f4 | 15,539 |
import re
def _get_changelist(perforce_str):
"""Extract change list from p4 str"""
rx = re.compile(r'Change: (\d+)')
match = rx.search(perforce_str)
if match is None:
v = 'UnknownChangelist'
else:
try:
v = int(match.group(1))
except (TypeError, IndexError):
v = "UnknownChangelist"
return v | 26b08d8ccf6251e0d3a9698147093e2bb6e89919 | 15,540 |
def parse_step_id_from_sacct(output, step_name):
"""Parse and return the step id from a sacct command
:param output: output of sacct --noheader -p
--format=jobname,jobid --job <alloc>
:type output: str
:param step_name: the name of the step to query
:type step_name: str
:return: the step_id
:rtype: str
"""
step_id = None
for line in output.split("\n"):
sacct_string = line.split("|")
if len(sacct_string) < 2:
continue
if sacct_string[0] == step_name:
step_id = sacct_string[1]
return step_id | 2ed06aba82665ad2a42c12a39c589d10258883f4 | 15,541 |
def extract_items(topitems_or_libraryitems):
"""
Extracts a sequence of items from a sequence of TopItem or
LibraryItem objects.
"""
seq = []
for i in topitems_or_libraryitems:
seq.append(i.item)
return seq | 6a6918b4fc8153f4c98f5df906ce584422b3df78 | 15,544 |
def get_custom_module_description(name):
"""Return string with description for custom modules."""
return "Custom module from corpus directory ({}.py).".format(name.split(".")[1]) | 647e2df266a25b888558c34b4d425e09b0f58ca1 | 15,546 |
def _showcompatlist(
context, mapping, name, values, plural=None, separator=b' '
):
"""Return a generator that renders old-style list template
name is name of key in template map.
values is list of strings or dicts.
plural is plural of name, if not simply name + 's'.
separator is used to join values as a string
expansion works like this, given name 'foo'.
if values is empty, expand 'no_foos'.
if 'foo' not in template map, return values as a string,
joined by 'separator'.
expand 'start_foos'.
for each value, expand 'foo'. if 'last_foo' in template
map, expand it instead of 'foo' for last key.
expand 'end_foos'.
"""
if not plural:
plural = name + b's'
if not values:
noname = b'no_' + plural
if context.preload(noname):
yield context.process(noname, mapping)
return
if not context.preload(name):
if isinstance(values[0], bytes):
yield separator.join(values)
else:
for v in values:
r = dict(v)
r.update(mapping)
yield r
return
startname = b'start_' + plural
if context.preload(startname):
yield context.process(startname, mapping)
def one(v, tag=name):
vmapping = {}
try:
vmapping.update(v)
# Python 2 raises ValueError if the type of v is wrong. Python
# 3 raises TypeError.
except (AttributeError, TypeError, ValueError):
try:
# Python 2 raises ValueError trying to destructure an e.g.
# bytes. Python 3 raises TypeError.
for a, b in v:
vmapping[a] = b
except (TypeError, ValueError):
vmapping[name] = v
vmapping = context.overlaymap(mapping, vmapping)
return context.process(tag, vmapping)
lastname = b'last_' + name
if context.preload(lastname):
last = values.pop()
else:
last = None
for v in values:
yield one(v)
if last is not None:
yield one(last, tag=lastname)
endname = b'end_' + plural
if context.preload(endname):
yield context.process(endname, mapping) | a0e717c645aa00a8dd239a435cdec42593d58411 | 15,547 |
def get_pixdist_ratio(m_size, ant_rad):
"""Get the ratio between pixel number and physical distance
Returns the pixel-to-distance ratio (physical distance, in meters)
Parameters
----------
m_size : int
The number of pixels used along one-dimension for the model
(the model is assumed to be square)
ant_rad : float
The radius of the antenna trajectory during the scan, in meters
Returns
-------
pix_to_dist_ratio : float
The number of pixels per physical meter
"""
# Get the ratio between pixel and physical length
pix_to_dist_ratio = m_size / (2 * ant_rad)
return pix_to_dist_ratio | b307ee5da49b9cd92958aa6d94c4e3ffa860bf1d | 15,549 |
import textwrap
def wrap_string(data, width=40, indent=32, indentAll=False, followingHeader=None):
"""
Print a option description message in a nicely
wrapped and formatted paragraph.
followingHeader -> text that also goes on the first line
"""
data = str(data)
if len(data) > width:
lines = textwrap.wrap(textwrap.dedent(data).strip(), width=width)
if indentAll:
returnString = ' ' * indent + lines[0]
if followingHeader:
returnString += " " + followingHeader
else:
returnString = lines[0]
if followingHeader:
returnString += " " + followingHeader
i = 1
while i < len(lines):
returnString += "\n" + ' ' * indent + (lines[i]).strip()
i += 1
return returnString
else:
return data.strip() | 4e983e4116058da1fa263e65aa3516a275d89001 | 15,551 |
def initial(x, idx):
"""
Get first value of series.
"""
if idx is None:
return x.iloc[0]
return x.iloc[idx.start or 0] | 28d364329af3f579868807773efb8a074e2c3cb5 | 15,552 |
def get_duration_in_time( duration ):
"""
    Calculate the duration in hh:mm:ss and return it
@param duration: timestamp from the system
@return: formatted string with readable hours, minutes and seconds
"""
seconds = int( duration % 60 )
minutes = int( (duration / 60) % 60 )
hours = int( (duration / 3600) % 24 )
output = "{:0>2}:{:0>2}:{:0>2}".format(hours, minutes, seconds)
return output | bcd6413b32183688c8b2aac428c5f9bfd5d34b8e | 15,570 |
def set_(data_type):
""" Create an alias for a SetType that contains this data type """
return frozenset([data_type]) | 3ffbe4e111506c5897793cfe423cbbe55137de53 | 15,573 |
import math
def marginal_parameter_likelihood(p: float, lambda_: float, x: int, tx: float, T: float) -> float:
"""Computes the marginal likelihood of the parameters lambda and p, given the transaction history.
See http://brucehardie.com/papers/018/fader_et_al_mksc_05.pdf equation (3).
Args:
p (float): The churn parameter ``p``.
lambda_ (float): The time interval parameter ``lambda``.
x (int): The number of events observed.
tx (float): The time of the last transaction observed.
T (float): The current time (``asof_time``).
Returns:
float: The likelihood
"""
z1 = (1 - p) ** x * lambda_**x * math.exp(-lambda_ * T)
delta = int(x > 0)
z2 = delta * p * (1 - p) ** (x - 1) * lambda_**x * math.exp(-lambda_ * tx)
return z1 + z2 | a90140259da58247b13f4ce165f035076c9816be | 15,578 |
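A usage sketch (assuming marginal_parameter_likelihood above is in scope); the parameter values and transaction history below are made up for illustration.

# Hypothetical customer: 4 repeat purchases, the last at week 30, observed for 40 weeks.
likelihood = marginal_parameter_likelihood(p=0.1, lambda_=0.15, x=4, tx=30.0, T=40.0)
print(f"{likelihood:.6e}")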
import math
def __rotate(origin: tuple, point: tuple, angle: float):
"""
Rotates a point counterclockwise by a given angle around a given origin.
:param origin: Landmark in the (X, Y) format of the origin from which to count angle of rotation
:param point: Landmark in the (X, Y) format to be rotated
:param angle: Angle under which the point shall be rotated
:return: New landmarks (coordinates)
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy | af8acd38d07042c1ab8d9ae3fe05afb7a1fea623 | 15,582 |
def precision_ranges(result2rank, total_terms):
"""Computes precision at standard cutoff ranks: [5, 10, 15, 20, 30, 100, 200, 500, 1000]
Args:
result2rank: A dict of source to ranks of good translation candidates.
total_terms: The expected term count.
Returns:
A dict containing a precision value for each cutoff rank
"""
map_of_prec = dict()
for cutoff in [5, 10, 15, 20, 30, 100, 200, 500, 1000]:
map_of_prec[cutoff] = sum(1.0 for ranks in result2rank.values() if len([r for r in ranks if r <= cutoff]) > 0) / total_terms
return map_of_prec | 7e1c60030933530c1d1b1bd09387270174ae2aad | 15,583 |
import socket
def get_local_network(private=False, subnet=100):
"""
Returns the IP address of the local network
    Defaults to the local area network. If 'private', defaults to '100' for
the ROACH network.
"""
if private:
IP = socket.gethostbyname("192.168."+str(subnet)+".1")
else:
IP = socket.gethostbyname(socket.gethostname())
return '.'.join(IP.split('.')[:-1]) | c007005bcf2e377b0c1789b5eeb43fd43c4e46e4 | 15,590 |
def make_text_objs(text, font, color):
"""
Function creates a text.
text -> string; content of text
font -> Font object; face of font
    color -> tuple of color (red, green, blue); colour of text
returns the surface object, rectangle object
"""
surf = font.render(text, True, color)
return surf, surf.get_rect() | d5c0a41982f6d6979063dcb24aafc186ff132f1c | 15,594 |
def get_string(element):
"""Helper for safely pulling string from XML"""
    return None if element is None else element.string
def dt_to_us_from_epoch(dt):
"""Convert datetime.datetime object to microseconds since the epoch.
:param dt: datetime.datetime object.
:returns: microseconds since the epoch as a string.
"""
return '{:.0f}'.format(dt.timestamp() * 1e6) | 60d4befd19666ad4799bf45c00a083b3443e9c82 | 15,596 |
import uuid
def high_low_2_uuid(uuid_high, uuid_low):
"""Combine high and low bits of a split UUID.
:param uuid_high: The high 64 bits of the UUID.
:type uuid_high: int
:param uuid_low: The low 64 bits of the UUID.
:type uuid_low: int
:return: The UUID.
:rtype: :py:class:`uuid.UUID`
"""
return uuid.UUID(int=(uuid_high << 64) + uuid_low) | b0c7c53bc4b61085574bcda1d5a8c616f0db8c92 | 15,605 |
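A round-trip sketch (assuming high_low_2_uuid above is in scope): split a UUID into its high and low 64-bit halves and reassemble it.

import uuid

original = uuid.uuid4()
high, low = original.int >> 64, original.int & ((1 << 64) - 1)
assert high_low_2_uuid(high, low) == original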
def _make_pr(source_repo, source_branch, base_ref, base_url=''):
"""Create a PR JSON object."""
return {
'head': {
'repo': {
'full_name': source_repo,
},
'ref': source_branch,
},
'base': {
'ref': base_ref,
'repo': {
'clone_url': base_url,
},
},
} | d32e9748608bea7f491db39a750f64cea463b50b | 15,607 |
def _isFloat(argstr):
""" Returns True if and only if the given string represents a float. """
try:
float(argstr)
return True
except ValueError:
return False | 414e802de9557b531e881a5c8430a9a2cb295339 | 15,609 |
import struct
import re
import io
def get_size(data: bytes):
"""
Returns size of given image fragment, if possible.
Based on image_size script by Paulo Scardine: https://github.com/scardine/image_size
"""
size = len(data)
if size >= 10 and data[:6] in (b'GIF87a', b'GIF89a'):
# GIFs
w, h = struct.unpack("<HH", data[6:10])
return int(w), int(h)
if size >= 24 and data.startswith(b'\211PNG\r\n\032\n') and data[12:16] == b'IHDR':
# PNGs
w, h = struct.unpack(">LL", data[16:24])
return int(w), int(h)
if size >= 16 and data.startswith(b'\211PNG\r\n\032\n'):
# older PNGs
w, h = struct.unpack(">LL", data[8:16])
return int(w), int(h)
if size >= 30 and data[:4] == b'RIFF' and data[8:12] == b'WEBP':
# WebP
webp_type = data[12:16]
if webp_type == b'VP8 ': # Lossy WebP (old)
w, h = struct.unpack("<HH", data[26:30])
elif webp_type == b'VP8L': # Lossless WebP
bits = struct.unpack("<I", data[21:25])[0]
w = int(bits & 0x3FFF) + 1
h = int((bits >> 14) & 0x3FFF) + 1
elif webp_type == b'VP8X': # Extended WebP
w = int((data[26] << 16) | (data[25] << 8) | data[24]) + 1
h = int((data[29] << 16) | (data[28] << 8) | data[27]) + 1
else:
w = 0
h = 0
return w, h
if b'<svg' in data:
# SVG
start = data.index(b'<svg')
end = data.index(b'>', start)
svg = str(data[start:end + 1], 'utf8')
w = re.search(r'width=["\'](\d+)', svg)
h = re.search(r'height=["\'](\d+)', svg)
return int(w.group(1) if w else 0), int(h.group(1) if h else 0)
if size >= 2 and data.startswith(b'\377\330'):
# JPEG
with io.BytesIO(data) as inp:
inp.seek(0)
inp.read(2)
b = inp.read(1)
while (b and ord(b) != 0xDA):
while ord(b) != 0xFF:
b = inp.read(1)
while ord(b) == 0xFF:
b = inp.read(1)
if 0xC0 <= ord(b) <= 0xC3:
inp.read(3)
h, w = struct.unpack(">HH", inp.read(4))
return int(w), int(h)
inp.read(int(struct.unpack(">H", inp.read(2))[0]) - 2)
b = inp.read(1)
return 0, 0 | 1b45563b0f59f5670638d554821406389b5333a6 | 15,612 |
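A usage sketch (assuming get_size above is in scope); instead of reading a real image, it builds a minimal GIF89a header in memory.

import struct

gif_header = b"GIF89a" + struct.pack("<HH", 320, 240) + b"\x00" * 4
print(get_size(gif_header))  # (320, 240)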
def category_grouping(data):
"""
Each of the features "TrafficType", "OperatingSystems", and "Browser"
contain categorical values with less than 1% (123) overall datapoints.
Since these "categorical outliers" could potentially skew a clustering
algorithm, we will combine each value with ten or fewer datapoints into
a single "Other" value.
Parameters:
data: The dataset in question.
Returns:
data: The transformed dataset.
"""
data['TrafficType'] = data['TrafficType'].apply(lambda x: 'Other' if x in
['7', '9', '12', '14',
'15', '16', '17', '18',
'19']
else x)
data['OperatingSystems'] = data['OperatingSystems'].apply(lambda x: 'Other'
if x in
['4', '5', '6',
'7', '8']
else x)
data['Browser'] = data['Browser'].apply(lambda x: 'Other' if x in
['3', '7', '9', '11', '12', '13']
else x)
data['VisitorType'] = data['VisitorType'].apply(lambda x: x if
x == 'Returning_Visitor'
else 'New_or_Other')
return data | 25a461688c13b8e583cc2748c252b4035c789f48 | 15,613 |
def srgb_to_linear(c):
"""Convert SRGB value of a color channel to linear value."""
assert 0 <= c <= 1
if c <= 0.03928:
        return c / 12.92
else:
return ((c + 0.055) / 1.055)**2.4 | 805960e67b40923608d51cab2a1915aae3d1e3ba | 15,614 |
def _df_to_html(df):
"""Converts DataFrame to HTML table with classes for formatting.
Parameters
---------------
df : pandas.DataFrame
Returns
---------------
str
HTML table for display.
"""
classes = ['table', 'table-hover']
html_raw = '<div id="config_table">{src}</div>'
src = df.to_html(index=False,
classes=classes,
justify='center',
escape=False)
html = html_raw.format(src=src)
return html | e1310947ff84178e0da32a8be4cda35d9ea16326 | 15,615 |
def separate_coords(df):
"""Separate the coordinates into a list of 'lats' and 'longs'."""
return df['coordinates'].apply(lambda x: x['latitude']), df['coordinates'].apply(lambda x: x['longitude']) | 5141431e5d1d9a2a60e31867c07b50a645d48165 | 15,623 |
import math
def calc_gps_distance(lat1, long1, lat2, long2):
"""
All calculations need to be done in radians, instead of degrees. Since most
GPS coordinates tend to use degrees, we convert to radians first, and then
use the Haversine formula. The Haversine formula gives the shortest
great-circle distance between any two points, i.e. as-the-crow-flies
distance using a reasonably focussed crow
WARNING: The calculation is done in Kilometres. But, at the street level,
kilometres is not useful. So, we convert to metres and return!
>>> calc_gps_distance(53.34376885732333,-6.240988668839767,53.34376349, \
-6.24099402)
    0.6945396560484981
>>> calc_gps_distance(53.34376885732333,-6.240988668839767,0,0)
5959609.740337647
>>> calc_gps_distance(90,0,0,0)
10007543.398
"""
radius_of_earth = 6371 # in Kilometres
delta_latitude = math.radians(lat2 - lat1)
delta_longitude = math.radians(long2 - long1)
rad_lat1 = math.radians(lat1)
rad_lat2 = math.radians(lat2)
a = math.sin(delta_latitude / 2) * math.sin(delta_latitude / 2) + \
math.cos(rad_lat1) * math.cos(rad_lat2) * math.sin(delta_longitude / 2) \
* math.sin(delta_longitude / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
distance = radius_of_earth * c
distance_in_metres = distance * 1000
return distance_in_metres | 4c38ad7a7d468b137834d87959d1c45bd00df9fb | 15,624 |
def fibonacci(n):
"""returns a list of the first n fibonacci values"""
n0 = 0
n1 = 1
fib_list = []
if type(n) != type(0) or n<=0:
raise Exception("'%s' is not a positive int" % str(n))
for i in range(n):
fib_list.append(n1)
(n0, n1) = (n1, n0+n1)
return fib_list | eb680c89d9d66647b24b5d27dbb34e1a8bb4352c | 15,629 |
def sanitize_mobile_number(number):
"""Add country code and strip leading zeroes from the phone number."""
if str(number).startswith("0"):
return "+254" + str(number).lstrip("0")
elif str(number).startswith("254"):
return "+254" + str(number).lstrip("254")
else:
return number | 8f08563f015d77722f5dec0d07686956b4f46019 | 15,630 |
def total_seconds(timedelta):
""" Some versions of python don't have the timedelta.total_seconds() method. """
if timedelta is None:
return None
return (timedelta.days * 86400) + timedelta.seconds | 800c70a2855034563ab9baf1ca12032677889f5b | 15,631 |
def find_minimal_helices(nturn_starts):
"""
Find helices on the basis of the n-turn beginnings lists
Minimal helices are defined as consecutive n-turns
"""
min_helices = {
"3-min_helices": [],
"4-min_helices": [],
"5-min_helices": []
}
for n in [3, 4, 5]:
name = str(n) + "-turn"
list_nturns = nturn_starts[name]
for i in range(len(list_nturns) - 1):
if list_nturns[i+1] == list_nturns[i] + 1:
helix_name = str(n) + "-min_helices"
min_helices[helix_name].append(list_nturns[i])
return min_helices | 2ceaa35efdd09de8b943c96e1caa70d86fcc8832 | 15,636 |
def get_authorization_key(request):
"""
Get the Authorization Key from the request
"""
auth = request.headers.get('Authorization')
if auth:
auth = auth.split()
if len(auth) == 2:
if auth[0].lower() == 'key':
return auth[1]
return None | 5cba14bdebb4b203c773c1e0832373114e554c78 | 15,639 |
def reactor_efficiency(voltage, current, theoretical_max_power):
"""Assess reactor efficiency zone.
:param voltage: voltage value (integer or float)
:param current: current value (integer or float)
:param theoretical_max_power: power that corresponds to a 100% efficiency (integer or float)
:return: str one of 'green', 'orange', 'red', or 'black'
Efficiency can be grouped into 4 bands:
1. green -> efficiency of 80% or more,
2. orange -> efficiency of less than 80% but at least 60%,
3. red -> efficiency below 60%, but still 30% or more,
4. black -> less than 30% efficient.
The percentage value is calculated as
(generated power/ theoretical max power)*100
where generated power = voltage * current
"""
generated_power = voltage * current
efficiency = (generated_power / theoretical_max_power) * 100
if efficiency >= 80:
return 'green'
if 60 <= efficiency < 80:
return 'orange'
if 30 <= efficiency < 60:
return 'red'
return 'black' | b58fe806da2bcfdabc12bd3f5b36a0b296ce7142 | 15,640 |
def is_list_with_max_len(value, length):
""" Is the list of given length or less?
:param value: The value being checked
:type value: Any
:param length: The length being checked
:type length: Nat
:return: True if the list is of the length or less, False otherwise
:rtype: bool
"""
return isinstance(value, list) and len(value) <= length | fd5016617d264b79ee4e4a0dae7782776d997fc5 | 15,642 |
def split_data_to_chunks(data: list, max_chunk_size: int, overlapping_size: int):
"""
Because GP can take very long to finish, we split data into smaller chunks and train/predict these chunks separately
:param data:
:param max_chunk_size:
:param overlapping_size:
:return: list of split data
"""
chunks = list()
n = len(data)
i = 0
while True:
next_i = min(i + max_chunk_size, n)
chunks.append(data[i:next_i])
if n <= next_i:
break
i = next_i - overlapping_size
return chunks | 370e9fe9a17c58d7dca202bb4c822f9b8b662fae | 15,644 |
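A usage sketch (assuming split_data_to_chunks above is in scope); note how each chunk repeats the last element of the previous one when overlapping_size=1.

data = list(range(10))
print(split_data_to_chunks(data, max_chunk_size=4, overlapping_size=1))
# [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]]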
def read_moves(file_name):
"""Read moves from file. Move features are pipe-separated.
Returns moves and keys as lists (every move is a dict at this point)."""
moves = []
with open(file_name, encoding='utf-8') as f:
first_line = f.readline()
first_line = first_line[1:] # remove '#' at the beginning
keys = [w.strip() for w in first_line.split('|')]
for line in f:
if line.startswith('#') or not line.strip():
continue
m = {}
vals = [v.strip() for v in line.split('|')]
for k, v in zip(keys, vals):
m[k] = eval(v)
moves.append(m)
return moves, keys | a87b73feaf7d49b3ece716fe5732f5647e5dcd25 | 15,645 |
def number_of_constituents(bc_class):
"""
Calculates the number of constituents
Args:
bc_class: The ADH simulation class that holds all simulation information
Returns:
The number of transport constituents
"""
num_trn = 0
cn = bc_class.constituent_properties
if cn.salinity:
num_trn += 1
if cn.temperature:
num_trn += 1
if cn.vorticity:
num_trn += 1
if not cn.general_constituents.empty:
num_trn += len(cn.general_constituents.index)
if not cn.sand.empty:
num_trn += len(cn.sand.index)
if not cn.clay.empty:
num_trn += len(cn.clay.index)
return num_trn | b290bc6ef6f4b02889dcc82d91120f44bff5f650 | 15,646 |
def circle_distance_circle(circle, other):
"""
Give the distance between two circles.
The circles must have members 'center', 'r', where the latest is the radius.
"""
d = abs(circle.center - other.center) - circle.r - other.r
if d < 0.0:
d = 0.0
return d | c76146d1ec9003be5345b14b12563dfb8bac7798 | 15,650 |
def safe_equals(left: object, right: object) -> bool:
"""Safely check whether two objects are equal."""
try:
return bool(left == right)
except Exception:
return False | 0ba9bb81e6b5ef8580b4677c74e82a40522d5aeb | 15,659 |
def get_minutes_remain(minutes: int) -> int:
"""
Returns minutes remaining after converting to hours
:param minutes: Total minutes before converting to hours
:return: minutes after converting to hours
"""
return minutes % 60 | abf025a83804a03d2c41b88eaac35606a5eddc4c | 15,660 |
def get_topics(bag):
"""
Get an alphabetical list of all the unique topics in the bag.
@return: sorted list of topics
@rtype: list of str
"""
return sorted(set([c.topic for c in bag._get_connections()])) | 863faf144cef064324bb0dff41a9ac70464837ee | 15,664 |
from typing import Optional
def format_ipfs_cid(path: str) -> Optional[str]:
"""Format IPFS CID properly."""
if path.startswith('Qm'):
return path
elif path.startswith('ipfs://'):
return path.replace('ipfs://', '') | eca4a79bc2ba4151495831b51bbd50df68f73025 | 15,667 |
def insertion_sort(integers):
"""Iterate over the list of integers. With each iteration,
place the new element into its sorted position by shifting
over elements to the left of the pointer until the correct
location is found for the new element.
    Returns a sorted copy of the list. O(n^2) running time.
"""
integers_clone = list(integers)
for i in range(1, len(integers_clone)):
j = i
        while j > 0 and integers_clone[j] < integers_clone[j - 1]:
integers_clone[j], integers_clone[j-1] = integers_clone[j-1], integers_clone[j]
j -= 1
return integers_clone | ee7aa8920406f7c74870e346e486f29486894df1 | 15,670 |
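A usage sketch (assuming insertion_sort above is in scope); the original list is left untouched because the function sorts a copy.

values = [5, 2, 9, 1, 5, 6]
print(insertion_sort(values))  # [1, 2, 5, 5, 6, 9]
print(values)                  # [5, 2, 9, 1, 5, 6]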
from typing import List
def primary() -> List[str]:
"""Primary color scheme."""
return ["#00A58D", "#008A8B", "#9FCD91", "#09505D", "#00587C"] | 8527c8a649e554e077a57172dfb0d529fff4036a | 15,677 |
import warnings
def pull_halo_output(h5file, clusterID, apertureID, dataset):
"""
Function to extract a dataset from a Bahamas snapshot output.
:param h5file: The h5py File object to extract the data from
:param clusterID: The number of cluster in the order imposed by FoF
:param apertureID: int(0-22) The index of the spherical aperture centred on the CoP
:param dataset: The name of the dataset to pull
:return: None if the cluster does not exist in the file, the dataset as np.ndarray if it exists
"""
if f'halo_{clusterID:05d}' not in h5file:
warnings.warn(f"[-] Cluster {clusterID} not found in snap output.")
return None
else:
return h5file[f'halo_{clusterID:05d}/aperture{apertureID:02d}/{dataset}'][...] | d389af763c7dc7a3c6e54a1f61f4906c7fa4dc0e | 15,679 |
def create_template_dict(dbs):
""" Generate a Template which will be returned by Executor Classes """
return {db: {'keys': [], 'tables_not_found': []} for db in dbs} | 01dbd3733ec77fef0323eea35bc67064b19093c9 | 15,682 |
def get_current_application(request):
"""Get current application."""
try:
app_name = request.resolver_match.namespace
if not app_name:
app_name = "home"
except Exception as e:
app_name = "home"
return app_name | 310004714da3129cafb2bc635837920a7457fbe7 | 15,683 |
def missing_summary(df):
"""
Takes in a dataframe and
returns a summary of all
missing values.
Parameters:
-----------
df : dataframe
Dataframe to calculate the
missing summary from.
Returns:
--------
df_miss : dataframe
Missing values summary
"""
# Copy for output
df_out = df.copy()
# Create a new summary dataframe
# for each column.
df_miss = df_out.notnull().sum().reset_index()
df_miss["Missing"] = df_out.isnull().sum().values
df_miss["Percentage Missing"] = (
(df_miss["Missing"] / df_out.shape[0]) * 100
).round(1)
# Rename all the columns
df_miss.columns = ["Column", "Not-Null", "Missing", "Perc Missing (%)"]
return df_miss | 4c6bb35e9d01827667d7b5ecbbc0b1ebcc8231bb | 15,687 |
import hashlib
def checksum(file, method='sha1', chunk_size=4096):
"""Calculate the checksum of a file.
Args:
file: str, path-like, or file-like bytes object
"""
try:
fh = open(file, 'rb')
except TypeError:
fh = file
try:
h = hashlib.new(method)
while True:
chunk = fh.read(chunk_size)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
finally:
if fh != file:
fh.close() | 9638985bec0c95d2c9cd30a93bf48f93381f99de | 15,689 |
def week(make_week):
"""Fixture creating a week."""
return make_week() | 52dda51ed415d75f966e302c930cc54119005307 | 15,693 |
def pad_batch(batch, pad_id, neox_args):
"""
pads context lengths in batch with pad_id to equal neox_args.seq_length,
and returns the padded batch and the new lengths.
batch: torch.Tensor of tokens
pad_id: int, integer to use as padding token
neox_args: neox_args
"""
context_lengths = []
for tokens in batch:
context_length = len(tokens)
if context_length < neox_args.seq_length:
tokens.extend([pad_id] * (neox_args.seq_length - context_length))
context_lengths.append(context_length)
return batch, context_lengths | 4d2b4630c5a84e1eaa7e0e05a45b8cfcd0d9fae8 | 15,699 |
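A usage sketch (assuming pad_batch above is in scope); a SimpleNamespace stands in for the real neox_args object, which only needs a seq_length attribute here.

from types import SimpleNamespace

batch = [[5, 6, 7], [1, 2]]
padded, lengths = pad_batch(batch, pad_id=0, neox_args=SimpleNamespace(seq_length=5))
print(padded)   # [[5, 6, 7, 0, 0], [1, 2, 0, 0, 0]]
print(lengths)  # [3, 2]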
def reverse_edges(edges):
"""Reverses direction of dependence dict.
Parameters
----------
edges : dict
Dict of the form {a: {b, c}, b: set(), c: set()} where b and c depend
on a.
Returns
-------
Dict of the form {a: set(), b: {a}, c: {a}} where b and c depend on a.
Examples
--------
.. testcode::
from nengo.utils.graphs import reverse_edges
d = {0: {1, 2}, 1: {2, 3}, 2: set(), 3: set()}
print(reverse_edges(d))
.. testoutput::
{0: set(), 1: {0}, 2: {0, 1}, 3: {1}}
Notes
-----
    dict order is not deterministic. As we iterate over the
    input dict, the output of this function depends on the
    dict order. So this function's output order should be considered
    nondeterministic.
"""
result = {k: set() for k in edges}
for key in edges:
for val in edges[key]:
result[val].add(key)
return result | d0de015c2b26f6ba211009b6d7bef4c9a9750baf | 15,702 |
def find_empty_cells(board):
"""Returns the empty cells of the board."""
return [x for x in board if x in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]] | b7a976f910710f7e10f1ad60804a6d7e22550da1 | 15,713 |
def is_set_nickname(string, nickname):
"""
Test if this is a nickname setting message
"""
if string.startswith(f"{nickname} set the nickname for "):
return True
if string.startswith(f"{nickname} set his own nickname to"):
return True
if string.startswith(f"{nickname} set her own nickname to"):
return True
if string.startswith(f"{nickname} set your nickname to"):
return True
return False | aec4909f3c4d3dea689383cc73d26623a16dfd85 | 15,719 |
def chunks(l, n):
"""Yield successive n-sized chunks from l.
Args:
l:``list``
list of data
n:``int``
n-sized
Return:
list of list: [[]]
"""
temp_l = []
for i in range(0, len(l), n):
temp_l.append(l[i : i + n])
return temp_l | 75e3e987bab8ea1f5f5aada5851fc0692ea69ccc | 15,723 |
def decay_every_scheduler(step, steps_per_decay, decay_factor):
"""Gives a scaling factor based on scheduling with a decay every n-steps.
Args:
step: int; Current step.
steps_per_decay: int; How often to decay.
decay_factor: float; The amount to decay.
Returns:
Scaling factor applied to the learning rate on the given step.
"""
return decay_factor**(step // steps_per_decay) | 1a7a0f333cebfbc6851111f9cd83156b72fecc3d | 15,727 |
def filter_ctrl_pert(gse_gsm_info):
"""
Filter the GSE that do not contain both control and perturbation samples
Args:
gse_gsm_info: the GSE and GSM info tuple
Returns:
True if there are both control and perturbation samples, False otherwise
"""
gse_id, gsm_info = gse_gsm_info
sample_types = gsm_info[3]
has_ctrl = has_pert = False
for sample_type in sample_types:
if has_ctrl and has_pert:
break
if sample_type == "ctrl":
has_ctrl = True
elif sample_type == "pert":
has_pert = True
return has_ctrl and has_pert | 491fd23723a026ddf68fe3a56d98e097a0732e63 | 15,728 |
def construct_base_url(bible, text):
"""Return the base URL for BIBLE and TEXT.
BIBLE is 'KJV' or 'LEB'
TEXT is 'xml' or 'json' or 'txt'
"""
base_url = 'http://api.biblia.com/v1/bible/content/'
url = base_url + bible + '.' + text
return url | aa4ab823808d186a2830f1531706919ef0afd98f | 15,739 |
def dsu_sort(idx, seq):
"""Sorts a list of tuples according to the idx column using a Decorate-Sort-Undecorate method"""
for i, e in enumerate(seq):
seq[i] = (e[idx], e)
seq.sort()
seq.reverse()
for i, e in enumerate(seq):
seq[i] = e[1]
return seq | a20f28aa10522d9653a85421104755f31861218c | 15,741 |
def _is_line_from_candidate(line: str, from_imports: list, import_str: str) -> bool:
"""
Check if line has from import
:param line: the line to check
:param from_imports: the from imports list
:param import_str: the import string
:return: True if the line has from import to replace
"""
if import_str not in line:
return False
for from_import in from_imports:
if line.strip().startswith(from_import):
return True
return False | 04e2d20985fde1a08a9090ac733aafa8c3911968 | 15,743 |
from datetime import datetime
def gen_today_file_name() -> str:
"""
generate today json filename
Returns:
today_in_history-*.json
"""
now = datetime.now().strftime('%m-%d')
file_today: str = 'today_in_history-%s.json' % now
return file_today | 37991e761021a1d5742b82359fbdf88d1d58f975 | 15,745 |
def get_valid_messages(log, expiry):
"""
Return only the messages that haven't expired.
"""
valid = []
for message in log:
try:
timestamp = int(message.get('timestamp', 0))
if timestamp > expiry:
valid.append(message)
except ValueError:
continue
return valid | d24e9bc5e6d0a2c0efd9442f0d36f6e248b51137 | 15,750 |
def decodeSurrogatePair(hi, lo):
"""Returns a scalar value that corresponds to a surrogate pair"""
return ((ord(hi) - 0xD800) * 0x400) + (ord(lo) - 0xDC00) + 0x10000 | 018eb6678052af00cd1b8fe51d58230e26c44bd8 | 15,763 |
import socket
def _get_free_port(host):
"""
Gets a free port by opening a socket, binding it, checking the assigned
port, and then closing it.
"""
s = socket.socket()
s.bind((host, 0))
port = s.getsockname()[1]
s.close()
return port | 85e08ba0a7832c5de0620634d2285bb317cde751 | 15,764 |
import hashlib
def calculate_variant_md5(chromosome, position, reference, alternate):
"""Calculate MD5 hash for a variant
Args:
chromosome (str): Chromosome
position (int): Genomic position
reference (str): Reference allele
alternate (str): Alternate allele
Returns:
str: MD5 hash for a variant
"""
key = '|'.join(list(map(str, [chromosome, position, reference, alternate])))
return hashlib.md5(key.encode('utf-8')).hexdigest() | 36a201f05a8e2a09c2e567acf02089d01462248c | 15,769 |
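A usage sketch (assuming calculate_variant_md5 above is in scope); the variant below is an arbitrary illustrative example.

digest = calculate_variant_md5("7", 140453136, "A", "T")
print(len(digest), digest)  # 32 and a deterministic hex string for this variant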
import torch
def _find_max_response_value(train_X, train_Y):
"""
determines best (max) response value max_X across recorded values in train_Y, together with the corresponding
X values
:param train_X (torch.tensor)
:param train_Y (torch.tensor)
:return max_X (float): the X values corresponding to max_Y
:return max_Y (float): the maximum Y value recorded
"""
idmax = train_Y.argmax().item()
max_X = torch.tensor([train_X[idmax].numpy()], dtype=torch.double)
max_Y = torch.tensor([train_Y[idmax].numpy()], dtype=torch.double)
return max_X, max_Y | 8be4afaa622f42f23859bb6ec5262d1cd2458781 | 15,772 |
import math
def n_vector(lat, lon):
"""Converts lat/long to n-vector 3D Cartesian representation."""
# Convert to radians.
if not (-90.0 <= lat <= 90):
raise ValueError("lat={:2.2f}, but must be in [-90,+90]".format(lat))
rad_lat = math.radians(lat)
if not (-180.0 <= lon <= 180):
raise ValueError("lon={:2.2f}, but must be in [-180,+180]".format(lon))
rad_lon = math.radians(lon)
x = math.cos(rad_lat) * math.cos(rad_lon)
y = math.cos(rad_lat) * math.sin(rad_lon)
z = math.sin(rad_lat)
return x, y, z | dca7be0345b7498ece4f66cceeb6e062e9cda9a0 | 15,774 |
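A sanity-check sketch (assuming n_vector above is in scope): the equator/prime-meridian point maps to (1, 0, 0) and the north pole to (0, 0, 1), up to floating-point rounding.

print(tuple(round(c, 6) for c in n_vector(0.0, 0.0)))   # (1.0, 0.0, 0.0)
print(tuple(round(c, 6) for c in n_vector(90.0, 0.0)))  # (0.0, 0.0, 1.0)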
def get_fields_for_l3_plot(product: str, model: str) -> list:
"""Return list of variables and maximum altitude for Cloudnet quicklooks.
Args:
product (str): Name of product, e.g., 'iwc'.
model (str): Name of the model, e.g., 'ecmwf'.
Returns:
list: List of wanted variables
"""
if product == "l3-iwc":
fields = [f"{model}_iwc", f"iwc_{model}"]
elif product == "l3-lwc":
fields = [f"{model}_lwc", f"lwc_{model}"]
elif product == "l3-cf":
fields = [f"{model}_cf", f"cf_V_{model}"]
else:
raise NotImplementedError
return fields | 6a757c48cfb168c86912315e95e9f70c63458c6b | 15,779 |
def get_fk_query_name(model, related_model):
"""
Format the DB column name of a foreign key field of a model
with the DB table of the model. Finds the foreign key relating to
related model automatically, but assumes that there is only one related field.
Args:
model (Model): The model for which the foreign key field is searched.
related_model (Model): A model related to `model`.
Returns:
        str: The formatted foreign key column name.
"""
related_field = [f for f in model._meta.get_fields()
if f.is_relation and f.concrete and f.related_model == related_model]
return '%s.%s' % (model._meta.db_table, related_field[0].column) | 4b3c145486537274a64d8675f81276ba5018975e | 15,780 |
def compute_something(a: float, b: int) -> float:
"""Sums `a` and `b`.
Args:
a: A brief explanation of `a`.
b: A brief explanation of `b`.
Returns:
float: The sum of `a` and `b`.
Notes:
The addition of an `int` and a `float` returns a `float`.
Mathematically, this performs the following operation:
.. math::
c = a + b
Warnings:
The code will not break if you pass two str.
"""
return a + b | 997d6d811d10a2d70606addc76f7e906e8f9c73d | 15,783 |
import math
def is_finite(x):
"""
Returns true if the argument is a float or int and it is not infinite or NaN
"""
try:
        return math.isfinite(x)
except TypeError:
return False | 1dc4e253078f73126320a0e80658d52cceabdb07 | 15,787 |
def _get_dim_size(start, stop, step):
"""Given start, stop, and stop, calculate the number of elements
of this slice."""
assert step != 0
if step > 0:
assert start < stop
dim_size = (stop - start - 1) // step + 1
else:
assert stop < start
dim_size = (start - stop - 1) // (-step) + 1
return dim_size | 550f77162570fb6b0608b4f5640a5487dd728ea2 | 15,789 |
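A quick cross-check (assuming _get_dim_size above is in scope): the result should agree with len(range(start, stop, step)) for the same arguments.

print(_get_dim_size(0, 10, 3), len(range(0, 10, 3)))    # 4 4
print(_get_dim_size(10, 0, -2), len(range(10, 0, -2)))  # 5 5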
def compact_capitalized_geography_string(s):
""" Go from lowercase "county, state-abbrev" string to Capitalized string
Args:
s:
Returns:
Examples:
"lancaster, pa" --> "LancasterPA"
"anne arundel, md" --> "AnneArundelMD"
"st. mary's, md" --> "StMarysMD"
"""
s = s.replace(',', '').replace('.', '').replace("'", '').title().replace(' ', '')
return s[:len(s) - 1] + s[(len(s) - 1):].capitalize() | 2393e09007774b965f556af40c1e0cb969362cc2 | 15,790 |
def update_direction(direction, turn):
"""Return the directions ID after the given turn."""
answer = None
if turn == "R":
answer = (direction + 1) % 4
elif turn == "L":
answer = (direction - 1) % 4
return answer | a8ea3da50df3cbca3af2a4f42de764d1031285a6 | 15,792 |
def partition(lst, fn):
"""Partition lst by predicate.
- lst: list of items
- fn: function that returns True or False
Returns new list: [a, b], where `a` are items that passed fn test,
and `b` are items that failed fn test.
>>> def is_even(num):
... return num % 2 == 0
>>> def is_string(el):
... return isinstance(el, str)
>>> partition([1, 2, 3, 4], is_even)
[[2, 4], [1, 3]]
>>> partition(["hi", None, 6, "bye"], is_string)
[['hi', 'bye'], [None, 6]]
"""
true_list = []
false_list = []
for val in lst:
if fn(val):
true_list.append(val)
else:
false_list.append(val)
return [true_list, false_list] | 74b4a293bc13c06759a5334164d80f63651aefab | 15,794 |
import json
def createMessage(message):
"""Create a JSON string to be returned as response to requests"""
return json.dumps({"message": message}) | 3a37a494e0876af8f176338c16e81b41a27993d5 | 15,801 |
def _safe_decode(output_bytes: bytes) -> str:
"""
Decode a bytestring to Unicode with a safe fallback.
"""
try:
return output_bytes.decode(
encoding='utf-8',
errors='strict',
)
except UnicodeDecodeError:
return output_bytes.decode(
encoding='ascii',
errors='backslashreplace',
) | 0f4b3d9e04b910d0ccfa2fde12e209901cadf52c | 15,804 |
def example_batch_to_list(example_batch, num_templates):
"""Convert a single batch item in a dataset to a list of items.
Say you have a dataset where each item is shape {question: (), answer: ()}.
An example_batch will be a batched example with shape {question: (None,),
answer: (None,)}.
This will convert this example_batch to a list of examples, each with shape
{question: (), answer: ()}.
Args:
example_batch: a single batch item in a dataset
num_templates: the number of templates that are written, equal to batch size
Returns:
A list of items.
"""
return [
{k: v[i] for k, v in example_batch.items()} for i in range(num_templates)
] | c0eba5fee52ba59de2d59844810ccc58ace9a805 | 15,807 |
from typing import Sequence
def get_rank_upto(
ranks: Sequence[str], ter_rank: str, include_terminal: bool = False
) -> Sequence[str]:
"""Generates list of ranks from `ranks` terminated at `ter_rank`
Parameters
----------
ranks
List of ranks
ter_rank
Terminal rank
include_terminal
Include terminal/last rank or not
Returns
-------
list
List of ranks
"""
ret = []
tmp_ranks = list(ranks)
if ter_rank in tmp_ranks:
ter_index = (
tmp_ranks.index(ter_rank) + 1
if include_terminal
else tmp_ranks.index(ter_rank)
)
if ter_index != 0:
ret = tmp_ranks[:ter_index]
return ret | 9f5e21dbc80652c444e3f049d7e0bb40aca76203 | 15,809 |
import re
def StripColor(string):
"""Returns string with color escape codes removed."""
regex = re.compile(r'\x03(?:\d{1,2}(?:,\d{1,2})?)?', re.UNICODE)
return regex.sub('', string) | 04a253edf0842a2bda44d8c124445158588215e7 | 15,810 |
def hex_to_bin(hex_str: str, width: int = 32) -> str:
"""Converts hex string to binary string
Parameters
----------
hex_str : str
hexadecimal string to convert
width : int, optional
width of binary output (used for zero padding), default=32
Returns
-------
str
binary array as string
Raises
------
ValueError
raises ValueError if supplied width is not wide enough for binary string
"""
if len(hex_str)*4 > width:
raise ValueError(
f"Hex string of length {len(hex_str)} too large for binary array of width {width}"
)
format_str = f"{{0:0{width}b}}"
return format_str.format(int(hex_str, 16)) | 4ad8046e2cd97e04824239feb997381108d67e37 | 15,815 |
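A usage sketch (assuming hex_to_bin above is in scope):

print(hex_to_bin("1f", width=8))   # 00011111
print(hex_to_bin("ff", width=16))  # 0000000011111111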
def split3(text, pat1, pat2):
"""Split text in 3 parts: before pat1, between, and after pat2."""
part1, text = text.split(pat1, 1)
part2, part3 = text.split(pat2, 1)
return part1, part2, part3 | 5b8bff3b7214a1ac4999bfdc913082a98d1ed1b7 | 15,821 |
def _GenerateEstimatorConstructor(estimator_class_name, variable_types, variable_names, extension_class_name):
"""
Generates the consructor for the estimator class.
"""
code = ["\n\npublic {0}(IHostEnvironment env".format(estimator_class_name)]
# Generate the Constructor parameters
for variable_type, variable_name in zip(variable_types, variable_names):
code.append(", {0}.TransformParameter<{1}> {2}".format(extension_class_name, variable_type, variable_name))
code.extend(
[
", string outputColumn",
")\n{"
]
)
# Generate assigning the values in the constructor
for variable_name in variable_names:
code.append("\n_{0} = {0};".format(variable_name))
# Add assignments that are always required
code.extend(
[
"\n_outputColumn = outputColumn;",
"\n_host = env.Register(nameof({0}));".format(estimator_class_name),
"\n}"
]
)
return "".join(code) | 19366e1e25befa2e0723604d31f0f59b602b9b51 | 15,829 |