content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def getExactFreePlaceIndexForCoordinate(freePlaceMap, x, y):
    """
    Look up the free-place index stored at (x, y) on the FreePlaceMap.

    :param freePlaceMap: The generated FreePlaceMap (list of rows)
    :param x: The X Coordinate on the FreePlaceMap
    :param y: The Y Coordinate on the FreePlaceMap
    :return: zero-based index value at the coordinate, or None when the
             coordinate is out of bounds or the cell is marked -1
    """
    out_of_bounds = (
        freePlaceMap is None
        or y < 0
        or x < 0
        or y >= len(freePlaceMap)
        or x >= len(freePlaceMap[0])
    )
    if out_of_bounds:
        return None
    cell = freePlaceMap[y][x]
    return cell - 1 if cell != -1 else None
def align_frontiers_on_bars(frontiers, bars):
    """
    Align segment frontiers to the closest bars (in time).

    Frontiers generally occur on downbeats, so realigning the estimation can
    improve performance at low tolerances; useful when comparing against
    techniques that don't align their segmentation on bars.

    Parameters
    ----------
    frontiers : list of float
        Time of the estimated frontiers.
    bars : list of tuple of float
        The bars of the signal.

    Returns
    -------
    frontiers_on_bars : list of floats
        Frontiers, realigned on bars.
    """
    aligned = []
    bar_idx = 1
    last_bar = len(bars) - 1
    for frontier in frontiers:
        # Advance to the first bar whose end reaches the frontier.
        while bar_idx < last_bar and bars[bar_idx][1] < frontier:
            bar_idx += 1
        if bar_idx == last_bar:
            # Past the final bar: keep the frontier untouched.
            aligned.append(frontier)
            continue
        bar_start = bars[bar_idx][0]
        bar_end = bars[bar_idx][1]
        # Snap to whichever bar boundary is closer (ties go to the start).
        if bar_end - frontier < frontier - bar_start:
            aligned.append(bar_end)
        else:
            aligned.append(bar_start)
    return aligned
def _high_bit(value):
"""returns index of highest bit, or -1 if value is zero or negative"""
return value.bit_length() - 1 | 1bd783593ae7d5b15cc56c8a8db5c86798fd8c9f | 6,947 |
def qual(obj):
    """
    Return the fully qualified name of *obj*'s class (module.ClassName).
    """
    cls = type(obj)
    return u'{}.{}'.format(cls.__module__, cls.__name__)
def default_reply(event, message):
    """Default function called to reply to bot commands (sends an unotice)."""
    reply = event.unotice(message)
    return reply
def making_change(amt: int, coins: list) -> int:
    """Iterative implementation of the making change algorithm.

    :param amt (int) : Amount, in cents, to be made into change.
    :param coins (list) : List of coin denominations
    :return (int) : Number of different combinations of change.
    """
    # ways[i] holds the number of coin combinations totalling i cents;
    # there is exactly one way to make zero (use no coins).
    ways = [1] + [0] * amt
    for denomination in coins:
        # Every amount reachable with this coin inherits the counts of the
        # amount one coin lower.
        for total in range(denomination, amt + 1):
            ways[total] += ways[total - denomination]
    return ways[amt]
def table_dispatch(kind, table, body):
    """Call body with table[kind] if it exists. Raise an error otherwise.

    :param kind: key identifying the histogram kind
    :param table: mapping from kind to a dispatch value
    :param body: callable invoked with ``table[kind]``
    :return: whatever *body* returns
    :raises ValueError: if *kind* is not present in *table*
    """
    # The original used Python-2-only `raise BaseException, "..."` syntax
    # (a SyntaxError on Python 3) and raised BaseException, which even
    # `except Exception` cannot catch; ValueError is the appropriate type.
    if kind in table:
        return body(table[kind])
    raise ValueError("don't know how to handle a histogram of kind %s" % kind)
import base64
def _get_base64(data: str) -> str:
"""Base 64 encodes data."""
ebytes = base64.b64encode(data.encode("utf-8"))
estring = str(ebytes, "utf-8")
return estring | a7bd3080dba077077d96602eb35142db32b003de | 6,954 |
def setSortGroups(sortGroups=None):
    """
    Return the sorting groups, either user defined or from the default list.

    :param sortGroups: optional list of (low, high) group bounds; when given,
        the catch-all ('-inf', '+inf') group is prepended to a copy of it
    :return: list of group-bound tuples
    """
    if sortGroups is None:  # Default groups
        return [('-inf', '+inf'), ('-inf', 100), (101, '+inf')]
    # Build a new list rather than insert() into the caller's argument:
    # the original mutated the list passed in, a surprising side effect.
    return [('-inf', '+inf')] + list(sortGroups)
def NOT_TENSOR_FILTER(arg_value):
    """Only keeps a value if it is neither a Tensor nor a SparseTensor."""
    is_any_tensor = arg_value.is_tensor or arg_value.is_sparse_tensor
    return not is_any_tensor
def tapisize(fieldKeyName):
    """Transform a string into a Tapis query parameter (lower-cased)."""
    lowered = fieldKeyName.lower()
    return lowered
def calculate_average_resolution(sizes):
    """Return the average (width, height) for a list of resolution tuples."""
    count = len(sizes)
    width_total = sum(size[0] for size in sizes)
    height_total = sum(size[1] for size in sizes)
    return (width_total / count, height_total / count)
def get_distance(m, M, Av=0):
    """
    Calculate distance [in pc] from extinction-corrected magnitude
    using the equation: d = 10**((m - M + 5 - Av)/5).
    Note: m - M = 5*log10(d) - 5 + Av
    See http://astronomy.swin.edu.au/cosmos/I/Interstellar+Reddening

    Parameters
    ---------
    m : apparent magnitude
    M : absolute magnitude
    Av : extinction (in V band)
    """
    # Reject missing / NaN magnitudes before computing.
    for mag in (m, M):
        assert (mag is not None) & (str(mag) != "nan")
    distance = 10 ** (0.2 * (m - M + 5 - Av))
    return distance
import json
def load_base_models_json(filename="base_models.json"):
    """Load base models json to allow selecting a pre-trained model.

    Args:
        filename (str) - filename for the json file with pre-trained models

    Returns:
        base_models - python dict version of JSON key-value pairs
    """
    with open(filename) as json_file:
        return json.load(json_file)
import json
def load_data_from_json(jsonfile):
    """Load the data contained in a .json file and return the corresponding Python object.

    :param jsonfile: The path to the .json file
    :type jsonfile: str
    :rtype: list or dict
    """
    # Use a context manager: the original called open(...).read() and never
    # closed the file handle, leaking it until garbage collection.
    with open(jsonfile) as handle:
        return json.load(handle)
def get_living_neighbors(i, j, generation):
    """
    Return the number of living (== 1) neighbors around cell (i, j).
    """
    rows = len(generation)
    cols = len(generation[0])
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1),
               (-1, 1), (-1, -1), (1, 1), (1, -1))
    return sum(
        1
        for di, dj in offsets
        if 0 <= i + di < rows and 0 <= j + dj < cols
        and generation[i + di][j + dj] == 1
    )
def StrToList(val):
    """Take a string and return the list of its characters' ordinal values."""
    return list(map(ord, val))
def set(data, c):
    """
    Set Data to a Constant

    Parameters:

    * data  Array of spectral data.
    * c     Constant to set data to (may be complex)
    """
    # NOTE: shadows the builtin `set`; name kept for API compatibility.
    data[..., :] = c
    return data
import pickle
def load(directory):
    """Load and return the pickled object stored at *directory*."""
    with open(directory, 'rb') as handle:
        return pickle.load(handle)
def get_seconds(time_string):
    """
    Convert e.g. "1m5.928s" to seconds.
    """
    minutes_part, _, rest = time_string.partition("m")
    seconds_part = rest.split("s")[0]
    return float(minutes_part) * 60.0 + float(seconds_part)
def _make_filetags(attributes, default_filetag = None):
"""Helper function for rendering RPM spec file tags, like
```
%attr(0755, root, root) %dir
```
"""
template = "%attr({mode}, {user}, {group}) {supplied_filetag}"
mode = attributes.get("mode", "-")
user = attributes.get("user", "-")
group = attributes.get("group", "-")
supplied_filetag = attributes.get("rpm_filetag", default_filetag)
return template.format(
mode = mode,
user = user,
group = group,
supplied_filetag = supplied_filetag or "",
) | 56898ec1fc974721150b7e1055be3ab3782754a2 | 6,996 |
def split_game_path(path):
    """Split a game path into individual components."""
    # Double slashes yield empty fragments; drop them.
    return list(filter(None, path.split('/')))
def is_numeric(value: str):
    """Return True if the given value consists solely of digit characters.

    Note: signs and decimal points are rejected ('-1', '1.5' -> False),
    matching ``str.isdigit`` semantics.
    """
    return all(ch.isdigit() for ch in value) and bool(value)
def generate_file_path(package_path, file_name):
    """
    Dynamically generate full path to file, including filename and extension.

    :param package_path: (array) ordered list of packages in path to test file
    :param file_name: (string) name of the file w/ test, including the extension
    :return: (string) full path to file, including filename and extension
    """
    prefix = "".join(package + "/" for package in package_path)
    return prefix + file_name
import hashlib
def sha1(string):
    """Compute the sha1 hexdigest of the string."""
    digest = hashlib.sha1(string.encode('utf-8'))
    return digest.hexdigest()
import pytz
from datetime import datetime
def now(timezone):
    """Get the current time in the given timezone

        Args:
            timezone: The desired timezone as a string. eg 'US/Eastern'

        Returns:
            A timezone-aware datetime in the requested zone.
    """
    # Attach UTC to the naive utcnow() timestamp, then convert to the target
    # zone; pytz requires localize() rather than passing tzinfo directly.
    utc = pytz.timezone('UTC').localize(datetime.utcnow())
    return utc.astimezone(pytz.timezone(timezone))
def ReadBlackList(path):
    """Read a blacklist of forbidden directories and files.

    Ignore lines starting with a # so we can comment the datafile.

    Args:
      path: file to load the blacklist from.

    Returns:
      list of stripped entry strings (comment and blank lines excluded)
    """
    catalog = []
    # Use a context manager so the file is closed (the original leaked the
    # handle), and strip *before* testing so blank/whitespace-only lines are
    # skipped instead of being appended as empty strings.
    with open(path, 'r') as blacklist_file:
        for line in blacklist_file:
            entry = line.strip()
            if entry and not entry.startswith('#'):
                catalog.append(entry)
    return catalog
def _scalePoints(points, scale=1, convertToInteger=True):
"""
Scale points and optionally convert them to integers.
"""
if convertToInteger:
points = [
(int(round(x * scale)), int(round(y * scale)))
for (x, y) in points
]
else:
points = [(x * scale, y * scale) for (x, y) in points]
return points | 3ce3fedfbf7c428386af1571cc1a770bd9f66018 | 7,022 |
def album_sticker_get(client, album, sticker):
    """Get a sticker associated with an album.

    MPD only implements stickers for songs, so the sticker is attached to
    the first song in the album; returns None when the album has no tracks.
    """
    tracks = client.find("album", album)
    if not tracks:
        return None
    first_track = tracks[0]
    return client.sticker_get("song", first_track["file"], "album_" + sticker)
def _get_port(config):
"""Get the server's port from configuration."""
if not config.has_option("server", "port"):
return None
port = config.getint("server", "port")
return port | bee579fcfc82ea80c593dc7bd93ff3d39e63ef7b | 7,033 |
def robust_scale(df):
    """Return copy of `df` scaled by (df - df.median()) / MAD(df) where MAD is a function returning the median absolute deviation."""
    centered = df - df.median()
    return centered / centered.abs().median()
def _jsarr(x):
"""Return a string that would work for a javascript array"""
return "[" + ", ".join(['"{}"'.format(i) for i in x]) + "]" | 9c9b6df65bf4c01fa1c321445bbb7c86c6d28c5a | 7,044 |
def join_kwargs(**kwargs) -> str:
    """
    Joins keyword arguments and their values in parenthesis.
    Example: key1{value1}_key2{value2}
    """
    parts = ["{}{{{}}}".format(key, value) for key, value in kwargs.items()]
    return "_".join(parts)
def extract_column_names(row_list):
    """
    Extract column names from the row list obtained from a table csv; the
    first row contains all column names.

    :param row_list: List of all rows in csv used for table creation
    :return: List of names present in table csv
    """
    header_row = row_list[0]
    return header_row
def index_to_point(index, origin, spacing):
    """Transform voxel indices to image data point coordinates."""
    return tuple(
        origin[axis] + index[axis] * spacing[axis] for axis in range(3)
    )
def MakeEmptyTable(in_table=[[]], row_count=0, column_count=0):
    """
    Create an empty table of '' values with the same number of rows and
    columns as *in_table* (columns based on its first row).

    If the user specifies *row_count* and/or *column_count*, those are used
    in place of the dimensions of *in_table*, so *in_table* can be omitted
    if BOTH counts are set, e.g. MakeEmptyTable(row_count=1000, column_count=200).
    If *in_table* is used, the other two inputs are optional, e.g.
    MakeEmptyTable(CSVdata, row_count=1000) uses CSVdata's column count.

    Args
        in_table: <type 'list'> 2D table as list of lists, which may contain
                  table data in rows and columns.
                  e.g. [['abc','def','hij'],
                        ['232','965','TES']
                        ['235','768','QWE']]
        row_count: <type 'int'> number of empty rows to create.
                   If this is not set, then in_table row count will be used.
        column_count: <type 'int'> number of empty columns to create in each
                      row. If this is not set, then column count of *in_table*
                      first row will be used.
    """
    if not row_count:
        row_count = len(in_table)  # length of table
    if not column_count:
        column_count = len(in_table[0])  # length of first row of table
    # Build each row as its own list: the original used
    # `[row_contents] * row_count`, which repeats ONE shared row object, so
    # assigning to any cell changed that cell in every row.
    return [[""] * column_count for _ in range(row_count)]
import math, random
def split_list(data, splits=None, shuffle=True, seed=0):
    """
    Split the list according to a given ratio

    Args:
        data (list): a list of data to split
        splits (dict): a dictionary specifying the ratio of splits;
            defaults to {'train': 8, 'test': 2}
        shuffle (bool): shuffle the list before splitting
        seed (int): random seed used for shuffling

    Returns:
        a dictionary of the splitted list
    """
    # Use None as the default instead of a mutable dict literal: mutable
    # default arguments are shared across calls and are a classic footgun.
    if splits is None:
        splits = {'train': 8, 'test': 2}
    data = data.copy()  # work on a copy of the original list
    n_tot = len(data)
    split_tot = float(sum(splits.values()))
    # Number of items per split, rounding up so all items are covered.
    n_split = {k: math.ceil(n_tot * v / split_tot) for k, v in splits.items()}
    if shuffle:
        random.seed(seed)
        random.shuffle(data)
    splitted = {}
    cursor = 0
    for name, size in n_split.items():
        splitted[name] = data[cursor:cursor + size]
        cursor += size
    return splitted
def constraint_wrapper(fun, constraint):
    """
    Wrap *fun* so that its first argument is passed through *constraint*
    before the call.
    """
    def wrapped(x, *args, **kwargs):
        """the wrapped function"""
        constrained = constraint(x)
        return fun(constrained, *args, **kwargs)
    return wrapped
def can_cast(value, class_type):
    """
    Check if the value can be cast to the class_type, used in the parse tcl
    string function for tcl expressions like [Ada inputs 0] or
    [Ada alias robotblur]

    Args:
        value (object): The object we're attempting to cast.
        class_type (class): The class we're attempting to cast to.

    Returns:
        bool: If the value can be successfully cast
    """
    try:
        class_type(value)
        return True
    except (ValueError, TypeError):
        # TypeError covers inputs like int(None); the original only caught
        # ValueError and let TypeError escape despite promising a bool.
        return False
def build(argmap, data):
    """Builds an array of arguments from the provided map and data.

    The argmap must consist of a mapping of keys to argbuilder functions.
    keys in the argmap are indexed into data, and if they are present, the
    corresponding values are passed to the corresponding argmap function. The
    argmap function returns with one or more array elements to be added to
    the returned array.
    """
    args = []
    # dict.iteritems() is Python 2 only (AttributeError on Python 3);
    # items() is the portable equivalent.
    for name, fn in argmap.items():
        if name in data:
            args += fn(data[name])
    return args
from typing import List
def getTestFragments() -> List[str]:
    """
    Returns a small list of testing fragments.

    >>> len(getTestFragments())
    5
    """
    fragments = ['tttttt', 'ttttgg', 'tggaga', 'agacgc', 'cgcggg']
    return fragments
def get_forecast_metadata_variables(ds):
    """
    Returns a list of variables that represent forecast reference time
    metadata.

    :param netCDF4.Dataset ds: An open netCDF4 Dataset.
    :rtype: list
    """
    wanted_standard_names = {
        "forecast_period",
        "forecast_reference_time",
    }
    return [
        varname
        for varname in ds.variables
        if getattr(ds.variables[varname], "standard_name", None)
        in wanted_standard_names
    ]
def feet(i):
    """
    feet(i)

    Return i (in inches) converted to feet.
    """
    inches_per_foot = 12
    return i / inches_per_foot
def thcf_partial1(x):
    """Partial derivative of the Three-Hump Camel Function with respect to x1."""
    x1 = x[0]
    gradient_1 = x[1] + 4 * x1 - 4.2 * (x1 ** 3) + (x1 ** 5)
    return gradient_1
def leftmostNonzeroEntries(M):
    """Returns the leftmost nonzero entries of M (rows with no nonzeros are skipped)."""
    entries = []
    for row in range(M.dimensions()[0]):
        positions = M.nonzero_positions_in_row(row)
        if positions:
            entries.append(abs(M[row][positions[0]]))
    return entries
def is_sequence(arg):
    """Return True if the passed arg is a list or a tuple."""
    return isinstance(arg, (list, tuple))
def super_reduced_string(s):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/reduced-string/problem

    Repeatedly delete adjacent pairs of equal lowercase letters and return
    the shortest resulting string, or "Empty String" if nothing remains.

    The pair-deletion rewriting is confluent (deletion order never changes
    the final result), so a single left-to-right stack pass produces the
    same answer as the original quadratic scan-and-replace loop, in O(n).

    Args:
        s (str): String to reduce

    Returns:
        str: the reduced string, or "Empty String" if it's empty
    """
    stack = []
    for ch in s:
        if stack and stack[-1] == ch:
            # ch cancels with the previous surviving character.
            stack.pop()
        else:
            stack.append(ch)
    return "".join(stack) if stack else "Empty String"
def rotate_tour(tour, start=0):
    """
    Rotate a tour so that it starts at the given ``start`` index. This is
    equivalent to rotating the input list to the left.

    Parameters
    ----------
    tour: list
        The input tour
    start: int, optional (default=0)
        New start index for the tour

    Returns
    -------
    rotated: list
        The rotated tour
    """
    pivot = tour.index(start)
    if pivot == 0:
        # Already starts at `start`; return the list unchanged.
        return tour
    return tour[pivot:] + tour[:pivot]
def filter_polygons(state, header):
    """
    Remove any non-polygon sources from the state file.

    We are only interested in parsing parcel data, which is
    marked as Polygon in the state file.
    """
    geometry_idx = header.index('geometry type')
    return [source for source in state if 'Polygon' in source[geometry_idx]]
import unicodedata
def unicode_normalize(text):
    """Return the given text normalized to Unicode NFKC."""
    return unicodedata.normalize('NFKC', text)
import importlib
def import_obj(clsname, default_module=None):
    """
    Import the object given by clsname.
    If default_module is specified, import from this module.
    """
    qualified = clsname
    # Prefix with the default module unless the name already carries it.
    if default_module is not None and not qualified.startswith(default_module + '.'):
        qualified = '{0}.{1}'.format(default_module, qualified)
    modname, attrname = qualified.rsplit('.', 1)
    module = importlib.import_module(modname)
    try:
        return getattr(module, attrname)
    except AttributeError:
        raise ImportError('Cannot import {0} from {1}'.format(attrname, module))
import json
def loads_json(fid, **kwargs):
    """ Loads a JSON document string and returns it as a Python object.

    :param fid: String (JSON document) accepted by json.loads
    :param kwargs: See ``json.loads()``.
    :return: Content of the JSON document.
    """
    if not isinstance(fid, str):
        raise AssertionError(fid)
    return json.loads(fid, **kwargs)
def average_precision_at_k(targets, ranked_predictions, k=None):
    """Computes AP@k given targets and ranked predictions.

    Args:
        targets: collection of relevant items.
        ranked_predictions: predictions ordered by decreasing confidence.
        k: optional cutoff; only the top-k predictions are scored.

    Returns:
        float: average precision at k; 0.0 when there are no targets.
    """
    if k:
        ranked_predictions = ranked_predictions[:k]
    score = 0.0
    hits = 0.0
    for i, pred in enumerate(ranked_predictions):
        # Duplicate predictions only score on their first occurrence.
        if pred in targets and pred not in ranked_predictions[:i]:
            hits += 1.0
            score += hits / (i + 1.0)
    divisor = min(len(targets), k) if k else len(targets)
    # Guard against empty targets: the original raised ZeroDivisionError here.
    return score / divisor if divisor else 0.0
def _PureShape(shape):
"""Make sure shape does not contain int tensors by calling int()."""
return [int(x) for x in shape] | 1cebacd516cbf223833342ecffb6f8d9fe22ff2d | 7,096 |
def char_to_number(value: str, required_type: str) -> str:
    """Converts the string representation of a number (int or float) to a number

    Args:
        value (str): String representation of a number
        required_type (str): Output type desired (bigint or double)

    Raises:
        Exception: The conversion to a python integer (int) or float failed
        NotImplementedError: required_type was neither bigint nor double

    Returns:
        str: Numerical representation of the input string
    """
    value = value.strip("'").strip('"')  # Remove potential character markers
    # Map the Presto type onto the Python caster and the wording used in the
    # error message.
    if required_type == "bigint":
        caster, label, kind = int, "bigint", "an integer"
    elif required_type == "double":
        caster, label, kind = float, "double", "a float"
    else:
        raise NotImplementedError
    try:
        # Round-tripping through the caster must not change the text.
        if f"{caster(value)}" != value:
            raise AssertionError
        return value
    except (TypeError, AssertionError, ValueError):
        raise Exception(
            f"A '{label}' type is expected by Presto for this function, but {value} "
            f"was provided which either cannot be casted or does not seem to represent {kind}."
        )
def _validate_index_string(index_string: str) -> bool:
"""
Handle validation of index string from the argument.
a string should be in format 't{%d}' or 'r{%d}' or 'd{%d}'
Parameters:
index_string: the string we want to validate
Returns:
a boolean indicating whether the index string is valid
"""
if len(index_string) < 2:
print("Invalid index string length!")
return False
elif index_string[0] != 't' and index_string[0] != 'r' and index_string[0] != 'd':
print("Invalid index string prefix!")
return False
elif not index_string[1:].isnumeric():
print("Index need to have a number suffix!")
return False
else:
return True | aa6d7d0aba2a0378d2e1c836b78b0ec943cc1bdb | 7,098 |
def onset_by_rain(date, df, window=5, rain_threshold=5):
    """
    Finds true storm onset by finding the first date around the landfall that rain exceeds a threshold

    Args:
        date: the date to look around
        df: df with a date and rain column
        window: number of days around date to find max (total window size is window*2)
        rain_threshold: mm of rain to consider a storm to have started

    Returns:
        a datetime object and the corresponding index
    """
    # Row position of the landfall date in df.
    mask = df['Date'] == date
    storm_row = df[mask]
    storm_ind = int(storm_row.index[0])
    # NOTE(review): mixing label index (storm_ind, idxmax) with iloc below
    # assumes df has a default RangeIndex aligned with row position — confirm.
    sub_df = df.iloc[(storm_ind - window):(storm_ind + window)]
    if sub_df.Rain.dropna().empty:  # if there's no rain data
        return date, storm_ind
    # Walk backwards from the wettest day in the window until rain drops
    # to/below the threshold.
    ind = sub_df.Rain.idxmax()
    val = df.Rain.iloc[ind]
    while val > rain_threshold:
        # NOTE(review): no lower bound on `ind` — if every earlier day exceeds
        # the threshold this walks past index 0; confirm this cannot occur.
        ind -= 1
        val = df.Rain.iloc[ind]
    # ind += 1
    return df['Date'].iloc[ind], ind
def get_odds_labels(nfunc, adfam=False):
    """Labels used for odds in results_df."""
    if adfam:
        # T=1 columns first, then T=2, then the family probability.
        col_names = [r'$P(T={},N={})$'.format(t, i + 1)
                     for t in (1, 2) for i in range(nfunc)]
        col_names.append(r'$P(T=1)$')
    else:
        col_names = [r'$P(N={})$'.format(i + 1) for i in range(nfunc)]
    return col_names
def get_validation_context(self):
    """
    Retrieves the validation context.

    :rtype: String
    :return: The validation context.
    """
    context = self.validation_context
    return context
def subfolders_in(whole_path):
    """
    Returns all subfolders in a path, in order

    >>> subfolders_in('/')
    ['/']
    >>> subfolders_in('/this/is/a/path')
    ['/this', '/this/is', '/this/is/a', '/this/is/a/path']
    >>> subfolders_in('this/is/a/path')
    ['this', 'this/is', 'this/is/a', 'this/is/a/path']
    """
    fragments = whole_path.lstrip('/').split('/')
    if whole_path.startswith('/'):
        # Keep the leading slash attached to the first fragment.
        fragments[0] = '/' + fragments[0]
    subfolders = []
    current = ''
    for fragment in fragments:
        current = fragment if not subfolders else current + '/' + fragment
        subfolders.append(current)
    return subfolders
def getfrom(v):
    """
    Identity pass-through function, for using the
    filter_for decorator directly.
    """
    return v
def parse_employee_info(record_list):
    """ Parses the employee record information.

        Example input:
        [('3c7ca263-9383-4d61-b507-2c8bd367567f', 123456, 'Austin', 'Grover',
          <memory>, True, 1, '00000000-0000-0000-0000-000000000000',
          datetime.datetime(2020, 2, 23, 16, 53, 25, 531305))]

        Args:
            Employee Record list

        Returns:
            list of dictionary records containing employee information.
    """
    field_names = ("id", "employeeid", "firstname", "lastname", "password",
                   "active", "classification", "managerid", "createdon")
    data_list = []
    for record in record_list:
        data_record = dict(zip(field_names, record))
        # The password column arrives as a memoryview; decode it to text.
        data_record["password"] = memoryview(record[4]).tobytes().decode("utf-8")
        data_list.append(data_record)
    return data_list
import math
def gas_release_rate(P1, P2, rho, k, CD, area):
    """
    Gas massflow (kg/s) trough a hole at critical (sonic) or subcritical
    flow conditions. The formula is based on Yellow Book equation 2.22.

    Methods for the calculation of physical effects, CPR 14E, van den Bosch and Weterings (Eds.), 1996

    Parameters
    ----------
    P1 : float
        Upstream pressure
    P2 : float
        Downstream pressure
    rho : float
        Fluid density
    k : float
        Ideal gas k (Cp/Cv)
    CD : float
        Coefficient of discharge
    area : float
        Orifice area

    Returns
    ----------
    : float
        Gas release rate / mass flow of discharge
    """
    if P1 > P2:
        # Choked (sonic) flow once the pressure ratio exceeds the critical
        # ratio ((k+1)/2)**(k/(k-1)); the outflow coefficient is then 1.
        if P1 / P2 > ((k + 1) / 2) ** ((k) / (k - 1)):
            flow_coef = 1
        else:
            # Subcritical flow: squared outflow coefficient from the
            # Yellow Book correlation.
            flow_coef = (
                2
                / (k - 1)
                * (((k + 1) / 2) ** ((k + 1) / (k - 1)))
                * ((P2 / P1) ** (2 / k))
                * (1 - (P2 / P1) ** ((k - 1) / k))
            )
        return (
            math.sqrt(flow_coef)
            * CD
            * area
            * math.sqrt(rho * P1 * k * (2 / (k + 1)) ** ((k + 1) / (k - 1)))
        )
    else:
        # No forward pressure difference -> no release.
        return 0
def email2words(email):
    """Return a slightly obfuscated version of the email address.

    Replaces @ with ' at ', and . with ' dot '.
    """
    obfuscated = email.replace('@', ' at ')
    return obfuscated.replace('.', ' dot ')
from typing import Union
from typing import Callable
def _get_callable_str(*, callable_: Union[Callable, str]) -> str:
"""
Get a callable string (label).
Parameters
----------
callable_ : Callable or str
Target function or method or property or dunder method name.
Returns
-------
callable_str : str
A callable string (label).
"""
if isinstance(callable_, str):
callable_str: str = callable_
else:
callable_str = callable_.__name__
return callable_str | 2b968e3f5ff79701e6f63bb75548ecb228ec5ed7 | 7,143 |
import re
def to_mb(s):
    """Simple function to convert `disk_quota` or `memory` attribute string
    values into MB integer values.
    """
    if s is None:
        return s
    # Trailing unit letter selects the multiplier; 512 MB is the fallback.
    if s.endswith('M'):
        return int(s[:-1])
    if s.endswith('G'):
        return int(s[:-1]) * 1000
    return 512
def length_left(lumber):
    """
    Convenience function for calculating the length left in a piece of lumber

    :param lumber: a piece of Lumber
    :return: length remaining
    """
    remaining = lumber.length_left()
    return remaining
def find_matching_paren_pair(s):
    """
    Find the first matching pair of parentheses and return their positions
    """
    depth = -1  # depth 0 marks the outermost pair we are tracking
    open_pos = 0
    for idx, ch in enumerate(s):
        if ch == "(":
            depth += 1
            if depth == 0:
                open_pos = idx
        elif ch == ")":
            if depth == 0:
                return (open_pos, idx)
            depth -= 1
    raise SyntaxError("Unterminated list '{val}'".format(val=s))
def IsMerge(op):
    """Return true if `op` is a Merge."""
    return op.type in ("Merge", "RefMerge")
import re
def fraction_to_word(aText, fractions):
    """Spell out fractions written with a '/'. """
    # Patterns are matched positionally against the fractions list.
    patterns = ("1/2", "1/3", "2/3", "1/4", "3/4", "1/5")
    for pattern, word in zip(patterns, fractions):
        aText = re.sub(pattern, word, aText)
    return aText
def create_message_context_properties(message_type, message_id, source, identifier, is_cloud_event_format) -> dict:
    """Create message context properties dict from input param values."""
    return dict(
        type=message_type,
        message_id=message_id,
        source=source,
        identifier=identifier,
        is_cloud_event_format=is_cloud_event_format,
    )
import pathlib
def exists(path: str) -> bool:
    """Check whether *path* exists on the filesystem.

    e.g
        j.sals.fs.exists("/home/rafy/testing_make_dir/test1") -> True
        j.sals.fs.exists("/home/rafy/testing_make_dir/fasdljd") -> False

    Args:
        path (str): path to check for existence

    Returns:
        bool: True if exists
    """
    target = pathlib.Path(path)
    return target.exists()
def extract_file_number(file_name):
    """ Extract the file number from a file name """
    # Take the stem before the first '.', keep the last '-'-separated part,
    # and drop its final character before parsing the number.
    stem = str(file_name).split('.')[0]
    base_name = stem.split('-')[-1]
    return int(base_name[:-1])
def add_address(x, y):
    """Returns a string representation of the sum of the two parameters.

    x is a hex string address that can be converted to an int.
    y is an int.
    """
    total = int(x, 16) + y
    return "{0:08X}".format(total)
def extract_fcp_data(raw_data, status):
    """
    extract data from smcli System_WWPN_Query output.

    Input:
        raw data returned from smcli
    Output:
        newline-joined 5-line records whose status line matches *status*,
        data extracted would be like:
        'status:Free \n
        fcp_dev_no:1D2F\n
        physical_wwpn:C05076E9928051D1\n
        channel_path_id:8B\n
        npiv_wwpn': 'NONE'\n
    """
    # Strip surrounding whitespace and drop blank lines.
    stripped = [line.strip(' \n') for line in raw_data.split('\n')]
    lines = [line for line in stripped if line]
    # Records are 5 lines long; the status value sits on the record's
    # second line after the ':' separator.
    results = []
    for start in range(0, len(lines), 5):
        record = lines[start:start + 5]
        record_status = record[1].split(':')[-1].strip()
        if record_status.lower() == status.lower():
            results.extend(record)
    return '\n'.join(results)
import json
def load_data(data):
    """ Wrapper to load json data, to be compatible with Python3.

    Returns: JSON data

    Keyword arguments:
    data: might be bytes or str
    """
    if isinstance(data, bytes):
        data = data.decode("utf-8")
    return json.loads(data)
def indices_for(df, nprocs):
    """
    Group rows in a dataframe and assign each group to a process.

    Args:
        df: Pandas dataframe object
        nprocs: number of processes used

    Returns:
        indices grouped to each process
    """
    n_rows = df.shape[0]
    per_proc = int(n_rows / nprocs)
    indices = []
    # Each process gets `per_proc` rows ...
    for proc in range(nprocs):
        indices.extend([proc] * per_proc)
    # ... and the remainder goes to the last process.
    indices.extend([nprocs - 1] * (n_rows - nprocs * per_proc))
    return indices
from typing import Any, Dict, List, Tuple
def info() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Describe the service's input and output schemas.

    :return: (input spec, output spec) OpenAPI-style field lists
    """
    inputs: List[Dict[str, Any]] = [
        dict(name="a", type="string", required=True, example="1"),
        dict(name="b", type="string", example="2"),
    ]
    outputs: List[Dict[str, Any]] = [
        dict(name="integer", type="string", required=True, example="42"),
    ]
    return inputs, outputs
def build_table_separator(outer_left='╠', inner='╬', outer_right='╣'):
    """Build a horizontal separator row for the stopwatch table.

    Column widths are fixed at 14, 18 and 20 characters.

    :return: formatted unicode separator line
    """
    columns = ('═' * 14, '═' * 18, '═' * 20)
    return outer_left + inner.join(columns) + outer_right
def _descend_namespace(caller_globals, name):
"""
Given a globals dictionary, and a name of the form "a.b.c.d", recursively
walk the globals expanding caller_globals['a']['b']['c']['d'] returning
the result. Raises an exception (IndexError) on failure.
"""
names = name.split('.')
cur = caller_globals
for i in names:
if type(cur) is dict:
cur = cur[i]
else:
cur = getattr(cur, i)
return cur | 111a5277907b60d3f27f037868ac8c84e390e159 | 7,191 |
def _get_extension_point_url_from_name(domain, category, pluggable_name):
"""Get the extension point URL based on a pluggable method name"""
return '{}/{}/{}'.format(domain, category, pluggable_name).replace('//', '/') | a4a34409dac26e42d123c4fda66ddc0947134e00 | 7,192 |
def get_feature(feature_name,
                example):
    """Look up a named feature in a Tensorflow example.

    :param feature_name: the name of the feature
    :param example: a Tensorflow example (anything exposing
        ``example.features.feature`` as a mapping)
    :return: the feature stored under *feature_name*
    :raises ValueError: if *feature_name* is not present in *example*
    """
    feature_map = example.features.feature
    if feature_name not in feature_map:
        raise ValueError('Feature name {} is not in the example {}'.format(
            feature_name, example))
    return feature_map[feature_name]
def esc(code: int) -> str:
    """Build the ANSI escape sequence for the given SGR code.

    :param code: SGR parameter code (e.g. 1 for bold)
    :return: the escape sequence string
    """
    return "\033[{}m".format(code)
def get_paper_data(paper_name):
    """Derive (code, paper_type) from an underscore-separated paper name.

    The layout depends on how many underscore-separated parts the name has:
    5 parts -> code is parts[-2]+parts[-3], type is the last part;
    3 parts -> code is "00", type is the last part;
    otherwise -> code is the last part, type is the one before it.
    """
    parts = paper_name.split("_")
    if len(parts) == 5:
        return parts[-2] + parts[-3], parts[-1]
    if len(parts) == 3:
        return "00", parts[-1]
    return parts[-1], parts[-2]
from typing import List
def str_class(s: str) -> str:
    """Expand a string of character ranges.

    ``"X-Y"`` expands to every character from X to Y inclusive (descending
    if X > Y); all other characters pass through unchanged.

    Example: str_class("a-cx0-9") = "abcx0123456789"
    """
    expanded: List[str] = []
    pos, length = 0, len(s)
    while pos < length:
        # A dash with a character on both sides denotes a range.
        if pos + 2 < length and s[pos + 1] == '-':
            lo, hi = ord(s[pos]), ord(s[pos + 2])
            step = 1 if lo <= hi else -1
            expanded.extend(chr(c) for c in range(lo, hi + step, step))
            pos += 3
        else:
            expanded.append(s[pos])
            pos += 1
    return ''.join(expanded)
import math
def is_primes(n):
    """Return True if ``n`` is a prime number, False otherwise.

    Fixes two defects of the original: ``n = 0`` was wrongly reported as
    prime (the trial-division loop was empty), and negative ``n`` raised a
    math domain error from ``math.sqrt``.
    """
    if n < 2:
        return False  # 0, 1 and negatives are not prime
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to floor(sqrt(n)) need checking; math.isqrt is
    # exact integer arithmetic (no float rounding for huge n).
    max_divisor = math.isqrt(n)
    for i in range(3, max_divisor + 1, 2):
        if n % i == 0:
            return False
    return True
def GetStatus(issue):
    """Return the issue's explicit status, falling back to the derived
    status, and to '' when neither is set (truthy)."""
    for candidate in (issue.status, issue.derived_status):
        if candidate:
            return candidate
    return ''
def set_safe_attr(instance, attr, val):
    """Set ``attr`` on ``instance`` only when the value actually changes.

    Returns True when the new value was stored; False when instance/attr is
    falsy or the attribute already held an equal value (a missing attribute
    reads as None, so setting None on a missing attribute is a no-op).

    NOTE(review): despite the original docstring, nothing here takes a lock
    — "thread safe" is only a get-compare-set in one call.
    """
    if not (instance and attr):
        return False
    current = getattr(instance, attr, None)
    if current is None and val is None:
        return False
    if val == current:
        return False
    setattr(instance, attr, val)
    return True
import collections
def ParseIoStatsLine(line):
    """Parse one line of io stats into an IoStats named tuple.

    Field definitions: http://www.kernel.org/doc/Documentation/iostats.txt
    Field 2 is the device name; fields 3+ are integer counters.
    """
    IoStats = collections.namedtuple('IoStats', [
        'device',
        'num_reads_issued',
        'num_reads_merged',
        'num_sectors_read',
        'ms_spent_reading',
        'num_writes_completed',
        'num_writes_merged',
        'num_sectors_written',
        'ms_spent_writing',
        'num_ios_in_progress',
        'ms_spent_doing_io',
        'ms_spent_doing_io_weighted',
    ])
    tokens = line.split()
    counters = [int(tok) for tok in tokens[3:]]
    return IoStats(tokens[2], *counters)
def get_pixel_neighbors(height, width):
    """Enumerate the 4-connected neighbors of every pixel in a grid.

    Pixels are indexed row-major (index = row * width + col). Border pixels
    only get the neighbors that exist; for interior pixels the order is
    below, above, right, left.

    :param height: image height
    :param width: image width
    :return: (pixel index list, neighbor index list), aligned element-wise
    """
    pix_id = []
    neighbor_id = []
    for row in range(height):
        for col in range(width):
            here = row * width + col
            neighbors = []
            # Vertical neighbors: below and/or above.
            if row == 0:
                neighbors.append(here + width)
            elif row == height - 1:
                neighbors.append(here - width)
            else:
                neighbors.extend((here + width, here - width))
            # Horizontal neighbors: right and/or left.
            if col == 0:
                neighbors.append(here + 1)
            elif col == width - 1:
                neighbors.append(here - 1)
            else:
                neighbors.extend((here + 1, here - 1))
            pix_id.extend(here for _ in neighbors)
            neighbor_id.extend(neighbors)
    return pix_id, neighbor_id
from typing import Counter
def check_valid_solution(solution, graph):
    """Check that the solution is valid: every path is visited exactly once.

    Each path is identified by the smaller of its two disjoint indices; the
    multiset of identifiers in *solution* must match the expected one.
    """
    expected = Counter(
        idx for (idx, _) in graph.iter_starts_with_index()
        if idx < graph.get_disjoint(idx)
    )
    actual = Counter(min(idx, graph.get_disjoint(idx)) for idx in solution)
    # Counter.subtract keeps zero/negative counts (unlike `-`), so both
    # missing and extra visits survive until the filter below.
    delta = Counter(expected)
    delta.subtract(actual)
    mismatched = {node: count for node, count in delta.items() if count != 0}
    if mismatched:
        print('Solution is not valid!'
              'Difference in node counts (expected - actual): {}'.format(mismatched))
        return False
    return True
def _memoized_fibonacci_aux(n: int, memo: dict) -> int:
"""Auxiliary function of memoized_fibonacci."""
if n == 0 or n == 1:
return n
if n not in memo:
memo[n] = _memoized_fibonacci_aux(n - 1, memo) + \
_memoized_fibonacci_aux(n - 2, memo)
return memo[n] | 9a6d8646139d6ae9f6f63d2e990545fd088407eb | 7,218 |
def pad_sequences(sequences, pad_func, maxlen = None):
    """Pad/truncate each sequence to a common length.

    Similar to keras.preprocessing.sequence.pad_sequences but using Sample
    as a higher level abstraction.

    :param sequences: iterable of sequences (lists, tuples, np.arrays, ...)
    :param pad_func: zero-argument callable producing one padding element;
        note the returned object is reused for every padded slot of a row
    :param maxlen: target length; defaults to the longest input sequence
    :return: list of lists, each exactly ``maxlen`` long

    Fix: the original computed ``max(map(len, sequences))`` unconditionally,
    which crashed (ValueError) on an empty ``sequences`` even when an
    explicit ``maxlen`` was supplied.
    """
    if maxlen is None:
        # Only needed when no explicit length is given; default=0 makes the
        # empty-input case return [] instead of raising.
        maxlen = max(map(len, sequences), default=0)
    padded = []
    for sequence in sequences:
        # Truncate first (done via list() to cope with np.array inputs),
        # then pad up to maxlen.
        row = list(sequence[:maxlen])
        row.extend([pad_func()] * (maxlen - len(sequence)))
        padded.append(row)
    return padded
import re
def clean_text(text: str, remove_punctuation=False) -> str:
    """Clean HTML-ish text into lowercase plain text.

    Applies, in order: whitespace normalization, tag-to-newline conversion
    for <br>/<div>/<p>/<h*>, <head> removal, link-text replacement with the
    href, removal of remaining tags, and leading-space stripping; finally
    rstrips and lowercases. Code adapted from
    https://github.com/kk7nc/Text_Classification/

    :param text: the text to be cleaned
    :param remove_punctuation: whether to also strip punctuation characters
    :return: the cleaned, lowercased text
    """
    substitutions = [
        (r">\s+", u">"),  # remove spaces after a tag opens or closes
        (r"\s+", u" "),  # collapse consecutive whitespace
        (r"\s*<br\s*/?>\s*", u"\n"),  # newline for every <br>
        (r"</(div)\s*>\s*", u"\n"),  # newline after </div>
        (r"</(p|h\d)\s*>\s*", u"\n\n"),  # blank line after </p> and headings
        (r"<head>.*<\s*(/head|body)[^>]*>", u""),  # drop the <head> section
        (r'<a\s+href="([^"]+)"[^>]*>.*</a>', r"\1"),  # links -> their href
        (r"[ \t]*<[^<]*?/?>", u""),  # strip any remaining tags
        (r"^\s+", u""),  # strip leading whitespace
    ]
    if remove_punctuation:
        substitutions.append((r"[.,\/#!$%\^&\*;:{}=\-_`~()]", u""))
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text.rstrip().lower()
import webbrowser
def openURL(url):
    """Open *url* in a new window/tab of the system default web browser.

    :param url: the address to open
    :return: always True; webbrowser.open_new's own success flag is ignored
    """
    webbrowser.open_new(url)
    return True
def largest_nonadjacent_sum(arr):
    """Return the largest sum of non-adjacent elements of *arr*.

    The empty selection is allowed (both accumulators start at 0), so the
    result is never negative.
    """
    excl_prev = 0  # best sum that excludes the previous element
    best = 0       # best sum seen so far
    for value in arr:
        candidate = excl_prev + value  # take `value`, skip its predecessor
        excl_prev = max(excl_prev, best)
        best = max(best, candidate)
    return best
import six
def get_train_random_forest_pai_cmd(model_name, data_table, model_attrs,
                                    feature_column_names, label_name):
    """Get a command to submit a random forest training task to PAI.

    (The original docstring said "KMeans"; the command built below is
    ``pai -name randomforests``.)

    Args:
        model_name: model name on PAI
        data_table: input data table name
        model_attrs: model attribute dict; only ``"tree_num"`` is read
        feature_column_names: names of feature columns
        label_name: name of the label column
    Returns:
        A string which is a PAI cmd
    """
    # default use numTrees = 1
    tree_num = model_attrs.get("tree_num", 1)
    # NOTE(review): assert statements are stripped under `python -O`; this
    # validation would then silently disappear.
    assert isinstance(tree_num, six.integer_types), \
        "tree_num must be an integer"
    feature_cols = ",".join(feature_column_names)
    return '''pai -name randomforests -DinputTableName="%s" -DmodelName="%s"
    -DlabelColName="%s" -DfeatureColNames="%s" -DtreeNum="%d"''' % (
        data_table, model_name, label_name, feature_cols, tree_num)
import math
def xy(n, image_size):
    """Return the (x, y) position of pixel ``n`` in a row-major 2D array.

    :param n: flat pixel index
    :param image_size: row width of the image
    :return: (x, y) tuple — column and row of the pixel
    """
    # divmod is exact integer arithmetic; the original's
    # math.floor(n / image_size) went through float division and could lose
    # precision for very large indices.
    row, col = divmod(n, image_size)
    return (col, row)
from typing import Type
from typing import TypeVar
from typing import get_args
def _contains_unbound_typevar(t: Type) -> bool:
"""Recursively check if `t` or any types contained by `t` is a `TypeVar`.
Examples where we return `True`: `T`, `Optional[T]`, `Tuple[Optional[T], ...]`, ...
Examples where we return `False`: `int`, `Optional[str]`, ...
:param t: Type to evaluate.
:return: `True` if the input type contains an unbound `TypeVar`, `False` otherwise.
"""
# Check self
if isinstance(t, TypeVar):
return True
# Check children
for arg in get_args(t):
if _contains_unbound_typevar(arg):
return True
return False | 97e85b3aafea1d9dc86f69078ff39c93b6bd7c19 | 7,240 |
def create_sample_db(ntables):
    """Create a python description of a sample database.

    :param ntables: number of tables to describe
    :return: dict mapping "tableN" to its column description
    """
    def _table_spec(idx):
        # Every table gets a sequenced integer id plus a text data column.
        return {
            "columns": [
                {
                    "name": "id",
                    "type": "integer",
                    "use_sequence": "table%s_id_seq" % idx,
                },
                {"name": "data", "type": "text"},
            ],
        }

    return {"table%s" % idx: _table_spec(idx) for idx in range(1, ntables + 1)}
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.