content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
def user_exists(cursor, username):
    """
    Test whether a user exists, given its username
    Parameters:
    ==========
    cursor: Psycopg2 cursor
        Cursor of type RealDict in the postgres database
    username: Str
        Name of the user
    """
    SQL = "SELECT count(*) AS nb FROM users WHERE username=%s"
    cursor.execute(SQL, [username])
    res = bool(cursor.fetchone()['nb'])
    return res | c221cbb6dd3c99d1eacfc88c8f2161276680b938 | 17,646 |
def modify_leaves(f, ds):
"""Walks a data structure composed of nested lists/tuples/dicts, and
creates a new (equivalent) structure in which the leaves of the data
    structure (i.e. the non-list/tuple/dict data structures in the tree) have
been mapped through f."""
if isinstance(ds, list):
return [modify_leaves(f, sds) for sds in ds]
if isinstance(ds, tuple):
return tuple(modify_leaves(f, sds) for sds in ds)
if isinstance(ds, dict):
return {k: modify_leaves(f, sds) for k, sds in ds.items()}
return f(ds) | 54df919ef7ebf94a70a603e321fbe6b47b68b7c1 | 17,648 |
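A minimal usage sketch for `modify_leaves`, assuming the function above is in scope:

# Double every numeric leaf in a mixed nested structure.
nested = {"a": [1, 2, (3, 4)], "b": 5}
doubled = modify_leaves(lambda x: x * 2, nested)
assert doubled == {"a": [2, 4, (6, 8)], "b": 10}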
def netmask_to_cidr(netmask):
"""
    Convert a netmask to CIDR format
Notes:
code from https://stackoverflow.com/a/43885814
Args:
netmask (string): netmask address
Returns:
int: cidr value
"""
return sum([bin(int(x)).count('1') for x in netmask.split('.')]) | 613c90a814d30d01494203d06a4b7ae231f19311 | 17,649 |
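For illustration, assuming `netmask_to_cidr` above is in scope:

# "255.255.255.0" has 24 set bits across its four octets.
assert netmask_to_cidr('255.255.255.0') == 24
assert netmask_to_cidr('255.255.0.0') == 16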
from typing import List
def lines_to_list(lines: str) -> List[str]:
""" Transform multi-line input to a list. """
return lines.splitlines(keepends=False) | 35b3d803066525f645c45702ec1773470aec964f | 17,651 |
def energy_to_wavelength(energy: float):
"""Conversion from photon energy (eV) to photon wavelength (angstroms)"""
return 1.2398 / energy * 1e4 | 19c3b56852546330a6343d47fce742556b40e2ed | 17,652 |
import re
def to_camel(s):
"""Convert string s from `snake_case` to `camelCase`"""
return re.sub(r"(?!^)_([a-zA-Z])", lambda m: m.group(1).upper(), s) | 28ac46b93230fb567321adf940e0fd37131b4903 | 17,661 |
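A couple of examples of `to_camel`'s behavior, assuming the function above:

assert to_camel("snake_case_string") == "snakeCaseString"
# The (?!^) lookahead leaves a leading underscore untouched.
assert to_camel("_private_name") == "_privateName"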
def parse_data(data: str) -> list:
"""
Takes a data input string, splits and returns the components.
Example:
Input: "1-3 a: abcde"
        Output: [1, 3, "a", "abcde"]
"""
reqs, text = [i.strip() for i in data.split(":")]
    req_occurrences, req_letter = reqs.split()
    req_low, req_high = req_occurrences.split("-")
return [
int(req_low),
int(req_high),
req_letter,
text,
] | d0f974877292a11ef968edf05ca41afdbc7ae5e5 | 17,662 |
def orientation(p, q, r):
"""
Finds the orientation of an ordered set of vertices(p, q, r).
p: First vertex represented as a tuple.
q: Second vertex represented as a tuple.
r: Third vertex represented as a tuple.
returns:
0 : Collinear points
1 : Clockwise points
        2 : Counterclockwise points
    """
    val = ((q[1] - p[1]) * (r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))
    if val == 0:
        # Collinear
        return 0
    if val > 0:
        # Clockwise
        return 1
    else:
        # Counterclockwise
        return 2 | 3651c0b6eee3550c427ad03d229d947337e7eed1 | 17,670 |
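Worked examples for `orientation`, assuming the function above is in scope:

assert orientation((0, 0), (1, 1), (2, 2)) == 0  # collinear
assert orientation((0, 0), (1, 1), (2, 0)) == 1  # right turn: clockwise
assert orientation((0, 0), (1, 1), (0, 2)) == 2  # left turn: counterclockwise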
def L1(y_output, y_input):
""" L1 Loss Function calculates the sum of the absolute difference between the predicted and the input"""
return sum(abs(y_input - y_output)) | d1b53f4aa2a47b2fe3179c657e6337dcf951a980 | 17,671 |
from pathlib import Path
def get_dataset_file(working_directory, resource_name, resource_extension):
"""
    :param working_directory: the directory containing the requested resource
    :param resource_name: the name of the resource file in the directory
    :param resource_extension: the file extension of the requested resource file. Please note that the extension param
        must not begin with the "." character, as the function already adds it
:return: an object of `pathlib.PosixPath` which can be directly opened
"""
return Path(working_directory, resource_name + "." + resource_extension) | 6a08a04940e0d2e169c8b89f47e14a9fcd646d35 | 17,672 |
def expand_test_result_df(df_test):
"""Adds columns to a DataFrame with test results
Args:
df_test DataFrame as produced by trainer.ModelTrainer, i.e. with columns
'tp', 'fp', 'fn', 'correct', 'total_examples' and 'examples_above_threshold'
Returns:
        the input DataFrame with additional columns for 'precision', 'recall',
        accuracy ('acc'), 'f1' measure and 'coverage' percentage.
"""
    df = df_test
    epsilon = 0.00001  # avoid division by zero
    df['precision'] = df['tp'] / (df['tp'] + df['fp'] + epsilon)
    df['recall'] = df['tp'] / (df['tp'] + df['fn'] + epsilon)
    df['acc'] = df['correct'] / (df['examples_above_threshold'] + epsilon)
df['f1'] = 2*df['tp'] / (2*df['tp'] + df['fp'] + df['fn'] + epsilon)
df['coverage'] = df['examples_above_threshold']/ (df['total_examples'] + epsilon)
return df | dea64054f8fb372d9777b6bdc9b0064843bbd459 | 17,673 |
def is_image_file(filename):
""" Check if given file is image file or not
Parameters
-------
filename: str
input file path
Returns
-------
img_flag: bool
flag for image
"""
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG',
'.ppm', '.PPM',
'.bmp', '.BMP',
        '.tif', '.TIF', '.tiff', '.TIFF',
]
img_flag = any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
return img_flag | 3ddb6589a421b91e52cacb3b0bae75c1d682d981 | 17,674 |
def check_collision(board, shape, offset):
"""
See if the matrix stored in the shape will intersect anything
on the board based on the offset. Offset is an (x, y) coordinate.
"""
off_x, off_y = offset
for cy, row in enumerate(shape):
for cx, cell in enumerate(row):
if cell and board[cy + off_y][cx + off_x]:
return True
return False | f66e1739abf334c7b90ba5c129d18d67c4978e81 | 17,675 |
def dstport_to_icmptc(dstport):
"""
Destination port to ICMP type- and code - definition taken from
https://www.erg.abdn.ac.uk/users/gorry/course/inet-pages/icmp-code.html
https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml
Args:
dstport `int` destination port number
Return:
`int` icmp type, `int` icmp code
"""
    return dstport // 256, dstport % 256 | 9a1697431205a18468ef06edec7f86f5056f9419 | 17,681 |
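Illustrative mappings, assuming `dstport_to_icmptc` above (type = port // 256, code = port % 256):

assert dstport_to_icmptc(2048) == (8, 0)  # ICMP echo request
assert dstport_to_icmptc(771) == (3, 3)   # destination unreachable, port unreachable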
def _serp_query_string(parse_result):
"""
Some search engines contain the search keyword in the fragment so we
build a version of a query string that contains the query string and
the fragment.
:param parse_result: A URL.
:type parse_result: :class:`urlparse.ParseResult`
"""
query = parse_result.query
if parse_result.fragment != '':
query = '{}&{}'.format(query, parse_result.fragment)
return query | 277a1de9409384e5868f9cd8a473da7950e22d2d | 17,683 |
def get_destination_filename(path, prefix="t", overwrite=False):
"""
Get the output file name.
:param pathlib.Path path: Destination path
:param str prefix: prefix of filename.
    :param bool overwrite: if True, keep the original file name (the file will be overwritten); otherwise prepend the prefix
:return: file name for output
:rtype: str
"""
if overwrite:
name = path.name
else:
name = "-".join([prefix, path.name])
return name | af57d27b04fa1040fa0584ca7e6250101a32d77f | 17,687 |
from typing import AsyncGenerator
def mock_subscriber(topic_event) -> AsyncGenerator:
"""A mock subscriber."""
async def _f():
yield topic_event
return _f() | 25b65cddb2dfba1ff655e9675e6af4353bd40260 | 17,690 |
def filter_count_matrix(count_matrix, n_reads=50000, n_cells=2, cpm_thresh=0.5):
"""
Remove poorly sequenced cells and genes with low occurrence.
    Filter cells if the total aligned read count falls below a provided
    threshold. Filter genes with both low counts-per-million and low occurrence
among cells.
Args:
count_matrix (pd.DataFrame): raw read-count matrix.
n_reads (int): minimum number of read counts a cell must have to avoid
filtering. Default is 50,000.
n_cells (int, optional): minimum number of cells required to exhibit
the minimum expression level to avoid filtering. Default is 2.
cpm_thresh (float, optional): minimum counts-per-million in lowly
mapped genes. Default is 0.5
Returns:
(pd.DataFrame): filtered dataframe.
References:
Cell Filtering:
Rizzetto, S., Eltahla, A. A., Lin, P., Bull, R., Lloyd, A. R., Ho,
J. W. K., … Luciani, F. (2017). Impact of sequencing depth and read
length on single cell RNA sequencing data of T cells.
Scientific Reports, 7(1), 12781.
https://doi.org/10.1038/s41598-017-12989-x
https://www.nature.com/articles/s41598-017-12989-x
Gene Filtering:
Chen Y, Lun ATL and Smyth GK. From reads to genes to pathways:
differential expression analysis of RNA-Seq experiments using
Rsubread and the edgeR quasi-likelihood pipeline
[version 2; referees: 5 approved]. F1000Research 2016, 5:1438
(doi: 10.12688/f1000research.8987.2)
https://f1000research.com/articles/5-1438/v2
"""
# drop cells with low coverage
cell_counts = count_matrix.sum()
bad_cells = cell_counts.index.values[cell_counts < n_reads]
count_matrix.drop(bad_cells, axis=1, inplace=True)
# drop genes with low expression and low occurrence
cpm = count_matrix.apply(lambda x: x / cell_counts[x.name] * 10**6,
axis=0)
low_genes = cpm.apply(lambda x: sum(x > cpm_thresh) < n_cells, axis=1)
low_genes = low_genes.index.values[low_genes]
return count_matrix.drop(labels=low_genes, axis=0) | 66a9f36389afb2b7014a049e52a33c982593a124 | 17,692 |
def clean_dict(dictionary):
"""
Returns a new but cleaned dictionary.
* Keys with None type values are removed
* Keys with empty string values are removed
This function is designed so we only return useful data
"""
    newdict = dict(dictionary)
    for key, value in dictionary.items():
        if value is None or value == "":
            del newdict[key]
    return newdict | c48f8894b554b7c3b5f8601a9ee7edc9964989e1 | 17,693 |
def luhn_checksum(s: str) -> int:
"""Compute Luhn's check digit over a string of digits"""
LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7,
9) # sum_of_digits (index * 2)
evens = sum(int(p) for p in s[-1::-2])
odds = sum(LUHN_ODD_LOOKUP[int(p)] for p in s[-2::-2])
return (evens + odds) % 10 | d28705e84d211282d842e3ea1c8be71b3fb88be8 | 17,694 |
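A quick check with the classic Luhn test number, assuming the function above:

# Valid Luhn numbers yield a checksum of 0.
assert luhn_checksum("79927398713") == 0
# Corrupting one digit breaks the checksum.
assert luhn_checksum("79927398714") != 0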
def print_pretty_dict(d):
"""
Create a dictionary like a table in CLI. One line by key.
The first line (header) is Wikipedia Language Name
:param d: The dictionary
:return: A pretty dictionary for your CLI
"""
d = dict(sorted(d.items(), key=lambda item: item[0]))
pretty_dict = "{:<10} {:<10}\n".format('Wikipedia', 'Language Name')
for key, value in d.items():
pretty_dict += "{:<10} {:<10}\n".format(key, value)
return pretty_dict | 41c5ae87d4d0d441f367cf74aad06b0450c6e212 | 17,698 |
def markersdates(markers):
"""returns the list of dates for a list of markers
"""
return [m[4] for m in markers] | 108b3f903eb9ca8f9d510821703c5a7704cddc5a | 17,701 |
def tqdm_wrap(iterable):
    """Use tqdm if it is installed."""
    try:
        # Import inside the function so a missing tqdm can be caught here.
        from tqdm import tqdm
        return tqdm(iterable)
    except ModuleNotFoundError:
        return iterable | a7ac31102d75b58105378cf43657f1dbe2f8592e | 17,703 |
def kmer_coverage(C, L, k):
"""Converts contig coverage to kmer coverage
Args:
* C: Coverage
* L: Template length
* k: hash length
"""
return C * (L - k + 1) / float(L) | b422b92e1c4269e24f43d88844383d56406a189b | 17,707 |
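A worked example, assuming `kmer_coverage` above:

# 30x contig coverage, 100 bp templates, k = 21:
# 30 * (100 - 21 + 1) / 100 = 24.0
assert kmer_coverage(30, 100, 21) == 24.0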
def box(request):
"""
Parametrized fixture determining whether/how to transform fill_value.
Since fill_value is defined on a per-test basis, the actual transformation
(based on this fixture) is executed in _check_promote.
Returns
-------
boxed : Boolean
Whether fill_value should be wrapped in an np.array.
box_dtype : dtype
The dtype to pass to np.array([fill_value], dtype=box_dtype). If None,
then this is passed on unmodified, and corresponds to the numpy default
dtype for the given fill_value.
* (True, None) # fill_value wrapped in array with default dtype
* (True, object) # fill_value wrapped in array with object dtype
* (False, None) # fill_value passed on as scalar
"""
return request.param | b2faac79c6d78fa36342457bde0dcb4204022ab0 | 17,708 |
def interface(host):
"""Return an IP address for a client connection given the server host.
If the server is listening on '0.0.0.0' (INADDR_ANY)
or '::' (IN6ADDR_ANY), this will return the proper localhost.
"""
if host == '0.0.0.0':
# INADDR_ANY, which should respond on localhost.
return '127.0.0.1'
if host == '::':
# IN6ADDR_ANY, which should respond on localhost.
return '::1'
return host | 868ae36fdc45af5e64e53ed2222c18d04fc0c9c6 | 17,709 |
import posixpath
def split_path(path):
"""Convert PATH to (parent-path, name), unless it is None.
"""
return posixpath.split(path) if path is not None else None | 2a1aed6ed265ec8fee20375a0db41f9d9f94df35 | 17,712 |
import hashlib
def hash_large_file(file, chunk_size=1280000000, digest=True):
"""fast hashing of file
see https://stackoverflow.com/a/1131238/5172579
"""
with open(file, "rb") as f:
file_hash = hashlib.sha1()
chunk = f.read(chunk_size)
while chunk:
file_hash.update(chunk)
chunk = f.read(chunk_size)
if digest:
return file_hash.hexdigest()
return file_hash | 7603d2464e48fac70bc74cd3af8c5748ebeb2760 | 17,719 |
from pathlib import Path
def path_exists(filename: str) -> bool:
"""
Checks for the given file path exists.
"""
return Path(filename).exists() | 689c20e3417923e3538a62e35c861d2fd0cc11c5 | 17,721 |
def checker(z):
"""
:param z: a 2d array of complex type.
For each entry in z, it lies in an unique unit square with left-bottom corner
at lattice site (m, n) where (m, n) are both integers. Return a bool array of
the same shape with its corresponding entry is True if m, n have the same parity
else it's False.
"""
fx = ((z.real / 2) % 1) * 2 - 1
fy = ((z.imag / 2) % 1) * 2 - 1
return fx * fy > 0 | 3c5e17272441c5d33380ee217e1e518de67199b2 | 17,723 |
def compute_daily_returns(df):
"""Compute and return the daily return values."""
daily_returns = df.copy() # copy given dataframe to match size and column names
# compute daily returns starting from row 1 as no data present for day before day 0
# another way to do this is: (df[1:]/df[:-1].values) - 1
daily_returns[1:] = (df/df.shift(1)) - 1
daily_returns.iloc[0, :] = 0 # set daily returns for row 0 to 0
return daily_returns | 2b73673dab8bcb0dee4e8a890d720ca933b8981d | 17,724 |
def _literal_to_r_str(value):
"""Convert a python value to a corresponding R string.
    >>> _literal_to_r_str(True)
    'TRUE'
    >>> _literal_to_r_str(6)
    '6'
    >>> _literal_to_r_str("test")
    "'test'"
    """
    # Use identity checks rather than a dict lookup: bool is a subclass of
    # int in Python, so a dict keyed on True/False would also match 1 and 0.
    if value is True:
        return "TRUE"
    if value is False:
        return "FALSE"
    if value is None:
        return "NULL"
    if isinstance(value, str):
        # quote a string
        return "'{}'".format(value)
    return str(value) | 6fc6445fb4295458973dd977363536f741ab05f1 | 17,725 |
def drop_quasi_zero(df, thresh=0.05):
"""
Drop Quasi Zero Features
Returns a passed pandas DataFrame without columns containing too few
non-zero values.
Parameters
----------
df : pandas DataFrame
Dataset whose columns will be dropped.
thresh : float, optional
Minimum percentage of non-zero values in any given column for it to be
kept in the dataset. Default value is 0.05.
Returns
-------
pandas DataFrame
"""
drop_list = []
for el in df.columns.values:
non_zero = df[el][df[el] != 0].shape[0] / df.shape[0]
if non_zero < thresh:
drop_list.append(el)
print('Dropping column: {} | Non-zero values ratio: {}%'.format(
el, round(100 * non_zero, 3)))
return df.drop(drop_list, axis=1) | 187254b59b34f7c788ea5a3c162d6b869e852e9a | 17,728 |
import re
def pascal_to_snake(string: str) -> str:
"""
Converts pascal-case to snake-case.
>>> pascal_to_snake(string="HelloAndGoodMorning") # Returns "hello_and_good_morning"
"""
words = re.findall(pattern="[A-Z][^A-Z]*", string=string)
words_lower_cased = list(map(str.lower, words))
return "_".join(words_lower_cased) | 2ef5f0056d099194ffbd1aa8062fefea7664f4eb | 17,737 |
def fluid_properties(fluid_str):
"""
Return the physical density and kinematic viscosity for the prescribed
fluid.
"""
fluid_lib = {'water':(1000., 1.0e-6),
'glycol':(965.3,0.06/965.3),
'glycerin':(1260.0,1.49/1260.0)}
if fluid_str in list(fluid_lib.keys()):
return fluid_lib[fluid_str]
else:
print('valid fluids are:')
for keys in fluid_lib:
print(" '%s' " % keys)
raise KeyError('invalid fluid specified') | ea2784b5b9c9767e43dba787910fd5c32da8a266 | 17,739 |
def losocv_split(subjectIDs):
"""Create leave-one-subject-out cross-validation train/test splits.
Args:
subjectIDs (list): subjectID corresponding to each example.
Returns:
splits (list of lists): each fold's train and test indices.
subjectIDset (list): unique IDs, in held-out-test-set order
"""
subjectIDset = list(set(subjectIDs))
splits = []
for subject in subjectIDset:
test_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i]==subject]
train_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i]!=subject]
splits.append([train_idx,test_idx])
return splits, subjectIDset | 9f7d5051e34669a5c085cf08e65bcfd66eaaa9c7 | 17,741 |
import string
def letter_for(index):
"""Convert an index into a letter (or letter pair). a-z, then aa-az-zz"""
if index < 26:
return string.ascii_lowercase[index]
return (string.ascii_lowercase[(index // 26) - 1] # First letter in pair
+ string.ascii_lowercase[index % 26]) # Second letter | aad7edbbd8941339e93b8a732857258d28d73033 | 17,744 |
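Boundary cases for `letter_for`, assuming the function above:

assert letter_for(0) == 'a'
assert letter_for(25) == 'z'
assert letter_for(26) == 'aa'  # first of the letter pairs
assert letter_for(51) == 'az'
assert letter_for(52) == 'ba'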
def KJKGtoBTULB(hkjkg):
"""
Convertie l'enthalpie en kJ/kg vers btu/lb
Conversion: 1 kJ/kg = 0.429923 Btu/lb
:param hkjkg: Enthalpie [kJ/kg]
:return hbtulb: Enthalpie [btu/lb]
"""
hbtulb = hkjkg * 0.429923
return hbtulb | 3b26742b2e56c7e265bf94d0af15f485e091f395 | 17,745 |
def merge_list(lst, *to_merged_list):
"""
    Merge multiple lists into the first list
    :param lst: the list that receives the merged items
    :param to_merged_list: one or more lists whose items are appended to lst
    :return: lst, extended in place
"""
for item in to_merged_list:
lst.extend(item)
return lst | 29b17583c73854363277a65862efc130ae19346a | 17,750 |
def isstr(obj):
"""Return whether an object is instance of `str`."""
return isinstance(obj, str) | 33e1cea9b8a60d224dc395f4446f175c0b967dd0 | 17,751 |
def convert_clip_ids_to_windows(clip_ids):
""" Inverse function of convert_windows_to_clip_ids
Args:
clip_ids: list(int), each is a index of a clip, starting from 0
Returns:
list(list(int)), each sublist contains two integers which are clip indices.
        [10, 19] meaning a 10-clip window [20, 40] (seconds), if each clip is 2 seconds.
>>> test_clip_ids = [56, 57, 58, 59, 60, 61, 62] + [64, ] + [67, 68, 69, 70, 71]
>>> convert_clip_ids_to_windows(test_clip_ids)
[[56, 62], [64, 64], [67, 71]]
"""
windows = []
_window = [clip_ids[0], None]
last_clip_id = clip_ids[0]
for clip_id in clip_ids:
if clip_id - last_clip_id > 1: # find gap
_window[1] = last_clip_id
windows.append(_window)
_window = [clip_id, None]
last_clip_id = clip_id
_window[1] = last_clip_id
windows.append(_window)
return windows | 05600f2eb248ce61359a02cbcd4bd75035c6a55f | 17,752 |
def _is_compiled(url):
"""
Returns True if wheel with provided url is precompiled.
The logic in this method is a less efficient version of
-cp[0-9]{2}- regex matching.
"""
prefix = "-cp"
start = 0
for _ in range(len(url)):
start = url.find(prefix, start)
        if start == -1 or start + 6 > len(url):  # need room for a full "-cpXX-"
break
if url[start + len(prefix)].isdigit() and \
url[start + len(prefix) + 1].isdigit() and \
url[start + len(prefix) + 2] == "-":
return True
start += len(prefix)
return False | fada4e14da5c92f7189737a3c50041f9a93acfe1 | 17,760 |
def first_and_last_n_chars(s, n1=30, n2=30):
"""
Utility function to display first n1 characters and last n2 characters of a long string
(Adjusts display if string is less than n1+n2 char long)
:param s: string
:return: string for display
"""
first_len = min(len(s), n1)
first = s[:first_len]
last_len = min(len(s) - len(first), n2)
last = s[-last_len:] if last_len > 0 else ''
if first_len == len(s):
return first
elif first_len + last_len == len(s):
return "{}{}".format(first, last)
else:
return "{}...{}".format(first, last) | f23b02a65f1c8c03a71498b0bcb9cc2941fd8060 | 17,761 |
from pathlib import Path
def create_folder(folder_name, folder_path=""):
"""
Create a folder with a given name in a given path.
Also creates all non-existing parent folders.
Parameters
----------
folder_name : str
Name of the folder to be created.
folder_path : str
Optional; default: current path.
Either relative or absolute path of the folder to be created.
Returns
-------
pathlib.Path
Full path of the created folder.
"""
path = Path(folder_path) / folder_name
# Creating the folder and all non-existing parent folders.
path.mkdir(parents=True, exist_ok=True)
return path | ed6f239c210cc9697fe6b4bb45189cc54abda970 | 17,764 |
def log_json(request_id, message, context=None):
    """Create JSON object for logging data."""
    # Use None instead of a mutable default argument.
    stmt = {"message": message, "request_id": request_id}
    stmt.update(context or {})
    return stmt | b04c7101fcbc8bd800bd888e726ff131da198854 | 17,766 |
def claim_account_legacy(request):
"""Render a page explaining that claim links are no longer valid."""
return {} | d48db70e437fad3d8048902e640226780d3f4fb4 | 17,767 |
def comma_conjoin(inlist, conjunction):
"""Parses the elements of a list into a string joined by commas,
with an 'and' before the final element. Oxford comma!
"""
if len(inlist) == 0:
return ""
elif len(inlist) == 1:
return str(inlist.pop())
elif len(inlist) == 2:
return (" " + conjunction + " ").join(inlist)
text = ", " + conjunction + " " + inlist.pop()
text = ", ".join(inlist) + text
return text | 1a60d7e8752436796fc518832bfece1a97914ff0 | 17,768 |
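Example outputs, assuming `comma_conjoin` above (note it pops from the input list):

assert comma_conjoin([], "and") == ""
assert comma_conjoin(["ham"], "and") == "ham"
assert comma_conjoin(["ham", "eggs"], "and") == "ham and eggs"
assert comma_conjoin(["ham", "eggs", "spam"], "and") == "ham, eggs, and spam"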
def compare_data(plt_type, correct, given):
"""
Determines whether the given data matches any of the data found in the
correct data. This handles plots of different types: if a histogram
was plotted with the expected data for a line plot, it will return True.
Args:
plt_type (str): The expected type of this plot
correct (List of Int or List of List of Int): The expected data.
given (Dict): The actual plotted data and information
Returns:
bool: Whether the correct data was found in the given plot.
"""
# Infer arguments
if plt_type == 'hist':
correct_xs = None
correct_ys = correct
elif not correct:
correct_xs = []
correct_ys = []
elif isinstance(correct[0], (tuple, list)):
# We were given a list of lists of ints
correct_xs, correct_ys = correct
else:
# Assume it is a singular list
correct_xs = list(range(len(correct)))
correct_ys = correct
if given['type'] == 'hist':
return correct_ys == given['values']
elif plt_type == 'hist':
return correct_ys == given['y']
else:
return correct_xs == given['x'] and correct_ys == given['y'] | 0bbd217906d86c2117c8c1b7a66a768386ca116b | 17,773 |
def UINT(value):  # noqa: N802
    r"""Converts a value that matches \d+ into an integer."""
if value is None:
raise ValueError('None is not a valid integer')
if not value.isdigit():
raise ValueError('Only positive numbers are allowed')
return int(value) | 3d58dadf97fe26d7bfa00acb3451a39a5e5845bb | 17,777 |
import math
def get_distance(loc1, loc2):
""" Computes the Euclidian distance between two 2D points."""
x_diff = loc1.x - loc2.x
y_diff = loc1.y - loc2.y
return math.sqrt(x_diff**2 + y_diff**2) | 3a603ace039ea887cabd8b16d2b04d84d939c112 | 17,778 |
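A small sketch, assuming `get_distance` above; any object with `.x`/`.y` attributes works, e.g. a namedtuple:

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
assert get_distance(Point(0, 0), Point(3, 4)) == 5.0  # classic 3-4-5 triangle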
def get_rank(target, ranks):
"""
Get rank of a target entity within all ranked entities.
Args:
target (str): Target entity which rank should be determined.
ranks (list): List of tuples of an entity and its rank.
Returns:
int: Rank of entity or -1 if entity is not present in ranks.
"""
    for i, (word, _rank) in enumerate(ranks):
        if word == target:
            return i
return -1 | 0e247669af757a5ffa5fff016262eb677f7c3cb8 | 17,781 |
def get_attributes(obj):
"""
Fetches the attributes from an object.
:param obj: The object.
:type obj: object
:returns: A dictionary of attributes and their values from the object.
:rtype: dict
"""
return {k: getattr(obj, k) for k in dir(obj) if not k.startswith("__")} | 6e1cea3ed8ad2fa1c00f7c2eb86efb4a629e9f06 | 17,790 |
def bernoulli_lh(ho, ha, s, n):
"""
Returns the likelihood ratio for independently distributed bernoulli random variables.
Parameters
----------
ho : float
null hypothesis
ha : float
alternative hypothesis
s : float or int
number of successes in sample
n : float or int
total number of elements in sample
Returns
-------
float
likelihood ratio of model
"""
null_lh = (ho ** s) * (1 - ho)**(n - s)
alt_lh = (ha ** s) * (1 - ha)**(n - s)
return alt_lh / null_lh | 95b52f3f48a173a6a40fab8ae7c776a7082ebb02 | 17,796 |
def get_predicted_gender(spanish_sent):
"""
Return the gender of the first entity in the spanish
translation.
"""
first_word = spanish_sent.split()[0].lower()
if first_word == "el":
return "male"
elif first_word == "la":
return "female"
else:
return "neutral" | 94f43d37f29af4e3f1314a5e177d2e8036036a0a | 17,800 |
import torch
def make_onehot_kernel(kernel_size, index):
"""
Make 2D one hot square kernel, i.e. h=w
k[kernel_size, kernel_size] = 0 except k.view(-1)[index] = 1
"""
kernel = torch.zeros(kernel_size, kernel_size)
kernel.view(-1)[index] = 1
return kernel.view(1, 1, kernel_size, kernel_size) | be324887a77f454e9f2c306e1bc9eddd8c001bb8 | 17,801 |
def manual_expo_mode(state):
""" Bool to express if mode is manual. """
return state["expo_mode"] == "manual" | b9797a0e85ef9474b8819555afc33cf020031fc1 | 17,802 |
def whitelist(squat_candidates, whitelist_filename="whitelist.txt"):
"""Remove whitelisted packages from typosquat candidate list.
Args:
squat_candidates (dict): dict of packages and potential typosquatters
whitelist_filename (str): file location for whitelist
Returns:
dict: packages and post-whitelist potential typosquatters
"""
# Create whitelist
whitelist = []
with open(whitelist_filename, "r") as file:
for line in file:
# Strip out end of line character
whitelist.append(line.strip("\n"))
# Remove packages contained in whitelist
whitelist_set = set(whitelist)
for pkg in squat_candidates:
new_squat_candidates_set = set(squat_candidates[pkg]) - whitelist_set
new_squat_candidates_list = list(new_squat_candidates_set)
# Update typosquat candidate list
squat_candidates[pkg] = new_squat_candidates_list
return squat_candidates | 5d8305d7ee721988420a6035d2df9e0b52404b7b | 17,804 |
def generate_family_characteristics(df, family_id, group_ids):
"""
Given either an HMIS or a Connecting Point dataframe, add columns regarding family structure.
:param df: HMIS or Connecting point dataframe.
    :type df: Pandas.Dataframe.
:param family_id: column name of family identifier.
:type family_id: str.
:param group_ids: grouping column names.
:type group_ids: [str].
"""
df['With Child?'] = df.groupby(group_ids)['Child?'].transform(any)
df['With Adult?'] = df.groupby(group_ids)['Adult?'].transform(any)
df['With Family?'] = df['With Child?'] & df['With Adult?']
df['Family?'] = df.groupby(family_id)['With Family?'].transform(any)
return df | f4d37d396352f6e9236e6710d8115bc6a8d3e632 | 17,809 |
import functools
def int_ip_from_string(ip_string):
"""
Convert ip4 address from string representation into int (4 bytes).
Parameters
----------
ip_string : string
ip4 address as string (dot-separated)
Returns
-------
int
4-byte integer ip4 address representation
"""
    # Octets are packed little-endian: the first octet ends up in the lowest byte.
    addr_segments = map(int, reversed(ip_string.split('.')))
    return functools.reduce(lambda hi, lo: (hi << 8) | lo, addr_segments, 0) | d6a2cd4d93c54887697396533203d1b716e53cfe | 17,811 |
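Two quick checks of the little-endian packing, assuming the function above:

assert int_ip_from_string("1.0.0.0") == 1        # first octet -> lowest byte
assert int_ip_from_string("0.0.0.1") == 1 << 24  # last octet -> highest byte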
def get_values_as_str(history, key):
"""Retrieve values from sequence of dictionaries."""
return [i[key] for i in history] | 46687f63ce519d532f44f4aa6eb836eb079cadba | 17,815 |
def merge_compute(left, right):
"""
    Merge two dictionaries, computing integer values instead of overriding.
    Left overrides every right value, except when both the left and right
    values are integers: then the right value is incremented by the left value.
    Arguments:
        left (dict): The dict to merge into right.
        right (dict): The dict merged into.
    Returns:
        dict: Merged dict from left to right.
"""
for k, v in left.items():
# Only compute item if both left and right values are integers, else
# left override right value
if k in right and type(v) is int and type(right[k]) is int:
right[k] += v
else:
right[k] = v
return right | 37823d5c3bd94c94685aed1f965c77d787900633 | 17,818 |
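A minimal example, assuming `merge_compute` above is in scope:

left = {"count": 2, "name": "left"}
right = {"count": 3, "name": "right", "extra": True}
merged = merge_compute(left, right)
# "count" is summed (both ints); "name" is overridden by left's value.
assert merged == {"count": 5, "name": "left", "extra": True}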
def verify_params(post_dict):
"""
Verify that a post dict contains the expected parameters.
"""
if ('target' not in post_dict) or ('source' not in post_dict):
return False
if not post_dict['target'] or not post_dict['source']:
return False
return True | b0dc66873e1771a898eb0b12c778bc2c4341e69f | 17,820 |
def _get_special_ids(tokenizer):
"""Gets the ids of special [T] and [P] tokens."""
trigger_token_id = tokenizer.convert_tokens_to_ids('[T]')
if trigger_token_id == tokenizer.unk_token_id:
raise ValueError('Tokenizer does not have special [T] token.')
predict_token_id = tokenizer.convert_tokens_to_ids('[P]')
if predict_token_id == tokenizer.unk_token_id:
raise ValueError('Tokenizer does not have special [P] token.')
return trigger_token_id, predict_token_id | ad3458150df641a3ae3ae3ce5d4481b438345757 | 17,828 |
import shutil
def mpi_executable(preferred_executable=None):
"""
Return an mpi executable found on the current system.
Depending on your MPI implementation, the executable name
to run an MPI application may differ. This function will
check which one is available and return the first valid one
it finds. Valid in this case means that it can be found with
methods like `which`.
To override which executable to check first you can pass
your preferred executable as an argument.
Parameters
----------
preferred_executable : str, optional
The first executable to check for on the system. If it
isn't found, will continue with the regular search for
valid MPI executables.
Returns
-------
str
The name of a valid MPI executable.
Raises
------
RuntimeError
If no valid MPI executable could be found, a RuntimeError
is raised.
"""
if preferred_executable:
if shutil.which(preferred_executable):
return preferred_executable
else:
raise RuntimeError(
f"The given preferred mpi executable `{preferred_executable}` "
"was not found on this system"
)
executables = ["mpirun", "mpiexec", "srun"]
for executable in executables:
if shutil.which(executable):
return executable
raise RuntimeError(
"Could not find an mpi installation. Make sure your PATH is set correctly."
) | ab197a7591da0609a4af3eee57e8b8947ab19d9d | 17,830 |
def abs2(numero):
"""
(num) -> num
    Compute the absolute value of a number
    >>> abs2(10)
    10
    >>> abs2(-8)
    8
    >>> abs2(0)
    0
    :param numero: the number to evaluate
    :return: the absolute value of the number
    """
    if numero < 0:
        # The return keyword is used to return the result of
        # our function
        return -numero
    return numero | 826ab80cc1af250e9e332e72c81ffcc5a5383d83 | 17,838 |
import requests
def fetch_output(input_prefix, input_value, output_prefix, enable_semantic_search=False):
"""Find APIs which can produce the output_prefix
:arg str input_prefix: The prefix of the input, e.g. ncbigene, hgnc.symbol.
:arg str output_prefix: The prefix of the output, e.g. ncbigene, hgnc.symbol.
:arg str input_value: The actual value of the input
    :arg boolean enable_semantic_search: whether to use the semantic query endpoint instead of the direct lookup
:returns: list -- api endpoints which can produce the output prefix
"""
if enable_semantic_search:
response = requests.get('http://biothings.io/explorer/api/v2/semanticquery?input_prefix={{input}}&output_prefix={{output}}&input_value={{value}}'.
replace("{{input}}", input_prefix).replace("{{output}}", output_prefix).replace("{{value}}", input_value))
else:
response = requests.get('http://biothings.io/explorer/api/v2/directinput2output?input_prefix={{input}}&output_prefix={{output}}&input_value={{value}}'.
replace("{{input}}", input_prefix).replace("{{output}}", output_prefix).replace("{{value}}", input_value))
if response.ok:
doc = response.json()
return doc['data'] if doc else []
else:
doc = response.json()
if 'error message' in doc:
print(doc['error message'])
else:
print("No results could be found for your query!")
return [] | 1ed8f27923695ab7587f49083070aecb040987f6 | 17,839 |
def _remove_duplicate_transactions(transactions, reference_transactions):
"""Returns a list of transactions that are not present in reference_transactions."""
return [t for t in transactions if t not in reference_transactions] | 8e2ff00dd845acd9ab63f93b8d998fe897a7d21e | 17,850 |
def get_outdir_simupara(fp, line):
"""
get output directory and simulation parameters (in format: time steps)
:param fp: opened file
:param line: current line
:return: output directory and simulation parameters
Note: output directory is '' if not specified and
list of simulation parameters is empty if not specified
"""
outdir = ''
simupara = []
while line:
line = fp.readline()
if not line:
break
if line == '--\n':
continue
line = line.strip('\n')
line = line.split(' ')
if len(line) == 1:
outdir = str(line[0])
else:
simupara = line
return outdir, simupara | 22da06da0466657a5905ba3333b270787cf98a58 | 17,854 |
def get_indexing(string):
"""
Parse numpy-like fancy indexing from a string.
Args:
string (str): string represent the indices to take
a subset of from array. Indices for each dimension
are separated by `,`; indices for different dimensions
are separated by `;`.
e.g.: For a numpy array `arr` of shape (3,3,3), the string "1,2;1,2"
            means taking the sub-array `arr[[1,2], [1,2]]`
Returns:
final_indexing (tuple): the parsed indexing.
"""
index_ls = string.strip().split(";")
final_indexing = []
for index in index_ls:
index_single_dim = index.split(",")
index_single_dim = [int(i) for i in index_single_dim]
final_indexing.append(index_single_dim)
return tuple(final_indexing) | 327054d0387f4bda1f3345d2886541a20e96e962 | 17,857 |
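For instance, assuming `get_indexing` above:

assert get_indexing("1,2;1,2") == ([1, 2], [1, 2])
assert get_indexing("0;1,2;3") == ([0], [1, 2], [3])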
import pytz
def timezonize(timezone):
"""Convert a string representation of a timezone to its pytz object, or do nothing if the argument is already a pytz timezone."""
    # Checking if timezone is a valid pytz object is hard, as they seem to be spread around the pytz package.
    # Option 1): Try to convert if string or unicode, else try to
    # instantiate a datetime object with the timezone to see if it is valid
    # Option 2): Get all members of the pytz package and check for type, see
    # http://stackoverflow.com/questions/14570802/python-check-if-object-is-instance-of-any-class-from-a-certain-module
    # Option 3): Perform a hand-made test. We go for this one; tests would fail if it gets broken
if not 'pytz' in str(type(timezone)):
timezone = pytz.timezone(timezone)
return timezone | 1145bb17bf9c50985d88770e8fa0437bfb3e9a18 | 17,859 |
import time
def convert_to_timestamp(seconds):
"""
    Converts a number of seconds into an 'H:MM:SS'-style string,
    stripping leading zero hours and digits. We differ a bit from
    python's standard time library in that if a number is negative,
    instead of wrapping back around the 24hr mark, we return '0:00'.
"""
if seconds < 0:
return '0:00'
tt = time.strftime('%H:%M:%S', time.gmtime(seconds))
if tt[:2] == '00':
tt = tt[3:] # slice off leading ':' also
if tt[:1] == '0':
tt = tt[1:]
return tt | 45428766c440c15366030b07cac95c304cefee89 | 17,862 |
def unpack_string(byte_stream):
"""Return decoded ASCII string from bytestring.
Decode a bytes object via UTF-8 into a string
Args:
byte_stream (bytes): arbitrary length
Returns:
string: UTF-8 decoded string
"""
out_string = byte_stream.decode("utf-8", "replace")
return out_string | 5caaf20a41a05fb0026cb9131dd4825f036ec837 | 17,863 |
import importlib
def import_from_module_name(module_name):
"""Imports a module and returns it and its name."""
module = importlib.import_module(module_name)
return module, module_name | 87d38536dc23c3ef86bcc1d0b5b0ec937ba397d6 | 17,866 |
def get_range_around(range_value, current_item, padding):
"""
Returns a range of numbers around the given number.
This is useful for pagination, where you might want to show something
like this::
<< < ... 4 5 (6) 7 8 .. > >>
In this example `6` would be the current page and we show 2 items around
that page (including the page itself).
Usage::
{% load libs_tags %}
{% get_range_around page_obj.paginator.num_pages page_obj.number 5
as pages %}
    :param range_value: Number of total items in your range (1 indexed)
:param current_item: The item around which the result should be centered
(1 indexed)
:param padding: Number of items to show left and right from the current
item.
"""
total_items = 1 + padding * 2
left_bound = padding
right_bound = range_value - padding
if range_value <= total_items:
range_items = range(1, range_value + 1)
return {
'range_items': range_items,
'left_padding': False,
'right_padding': False,
}
if current_item <= left_bound:
range_items = range(1, range_value + 1)[:total_items]
return {
'range_items': range_items,
'left_padding': range_items[0] > 1,
'right_padding': range_items[-1] < range_value,
}
if current_item >= right_bound:
range_items = range(1, range_value + 1)[-total_items:]
return {
'range_items': range_items,
'left_padding': range_items[0] > 1,
'right_padding': range_items[-1] < range_value,
}
range_items = range(current_item - padding, current_item + padding + 1)
return {
'range_items': range_items,
'left_padding': True,
'right_padding': True,
} | 28a18ff5e998ed6b5eb7ec7a0aaf540037ac1946 | 17,870 |
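A pagination sketch, assuming `get_range_around` above:

# 10 pages, current page 6, 2 pages of padding on each side.
result = get_range_around(10, 6, 2)
assert list(result['range_items']) == [4, 5, 6, 7, 8]
assert result['left_padding'] and result['right_padding']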
def get_solved (sol_dict):
"""Returns the solved variables of a solution dictionary"""
return filter(lambda k: sol_dict[k] is not None, sol_dict) | a712caa09be029f0ad7054248cce7762644a6644 | 17,877 |
def strip_stac_item(item: dict) -> dict:
"""
Strips a stac item, removing not stored fields
:param item dict: input stac item
:rtype: dict
:return: stripped stac item
"""
strip = item
s3_key = None
for link in item["links"]:
if link["rel"] == "self":
s3_key = link["href"]
assert s3_key is not None, "Can't find self key"
# Remove fields that will not be stored
strip.pop("stac_version")
strip.pop("stac_extensions")
strip.pop("type")
strip.pop("links")
strip.pop("bbox")
strip.pop("assets")
# https://cbers-stac-0-6.s3.amazonaws.com/CBERS4/PAN5M/156/107/CBERS_4_PAN5M_20150610_156_107_L4.json
strip["s3_key"] = "/".join(s3_key.split("/")[3:])
return strip | 1909b44f316875f0cd0d65fff8bd62329cd229e5 | 17,881 |
def prepare(raw_input):
"""
Input is organized in three chunks: (1) rules, (2) my ticket, (3) tickets nearby.
Must be prepared following different rules.
(1) label ':' lower 'or' upper ',' lower 'or' upper
(2) header line "your ticket:" followed by one line of comma separated integers
(3) same as (2), but many lines of comma separated integers
The CSV lines have always as many fields as there are rules in (1).
"""
    rules, my_ticket, tickets_nearby = raw_input.split('\n\n')
return rules, my_ticket, tickets_nearby | a784da2ad5ef864d3114187766acd8af2b69b868 | 17,889 |
def string_empty(string: str) -> bool:
"""Return True if the input string is None or whitespace."""
return not (bool(string) or isinstance(string, str) and bool(string.strip())) | cca19df1eda7039f7299c49fcb9832ce11847524 | 17,895 |
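Quick checks, assuming `string_empty` above:

assert string_empty(None) is True
assert string_empty("   ") is True
assert string_empty("text") is False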
def get_attr_flows(results, key='variable_costs'):
"""
Return all flows of an EnergySystem for a given attribute,
which is not zero.
Parameters
----------
results : dict
        Results dictionary of the oemof.solph optimisation including the
Parameters with key 'param'.
key : str
Returns
-------
list : List of flows, where a non zero attribute value is given either
at the 'scalars' or 'sequences'.
"""
param = results['param']
list_keys = list(param.keys())
var_scalars = [
x for x in list_keys
if key in param[x]['scalars'].keys()
if abs(param[x]['scalars'][key]) > 0
]
var_sequences = [
x for x in list_keys
if key in param[x]['sequences'].keys()
if abs(param[x]['sequences'][key].sum()) > 0
]
var_cost_flows = var_scalars + var_sequences
return var_cost_flows | 756ec66cdf7b01cdbc6b872b4f30cf27e4ff524f | 17,897 |
from typing import Callable
import hmac
import hashlib
def generate_hash_key(chain_url: str, privkey: bytes, strategy: Callable):
"""Generate a hash key to use as `client_id`, using the :mod:`hmac` library.
The message is the concatenation of `chain_url` plus the `__name__` attribute of the
`strategy`.
The `privkey` is used to sign it using `sha256`.
The result is hexdigested before we return it.
"""
k = hmac.new(privkey, (chain_url + strategy.__name__).encode("UTF-8"), hashlib.sha256)
return k.hexdigest() | 1bb6b69f06c6ecb245b8ea8b4c772ede872c460c | 17,898 |
def _rename_artifact(ctx, tpl_string, src_file, packaging_type):
"""Rename the artifact to match maven naming conventions."""
artifact = ctx.new_file(ctx.bin_dir, tpl_string % (ctx.attr.artifact_id, ctx.attr.version, packaging_type))
ctx.action(
inputs = [src_file],
outputs = [artifact],
command = "cp %s %s" % (src_file.path, artifact.path),
)
return artifact | 6c5bf6928e79bfaf20f222bea9815643aaff3dc5 | 17,901 |
def get_config_data(config_data):
"""
Parameters
----------
config_data : list containing config parameters
Returns
-------
[config parameters] : various parameters and values from the config file
"""
exp_type = config_data['RobotSettings']['commandFile']
reward_dur = config_data['ExperimentSettings']['rewardWinDur']
x_p = config_data['RobotSettings']['xCommandPos']
y_p = config_data['RobotSettings']['yCommandPos']
z_p = config_data['RobotSettings']['zCommandPos']
x0 = config_data['RobotSettings']['x0']
y0 = config_data['RobotSettings']['y0']
z0 = config_data['RobotSettings']['z0']
r = config_data['RobotSettings']['x']
t1 = config_data['RobotSettings']['y']
    t2 = config_data['RobotSettings']['z']
return exp_type, reward_dur, x_p, y_p, z_p, x0, y0, z0,r,t1,t2 | b6f8199547d8177660336c8c0aab09deddf96118 | 17,902 |
import six
def is_list_of_strings(vals):
"""Returns True if val is a list (or enumerable) of strings. False otherwise"""
try:
# check if everything is a string
for val in vals:
if not isinstance(val, six.string_types):
return False
    except TypeError:
        # vals is not iterable
        return False
# everything is a string
return True | 61c48b7b43acc1ab8f86c47273606a830ccb0bab | 17,905 |
def getPruning(table, index):
"""Extract pruning value"""
if ((index & 1) == 0):
res = table[index // 2] & 0x0f
else:
res = (table[index // 2] & 0xf0) >> 4
    return res | 45e9e81bef153edfa7003f5256a3e9b169a6a2c0 | 17,909 |
import torch
def append_homog(tensor: torch.Tensor, homog_value: float = 1.) -> torch.Tensor:
"""Appends a homogeneous coordinate to the last dimension of a Tensor.
Args:
tensor: A Tensor.
homog_value: Value to append as homogeneous coordinate to the last dimension
of `tensor`. (Default: 1.0)
Returns:
A Tensor identical to the input but one larger in the last dimension. The
        new entries are filled with homog_value.
"""
shape = list(tensor.shape)
shape[-1] = 1
appendage = torch.ones(shape, dtype=tensor.dtype, device=tensor.device) * homog_value
return torch.cat([tensor, appendage], -1) | d07361ded84608d0aa2d4587b5418be0a6f5c395 | 17,910 |
def get_fitness(solution):
"""Return the fitness value of the passed solution"""
if not solution:
return 0
# the fitness of a valid solution coincides with its value
return solution.value | bd3abcb2600ee074ce33b543c7a19b58924e3415 | 17,911 |
def pretty_size(size, unit=1024):
"""
This function returns a pretty representation of a size value
:param int|long|float size: the number to to prettify
:param int unit: 1000 or 1024 (the default)
:rtype: str
"""
suffixes = ["B"] + [i + {1000: "B", 1024: "iB"}[unit] for i in "KMGTPEZY"]
if unit == 1000:
suffixes[1] = 'kB' # special case kB instead of KB
# cast to float to avoid losing decimals
size = float(size)
for suffix in suffixes:
if abs(size) < unit or suffix == suffixes[-1]:
if suffix == suffixes[0]:
return "%d %s" % (size, suffix)
else:
return "%.1f %s" % (size, suffix)
else:
size /= unit | abeafa97d45212c0170b981e9448e63acc1a54d2 | 17,914 |
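Sample outputs, assuming `pretty_size` above:

assert pretty_size(0) == '0 B'
assert pretty_size(2048) == '2.0 KiB'
assert pretty_size(2000, unit=1000) == '2.0 kB'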
import logging
import requests
def request_image(image_url):
"""Attempts to download the file at the URL specified and, if available,
returns it as a raw response object."""
if image_url is None:
logging.error("Image URL is None")
return None
logging.info(f"Downloading roll image {image_url}")
response = requests.get(image_url, stream=True)
if response.status_code == 200:
response.raw.decode_content = True
return response
logging.error(f"Unable to download {image_url} - {response}")
return None | c5d0847859b9e036ebf0121f1c8a71492d0923f6 | 17,916 |
def set_title(title=None, property_name=None, channel=None):
"""Set title for map.
Parameters:
title (str):
If ``None``, try to set automatically from property (and channel)
name(s). For no title, set to ''. Default is ``None``.
property_str (str):
Map property name. Default is ``None``.
channel (str):
Map channel name. Default is ``None``.
Returns:
str
"""
if title is None:
property_name = property_name if property_name is not None else ''
channel = channel if channel is not None else ''
title = ' '.join((property_name, channel))
title = ' '.join(title.split('_')).strip()
return title | 6483c8df9fafaa7e46ef667801a24a921ae871da | 17,919 |
from pathlib import Path
def ready_for_postinstall(chroot):
"""True if the chroot path exists and nothing mounted inside, False otherwise"""
path = Path(chroot)
return path.exists() and not any(path.iterdir()) | c684cf9f4940f4f6e102752a2456737b5de56cff | 17,920 |
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
damping_prob = 1 / len(corpus)
if len(corpus[page]) == 0: # If page has no links, all pages have equal prob
distribution = {ipage: damping_prob for ipage in corpus}
else:
linked_prob = 1 / len(corpus[page])
distribution = {ipage: damping_prob * (1 - damping_factor) for ipage in corpus}
for ipage in corpus[page]: # Add links probabilities
distribution[ipage] += damping_factor * linked_prob
return distribution | c7ea20809acce0f9290f3b5890122c7d34acd416 | 17,923 |
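A small worked example, assuming `transition_model` above is in scope:

corpus = {"a": {"b"}, "b": {"a", "c"}, "c": set()}
dist = transition_model(corpus, "b", 0.85)
# Every page gets (1 - d) / N; "a" and "c" also split d over b's two links.
assert abs(dist["a"] - (0.15 / 3 + 0.85 / 2)) < 1e-9
assert abs(sum(dist.values()) - 1.0) < 1e-9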
def average_speed(s1 : float, s0 : float, t1 : float, t0 : float) -> float:
"""
[FUNC] average_speed:
Returns the average speed.
Where:
Delta Space = (space1[s1] - space0[s0])
Delta Time = (time1[t1] - time0[t0])
"""
    return (s1 - s0) / (t1 - t0) | 8125fb0454433288b506ffd277fa6b1bd21b06c9 | 17,930 |
from typing import Coroutine
from typing import Any
import asyncio
def await_async(task: Coroutine[Any, Any, Any]) -> Any:
"""Await async task in sync function.
Parameters
----------
task : Coroutine
task to be awaited, eg f() where f is async function.
Returns
-------
Any
Result returned from awaited task.
"""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    # Note: run_until_complete raises if the loop is already running
    # (e.g. inside Jupyter); this helper targets synchronous contexts.
    return loop.run_until_complete(task) | 9dc6c3301dcc91036e05059b695b3edba9de61a1 | 17,931 |
import socket
import struct
def send_packet(mac_str):
"""Send magic packet to given mac address
:param mac_str: mac address string separated by :
:return:
"""
def broadcast(magic):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(magic, ('<broadcast>', 9))
def build_packet(mac_str):
bytes = [255] * 6 + [int(x, 16) for x in mac_str.split(':')] * 16
magic = struct.pack('B' * 102, *bytes)
return magic
magic_packet = build_packet(mac_str)
broadcast(magic_packet) | cd6a8fe49d43b1970995068302f611839b454187 | 17,940 |
import hashlib
def hash_file(file_content):
"""Create an md5 of file contents"""
the_hash = hashlib.md5(file_content.encode('utf-8'))
return the_hash.hexdigest() | ba3b84edb78ccb70e4fb37b8cc6c8eb73c0d108b | 17,944 |
from datetime import datetime
def unrets_date(rets_date):
"""
Converts a RETS date (ISO 8601 format) into a Python datetime
:param rets_date: a RETS/ISO 8601 date
:type rets_date: str
:rtype: datetime.datetime
:return: rets_date as a Python datetime
"""
# For datetimes with microseconds
return datetime.strptime(rets_date, '%Y-%m-%dT%H:%M:%S.%f') | 9f8cae6880ac4d2f285eff856db09da0a39ec4ee | 17,946 |
def add_nones(word):
"""Change word into a list and add None at its beginning, end, and between every other pair of elements. Works whether the word is a str or a list.
"""
def yield_it(word_string):
yield None
it = iter(word_string)
yield next(it)
for x in it:
yield None
yield x
yield None
if isinstance(word, str):
return list(yield_it(word.split(' ')))
else:
return list(yield_it(word)) | 63c8437361f2f0cc200665d7cdf6a41aab7eaf67 | 17,948 |
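Examples, assuming `add_nones` above:

assert add_nones("a b") == [None, 'a', None, 'b', None]
assert add_nones(['x', 'y', 'z']) == [None, 'x', None, 'y', None, 'z', None]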
import six
def make_key(*args, **kwargs):
"""
Given any number of lists and strings will join them in order as one
string separated by the sep kwarg. sep defaults to u"_".
Add exclude_last_string=True as a kwarg to exclude the last item in a
given string after being split by sep. Note if you only have one word
in your string you can end up getting an empty string.
Example uses:
>>> from mongonaut.forms.form_utils import make_key
>>> make_key('hi', 'my', 'firend')
>>> u'hi_my_firend'
>>> make_key('hi', 'my', 'firend', sep='i')
>>> 'hiimyifirend'
>>> make_key('hi', 'my', 'firend',['this', 'be', 'what'], sep='i')
>>> 'hiimyifirendithisibeiwhat'
>>> make_key('hi', 'my', 'firend',['this', 'be', 'what'])
>>> u'hi_my_firend_this_be_what'
"""
sep = kwargs.get('sep', u"_")
exclude_last_string = kwargs.get('exclude_last_string', False)
string_array = []
for arg in args:
if isinstance(arg, list):
string_array.append(six.text_type(sep.join(arg)))
else:
if exclude_last_string:
new_key_array = arg.split(sep)[:-1]
if len(new_key_array) > 0:
string_array.append(make_key(new_key_array))
else:
string_array.append(six.text_type(arg))
return sep.join(string_array) | b5c48386304ab248518a7330605fbcfb37ecad23 | 17,950 |
def has_restart_file(job):
"""Check if the job has a restart file."""
return job.isfile("fort.77") | d6d5bd748722ab5bdc83461728d5a02e539d25ed | 17,951 |
import hashlib
def compute_file_checksum(path):
""" Compute a SHA1 checksum for a file. """
with open(path, 'rb') as f:
checksum = hashlib.sha1(f.read()).hexdigest()
return checksum | d253d98039a916be19ee3ad1c3d050b2dd251c1e | 17,955 |