content (string, lengths 39–14.9k) | sha1 (string, length 40) | id (int64, 0–710k) |
---|---|---|
def parseText(text1, nlp):
"""Run the spaCy parser on the input text and return the parsed document."""
doc = nlp(text1)
return doc | 99d6a585358a700f8fc48c5dc4fc761a03ab42a7 | 705,984 |
import socket
def is_port_open(host, port, timeout=5):
"""
verifies if a port is open in a remote host
:param host: IP of the remote host
:type host: str
:param port: port to check
:type port: int
:param timeout: timeout max to check
:type timeout: int
:return: True if the port is open
:rtype: bool
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((host, port))
return result == 0 | 1805727a3bb007cd5686475de686cf7bceac83a1 | 705,986 |
def _gen_find(subseq, generator):
"""Returns the first position of `subseq` in the generator or -1 if there is no such position."""
if isinstance(subseq, bytes):
subseq = bytearray(subseq)
subseq = list(subseq)
pos = 0
saved = []
for c in generator:
saved.append(c)
if len(saved) > len(subseq):
saved.pop(0)
pos += 1
if saved == subseq:
return pos
return -1 | ec89e787a61d684e2a7d0c8c2d0fb9c89cf73ada | 705,996 |
def maximum_difference_sort_value(contributions):
"""
Auxiliary function to sort the contributions for the compare_plot.
Returns the value of the maximum difference between values in contributions[0].
Parameters
----------
contributions: list
list containing 2 elements:
a Numpy.ndarray of contributions of the indexes compared, and the features' names.
Returns
-------
value_max_difference : float
Value of the maximum difference contribution.
"""
if len(contributions[0]) <= 1:
max_difference = contributions[0][0]
else:
max_difference = max(
[
abs(contrib_i - contrib_j)
for i, contrib_i in enumerate(contributions[0])
for j, contrib_j in enumerate(contributions[0])
if i <= j
]
)
return max_difference | cd7f66ec252199fb01b9891440d0f7da370c7b8e | 705,998 |
def add(number1, number2):
"""
This function adds two numbers
Arguments:
number1 : first number to be passed
number2 : second number to be passed
Returns: number1 + number2
the sum of the two numbers
Examples:
>>> add(0,0)
0
>>> add(1,1)
2
>>> add(1.1,2.2)
3.3000000000000003
"""
return number1 + number2 | 5db1a461f65672d5fc1201a82657fada30220743 | 706,001 |
def calculate_timeout(start_point, end_point, planner):
"""
Calculates the time limit between start_point and end_point considering a fixed speed of 5 km/hr.
Args:
start_point: initial position
end_point: target_position
planner: to get the shortest path between start_point and end_point
Returns:
time limit considering a fixed speed of 5 km/hr
"""
path_distance = planner.get_shortest_path_distance(
[start_point.location.x, start_point.location.y, 0.22], [
start_point.orientation.x, start_point.orientation.y, 0.22], [
end_point.location.x, end_point.location.y, end_point.location.z], [
end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0 | cb7ae44df9b6a89d2e171046fa0bdfe3f81445c5 | 706,002 |
import json
def get_aws_regions_from_file(region_file):
"""
Return the list of region names read from region_file.
The format of region_file is as follows:
{
"regions": [
"cn-north-1",
"cn-northwest-1"
]
}
"""
with open(region_file) as r_file:
region_data = json.load(r_file)
return sorted(r for r in region_data.get("regions")) | 639da8c6417295f97621f9fd5321d8499652b7b2 | 706,006 |
def split(x, divider):
"""Split a string.
Parameters
----------
x : any
A str object to be split. Anything else is returned as is.
divider : str
Divider string.
"""
if isinstance(x, str):
return x.split(divider)
return x | e77a162777d9bb13262e4686ba1cb9732ebab221 | 706,008 |
def is_blank(value):
"""
Returns True if ``value`` is ``None`` or an empty string.
>>> is_blank("")
True
>>> is_blank(0)
False
>>> is_blank([])
False
"""
return value is None or value == "" | 6a30f9f6726701a4b7a9df8957503111a5222558 | 706,010 |
def pass_through_formatter(value):
"""No op update function."""
return value | 202ea761db9e1fa858718c61df3a7fd18f02826c | 706,015 |
def check_branch(payload, branch):
"""
Check if a push was on configured branch.
:param payload: Payload from web hook.
:param branch: Name of branch to trigger action on.
:return: True if push was on configured branch, False otherwise.
"""
if "ref" in payload:
if payload["ref"] == branch:
return True
return False | 88bd0ebae330ee169e97a40aee208b2f92ee4a32 | 706,016 |
import re
def nice(name):
"""Generate a nice name based on the given string.
Examples:
>>> names = [
... "simple_command",
... "simpleCommand",
... "SimpleCommand",
... "Simple command",
... ]
>>> for name in names:
... nice(name)
'Simple Command'
'Simple Command'
'Simple Command'
'Simple Command'
Arguments:
name (str): The string from which generate the nice name.
Returns:
str: The generated nice name.
"""
# The regular expression will match all upper case characters except the
# one that starts the string and insert a space before it.
return re.sub(r"(?<!^)([A-Z])", r" \1", name).replace("_", " ").title() | ab96675423812a85744bb76e7f62d08bbbac2eea | 706,018 |
def findwskeyword(keyword, sol):
"""Find and return a value for a keyword in the list of the wavelength solution"""
i = sol.index(keyword)
j = sol[i:].index('\n')
return sol[i:i + j].split('=')[1].strip() | b3cc028415d74ecfd7ec3868ae591d7b4d3b8860 | 706,020 |
def format_time(time):
""" Converts datetimes to the format expected in SAML2 XMLs. """
return time.strftime("%Y-%m-%dT%H:%M:%SZ") | 03651b72aa0b177ac1ac3f1ccafdba6fe967a11a | 706,021 |
def get_delivery_voucher_discount(voucher, total_price, delivery_price):
"""Calculate discount value for a voucher of delivery type."""
voucher.validate_min_amount_spent(total_price)
return voucher.get_discount_amount_for(delivery_price) | 8ede095730c1d29d01949dff47b4a2893d29720c | 706,022 |
def process_results(unprocessed, P, R, G):
"""Process the results returned by the worker pool, sorting them by
policy and run e.g. results[i][j][k] are the results from policy i
on run j on graph k. Parameters:
- unprocessed: Unprocessed results (as returned by the worker pool)
- P: number of policies
- R: number of runs
- G: number of graphs/SCMs/test cases
"""
results = []
for i in range(P):
policy_results = []
for r in range(R):
run_results = unprocessed[(i*G*R + G*r):(i*G*R + G*(r+1))]
policy_results.append(run_results)
results.append(policy_results)
return results | 24c2854723b3fc33c3fee58595f84d789e861fbc | 706,025 |
def gen_spacer(spacer_char="-", nl=2):
"""
Returns a spacer string consisting of nl lines, each made of 60 copies of the designated character ("-" by default).
By default (nl=2) it generates two lines of 60 characters.
"""
spacer = ""
for i in range(nl):
spacer += spacer_char * 60
spacer += "\n"
return spacer | 7434f191dafdf500c2fc3e67373afc664e543ce0 | 706,027 |
import torch
def get_number_of_voxels_per_class(labels: torch.Tensor) -> torch.Tensor:
"""
Computes the number of voxels for each class in a one-hot label map.
:param labels: one-hot label map in shape Batches x Classes x Z x Y x X or Classes x Z x Y x X
:return: A tensor of shape [Batches x Classes] containing the number of non-zero voxels along Z, Y, X
"""
if not len(labels.shape) in [5, 4]:
raise Exception("labels must have either 4 (Classes x Z x Y x X) "
"or 5 dimensions (Batches x Classes x Z x Y x X), found:{}"
.format(len(labels.shape)))
if len(labels.shape) == 4:
labels = labels[None, ...]
return torch.count_nonzero(labels, dim=(2, 3, 4)) | 568a91639a42cf3cd3debe365c5a963512d95dfc | 706,030 |
import pydoc
def read_docstring(object_):
"""
Returns object docstring without the FILE information.
"""
fmt = "```\n{}\n```\n"
docs = pydoc.plain(pydoc.render_doc(object_)).split("FILE")[0].rstrip()
return fmt.format(docs) | 5c21f6eadf400ac9316e3f44d98464536b9b7536 | 706,031 |
def _get_num_ve_sve_and_max_num_cells(cell_fracs):
"""
Calculate the num_ve, num_sve and max_num_cells
Parameters
----------
cell_fracs : structured array, optional
A sorted, one dimensional array,
each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
The volume fraction of the cell within the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
Returns
-------
num_ve : int
Number of the total voxels
num_sve : int
Number of the total subvoxels, equal to or greater than num_ve
max_num_cells : int
Max number of cells (subvoxels) in a voxel
"""
num_sve = len(cell_fracs)
num_ve = len(set(cell_fracs["idx"]))
max_num_cells = -1
for i in range(num_sve):
max_num_cells = max(max_num_cells, len(cell_fracs[cell_fracs["idx"] == i]))
return num_ve, num_sve, max_num_cells | c0d154898bbfeafd66d89a2741dda8c2aa885a9a | 706,040 |
def extent2(texture):
""" Returns the extent of the image data (0.0-1.0, 0.0-1.0) inside its texture owner.
Textures have a size power of 2 (512, 1024, ...), but the actual image can be smaller.
For example: a 400x250 image will be loaded in a 512x256 texture.
Its extent is (0.78, 0.98), the remainder of the texture is transparent.
"""
return (texture.tex_coords[3], texture.tex_coords[7]) | 16c6d220ad48201fd133ed11c97452bf0831c0d8 | 706,042 |
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
# Store the total length of the hand
hand_len = 0
# For every letter in the hand
for key in hand.keys():
# Add the number of times that letter appears in the hand
# to the variable storing hand length
hand_len += hand[key]
# Return the number of letters in the current hand
return hand_len | 297f8af5943bf87bb7999a1212d54430857de12b | 706,043 |
import torch
def valid_from_done(done):
"""Returns a float mask which is zero for all time-steps after a
`done=True` is signaled. This function operates on the leading dimension
of `done`, assumed to correspond to time [T,...], other dimensions are
preserved."""
done = done.type(torch.float)
valid = torch.ones_like(done)
valid[1:] = 1 - torch.clamp(torch.cumsum(done[:-1], dim=0), max=1)
return valid | 0ca2bd0f9e23605091b2f8d1bc15e67e1632b82b | 706,046 |
def rescale_list_to_range(original, limits):
"""
Linearly rescale values in original list to limits (minimum and maximum).
:example:
>>> rescale_list_to_range([1, 2, 3], (0, 10))
[0.0, 5.0, 10.0]
>>> rescale_list_to_range([1, 2, 3], (-10, 0))
[-10.0, -5.0, 0.0]
>>> rescale_list_to_range([1, 2, 3], (0j, 10j))
[0j, 5j, 10j]
:param original: Original list or list-like to be rescaled.
:type original: list
:param limits: Tuple of two floats, min and max, to constrain the new list
:type limits: tuple
:return: Original list rescaled to fit between min and max
:rtype: list
"""
new_min, new_max = limits[0:2]
old_min, old_max = min(original), max(original)
return [(new_max + new_min) / 2 for _ in original] if old_min == old_max \
else [new_max * (v - old_min) / (old_max - old_min) +
new_min * (old_max - v) / (old_max - old_min) for v in original] | bdd38bb24b597648e4ca9045ed133dfe93ad4bd8 | 706,047 |
def get_ratings(labeled_df):
"""Returns list of possible ratings."""
return labeled_df.RATING.unique() | 2b88b1703ad5b5b0a074ed7bc4591f0e88d97f92 | 706,048 |
def A004086(i: int) -> int:
"""Digit reversal of i."""
result = 0
while i > 0:
unit = i % 10
result = result * 10 + unit
i = i // 10
return result | b0a65b7e203b7a92f7d6a1846888798c369ac869 | 706,057 |
def add_volume (activity_cluster_df,
activity_counts):
"""Merges the session counts of each activity into the activities dataframe and converts them to percentile ranks
Parameters
----------
activity_cluster_df : dataframe
Pandas dataframe of activities, skipgrams features, and cluster label from DBSCAN
activity_counts: dictionary
Dictionary (from activities.create_corpus func) of activity and session counts
Returns
-------
pandas dataframe of activities, skipgrams features, x-value, y-value, and activity volume percentiles
"""
assert isinstance(activity_counts, dict) == True, "activity_counts should be a dictionary."
assert len(activity_counts) >= len(activity_cluster_df), "activity_counts must contain the same number or more activity entries than activity_cluster_df."
# Map activities to capture unique session ID count in activities dataframe
activity_cluster_df['volume_pctl'] = activity_cluster_df.index.map(activity_counts)
# Replace absolute volume with percentile rank integer
activity_cluster_df['volume_pctl'] = activity_cluster_df['volume_pctl'].rank(pct=True) * 100
return activity_cluster_df | 1ea67909e2c48500ca2f022a3ae5ebcbe28da6c8 | 706,059 |
def prefetched_iterator(query, chunk_size=2000):
"""
This is a prefetch_related-safe version of what iterator() should do.
It will sort and batch on the default django primary key
Args:
query (QuerySet): the django queryset to iterate
chunk_size (int): the size of each chunk to fetch
"""
# walk the records in ascending id order
base_query = query.order_by("id")
def _next(greater_than_id):
"""Returns the next batch"""
return base_query.filter(id__gt=greater_than_id)[:chunk_size]
batch = _next(0)
while batch:
item = None
# evaluate each batch query here
for item in batch:
yield item
# next batch starts after the last item.id
batch = _next(item.id) if item is not None else None | e8a8feeea8073161283018f19de742c9425e2f94 | 706,060 |
def skip(line):
"""Returns true if line is all whitespace or shebang."""
stripped = line.lstrip()
return stripped == '' or stripped.startswith('#!') | 4ecfb9c0f2d497d52cc9d9e772e75d042cc0bcce | 706,063 |
def draw_box(image, box, color):
"""Draw 1-pixel-wide bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 1, x1:x2] = color
image[y2:y2 + 1, x1:(x2+1)] = color
image[y1:y2, x1:x1 + 1] = color
image[y1:y2, x2:x2 + 1] = color
return image | 4d1e713c6cb6a3297b4f7d8ab9682205947770da | 706,064 |
def do_nothing(ax):
"""Do not add any watermark."""
return ax | 6fbe32dc45ca1a945e1c45bf0319770c4d683397 | 706,066 |
def _scan_real_end_loop(bytecode, setuploop_inst):
"""Find the end of loop.
Return the instruction offset.
"""
start = setuploop_inst.next
end = start + setuploop_inst.arg
offset = start
depth = 0
while offset < end:
inst = bytecode[offset]
depth += inst.block_effect
if depth < 0:
return inst.next
offset = inst.next | 9cff8ab77563a871b86cdbb14236603ec58e04b6 | 706,067 |
from pathlib import Path
def add_filename_suffix(file_path: str, suffix: str) -> str:
"""
Append a suffix at the filename (before the extension).
Args:
file_path: str The path of the file to which we would like to add a suffix
suffix: The suffix to add
Returns: path with suffix appended at the end of the filename and before extension
"""
path = Path(file_path)
return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix)) | 546bb95f694ee5d5cb26873428fcac8453df6a54 | 706,069 |
def find_correspondance_date(index, csv_file):
"""
The method returns the date reported in the csv_file for the i-th subject
:param index: index corresponding to the subject analysed
:param csv_file: csv file where all the information are listed
:return: the exam date for the given index
"""
return csv_file.EXAMDATE[index] | 915b9a493247f04fc1f62e614bc26b6c342783c8 | 706,074 |
import unicodedata
def normalize_to_ascii(char):
"""Strip a character from its accent and encode it to ASCII"""
return unicodedata.normalize("NFKD", char).encode("ascii", "ignore").lower() | 592e59ae10bb8f9a04dffc55bcc2a1a3cefb5e7e | 706,075 |
import functools
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper | 2264ca5978485d8fc13377d17eb84ee522a040b9 | 706,077 |
def arcmin_to_deg(arcmin: float) -> float:
""" Convert arcmin to degree """
return arcmin / 60 | 9ef01181a319c0c48542ac57602bd7c17a7c1ced | 706,078 |
def is_hex_value(val):
"""
Helper function that returns True if the provided value is an integer in
hexadecimal format.
"""
try:
int(val, 16)
except ValueError:
return False
return True | 6ba5ac1cfa9b8a4f8397cc52a41694cca33a4b8d | 706,079 |
def list_to_str(input_list, delimiter=","):
"""
Concatenates list elements, joining them by the separator specified by the
parameter "delimiter".
Parameters
----------
input_list : list
List with elements to be joined.
delimiter : String, optional, default ','.
The separator used between elements.
Returns
-------
String
Returns a string, resulting from concatenation of the list's elements, separated by the delimiter.
"""
return delimiter.join(
[x if isinstance(x, str) else repr(x) for x in input_list]
) | 4decfbd5a9d637f27473ec4a917998137af5ffe0 | 706,080 |
import zlib
import marshal
def serialize(object):
"""
Serialize the data into bytes using marshal and zlib
Args:
object: a value
Returns:
Returns a bytes object containing compressed with zlib data.
"""
return zlib.compress(marshal.dumps(object, 2)) | 650cbc8937df5eae79960f744b69b8b12b623195 | 706,085 |
import numpy as np
def str_to_dtype(s):
"""Convert dtype string to numpy dtype."""
return eval('np.' + s) | e0ff793404af5a8022d260fde5878329abbac483 | 706,086 |
def duo_username(user):
""" Return the Duo username for user. """
return user.username | 92b2bfd5f6f3027787db493880139a8564597946 | 706,090 |
def parse_line(line,):
"""Return a list of 2-tuples of the possible atomic valences for a given line from
the APS defining sheet."""
possap = []
for valence, entry in enumerate(line[4:]):
if entry != "*":
possap.append((valence, int(entry)))
return possap | d27ed66cb35084c9927cae8658d7ea8a421c69a4 | 706,091 |
def _gen_off_list(sindx):
"""
Return a function that, given a size, returns the list of numbered
links starting at the given index `sindx`.
"""
def _gen_link_olist(osize):
return list(range(sindx, sindx + osize))
return _gen_link_olist | 863ccdc08f6a7cadccc3c5ccfd0cb92a223aadda | 706,097 |
import requests
def get_playlist_object(playlist_url, access_token):
"""
playlist_url : url of spotify playlist
access_token : access token gotten from client credentials authorization
return object containing playlist tracks
"""
playlist_id = playlist_url.split("/")[-1]
playlist_endpoint = f"https://api.spotify.com/v1/playlists/{playlist_id}"
get_header = {
"Authorization" : "Bearer " + access_token
}
# API request
response = requests.get(playlist_endpoint, headers=get_header)
playlist_object = response.json()
return playlist_object | 8c7ed1a1b9574e2e0870d3091452accf5909f982 | 706,100 |
def compact_interval_string(value_list):
"""Compact a list of integers into a comma-separated string of intervals.
Args:
value_list: A list of sortable integers such as a list of numbers
Returns:
A compact string representation, such as "1-5,8,12-15"
"""
if not value_list:
return ''
value_list.sort()
# Start by simply building up a list of separate contiguous intervals
interval_list = []
curr = []
for val in value_list:
if curr and (val > curr[-1] + 1):
interval_list.append((curr[0], curr[-1]))
curr = [val]
else:
curr.append(val)
if curr:
interval_list.append((curr[0], curr[-1]))
# For each interval collapse it down to "first-last" or just "first"
# if first == last.
return ','.join([
'{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])
for pair in interval_list
]) | b479b45dc68a0bce9628a19be17185437f3edca6 | 706,103 |
def _follow_word_from_node(node, word):
"""Follows the link with given word label from given node.
If there is a link from ``node`` with the label ``word``, returns the end
node and the log probabilities and transition IDs of the link. If there are
null links in between, returns the sum of the log probabilities and the
concatenation of the transition IDs.
:type node: Lattice.Node
:param node: node where to start searching
:type word: str
:param word: word to search for
:rtype: tuple of (Lattice.Node, float, float, str)
:returns: the end node of the link with the word label (or ``None`` if the
word is not found), and the total acoustic log probability, LM log
probability, and transition IDs of the path to the word
"""
if word not in node.word_to_link:
return (None, None, None, None)
link = node.word_to_link[word]
if link.word is not None:
return (link.end_node,
link.ac_logprob if link.ac_logprob is not None else 0.0,
link.lm_logprob if link.lm_logprob is not None else 0.0,
link.transitions if link.transitions is not None else "")
end_node, ac_logprob, lm_logprob, transitions = \
_follow_word_from_node(link.end_node, word)
if end_node is None:
return (None, None, None, None)
else:
if link.ac_logprob is not None:
ac_logprob += link.ac_logprob
if link.lm_logprob is not None:
lm_logprob += link.lm_logprob
if link.transitions is not None:
transitions += link.transitions
return (end_node, ac_logprob, lm_logprob, transitions) | a21a20ee4ad2d2e90420e30572d41647b3938f4b | 706,108 |
def convert_xrandr_to_index(xrandr_val: float):
"""
:param xrandr_val: usually comes from the
config value directly, as a string (it's
just the nature of directly retrieving
information from a .ini file)
:return: an index representation
of the current brightness level, useful
for switch functions (where we switch
based on indexes and not string values)
Example: 0.2 is converted to 1
"""
return int(xrandr_val * 10 - 1) | eed5f7a6c79f7dcb29c627521d31dc59e5cd430b | 706,111 |
def merge_dict_recursive(base, other):
"""Merges the *other* dict into the *base* dict. If any value in other is itself a dict and the base also has a dict for the same key, merge these sub-dicts (and so on, recursively).
>>> base = {'a': 1, 'b': {'c': 3}}
>>> other = {'x': 4, 'b': {'y': 5}}
>>> want = {'a': 1, 'x': 4, 'b': {'c': 3, 'y': 5}}
>>> got = merge_dict_recursive(base, other)
>>> got == want
True
>>> base == want
True
"""
for (key, value) in list(other.items()):
if (isinstance(value, dict) and
(key in base) and
(isinstance(base[key], dict))):
base[key] = merge_dict_recursive(base[key], value)
else:
base[key] = value
return base | 10ea2bbcf7d2ee330c784efff684974339d48b5d | 706,114 |
def url_path_join(*items):
"""
Make it easier to build url path by joining every arguments with a '/'
character.
Args:
items (list): Path elements
"""
return "/".join([item.lstrip("/").rstrip("/") for item in items]) | d864c870f9d52bad1268c843098a9f7e1fa69158 | 706,115 |
def _test(value, *args, **keywargs):
"""
A function that exists for test purposes.
>>> checks = [
... '3, 6, min=1, max=3, test=list(a, b, c)',
... '3',
... '3, 6',
... '3,',
... 'min=1, test="a b c"',
... 'min=5, test="a, b, c"',
... 'min=1, max=3, test="a, b, c"',
... 'min=-100, test=-99',
... 'min=1, max=3',
... '3, 6, test="36"',
... '3, 6, test="a, b, c"',
... '3, max=3, test=list("a", "b", "c")',
... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
... "test='x=fish(3)'",
... ]
>>> v = Validator({'test': _test})
>>> for entry in checks:
... print v.check(('test(%s)' % entry), 3)
(3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
(3, ('3',), {})
(3, ('3', '6'), {})
(3, ('3',), {})
(3, (), {'test': 'a b c', 'min': '1'})
(3, (), {'test': 'a, b, c', 'min': '5'})
(3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
(3, (), {'test': '-99', 'min': '-100'})
(3, (), {'max': '3', 'min': '1'})
(3, ('3', '6'), {'test': '36'})
(3, ('3', '6'), {'test': 'a, b, c'})
(3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
(3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
(3, (), {'test': 'x=fish(3)'})
"""
return (value, args, keywargs) | c011c9386392c4b8dc8034fee33bfcfdec9845ed | 706,119 |
def format_sample_case(s: str) -> str:
"""format_sample_case converts a string s to a good form as a sample case.
A good form means that it uses LF instead of CRLF, has a trailing newline, and has no superfluous whitespace.
"""
if not s.strip():
return ''
lines = s.strip().splitlines()
lines = [line.strip() + '\n' for line in lines]
return ''.join(lines) | cd691f2bfc8cc56db85f2a55ff3bf4b5afd5f30e | 706,120 |
def height(grid):
"""Gets the height of the grid (stored in row-major order)."""
return len(grid) | b90bdb029518cfdaaa4bf93dd77b8996e646b322 | 706,125 |
import inspect
def getNumArgs(obj):
"""Return the number of "normal" arguments a callable object takes."""
sig = inspect.signature(obj)
return sum(1 for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_ONLY or
p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD) | c2e9edef0b2d8c18a0f9e2af90a6a1573705d590 | 706,129 |
def read_text(file, num=False):
""" Read from txt [file].
If [num], then data is numerical data and will need to convert each
string to an int.
"""
with open(file,'r') as f:
data = f.read().splitlines()
if num:
data = [int(i) for i in data]
return data | f9b61d254b1c2188ae6be3b9260f94f0657bcd3a | 706,130 |
import logging
def create_provider_router(neutron_client, project_id):
"""Create the provider router.
:param neutron_client: Authenticated neutronclient
:type neutron_client: neutronclient.Client object
:param project_id: Project ID
:type project_id: string
:returns: Router object
:rtype: dict
"""
routers = neutron_client.list_routers(name='provider-router')
if len(routers['routers']) == 0:
logging.info('Creating provider router for external network access')
router_info = {
'router': {
'name': 'provider-router',
'tenant_id': project_id
}
}
router = neutron_client.create_router(router_info)['router']
logging.info('New router created: %s', (router['id']))
else:
logging.warning('Router provider-router already exists.')
router = routers['routers'][0]
return router | c9eb1de728d141d73c9f7b169df87c01829892f6 | 706,132 |
from typing import List
import shlex
def split(string: str) -> List[str]:
"""
Split string (which represents a command) into a list.
This allows us to just copy/paste command prefixes without having to define a full list.
"""
return shlex.split(string) | 360fceeba7d6280e27068f61d2420cfd9fbfbcc2 | 706,133 |
import re
def remove_useless_lines(text):
"""Removes lines that don't contain a word nor a number.
Args:
text (string): markdown text that is going to be processed.
Returns:
string: text once it is processed.
"""
# Useless lines
useless_line_regex = re.compile(r'^[^\w\n]*$', re.MULTILINE | re.UNICODE)
processed_text = useless_line_regex.sub(r'', text)
return processed_text | fd33cdb243b6887d11846736f922bb4e1332d549 | 706,140 |
import string
def remove_punctuation(list_of_string, item_to_keep=""):
"""
Remove punctuation from a list of strings.
Parameters
----------
- list_of_string : a dataframe column or variable containing the text stored as a list of string sentences
- item_to_keep : a string of punctuation signs you want to keep in text (e.g., '!?.,:;')
"""
# Update string of punctuation signs
if len(item_to_keep) > 0:
punctuation_list = "".join(
c for c in string.punctuation if c not in item_to_keep
)
else:
punctuation_list = string.punctuation
# Remove punctuation from each sentence
transtable = str.maketrans("", "", punctuation_list)
return [sent.translate(transtable) for sent in list_of_string] | cb9190bc160f8e725479b531afab383c6857ceac | 706,141 |
def get_search_keywords(testcase):
"""Get search keywords for a testcase."""
crash_state_lines = testcase.crash_state.splitlines()
# Use top 2 frames for searching.
return crash_state_lines[:2] | 15c1611aeff33f9d8bba843f076b31abfb4023ba | 706,142 |
def make_protein_index(proteins):
"""Indexes proteins
"""
prot_index = {}
skip = set(['sp', 'tr', 'gi', 'ref', ''])
for i, p in enumerate(proteins):
accs = p.accession.split('|')
for acc in accs:
if acc in skip:
continue
prot_index[acc] = i
return prot_index | be54ca3a123fe13efbb8c694187dd34d944fd654 | 706,143 |
def python(cc):
"""Format the character for a Python string."""
codepoint = ord(cc)
if 0x20 <= codepoint <= 0x7f:
return cc
if codepoint > 0xFFFF:
return "\\U%08x" % codepoint
return "\\u%04x" % codepoint | b0c2042c653043c0831a35ffc13d73850e29af2f | 706,144 |
def reshape_signal_batch(signal):
"""Convert the signal into a standard batch shape for use with cochleagram.py
functions. The first dimension is the batch dimension.
Args:
signal (array): The sound signal (waveform) in the time domain. Should be
either a flattened array with shape (n_samples,), a row vector with shape
(1, n_samples), a column vector with shape (n_samples, 1), or a 2D
matrix of the form [batch, waveform].
Returns:
array:
**out_signal**: If the input `signal` has a valid shape, returns a
2D version of the signal with the first dimension as the batch
dimension.
Raises:
ValueError: Raises an error of the input `signal` has invalid shape.
"""
if signal.ndim == 1: # signal is a flattened array
out_signal = signal.reshape((1, -1))
elif signal.ndim == 2: # signal is a row or column vector
if signal.shape[0] == 1:
out_signal = signal
elif signal.shape[1] == 1:
out_signal = signal.reshape((1, -1))
else: # first dim is batch dim
out_signal = signal
else:
raise ValueError('signal should be flat array, row or column vector, or a 2D matrix with dimensions [batch, waveform]; found %s' % signal.ndim)
return out_signal | 344ce1a9a695e99fa470a5d849afb40bc381c9df | 706,145 |
def map_field_name_to_label(form):
"""Takes a form and creates label to field name map.
:param django.forms.Form form: Instance of ``django.forms.Form``.
:return dict:
"""
return dict([(field_name, field.label)
for (field_name, field)
in form.base_fields.items()]) | dfc2779f498fb479553602a72d9520d398746302 | 706,147 |
import time
def compute(n=26):
""" Computes 2 to the power of n and returns elapsed time"""
start = time.time()
res = 0
for i in range(2**n):
res += 1
end = time.time()
dt = end - start
print(f'Result {res} in {dt} seconds!')
return dt | d816c587302830f0acd20a59905c8634fcf20b49 | 706,148 |
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
} | 5ec6b21b09372e71e6dcf8c60f418bcbc4beee64 | 706,151 |
def pow(x, n):
""" pow(x, n)
Power function.
"""
return x**n | 09d62a68607bf0dab8b380a0c3ee58c6ed4497d6 | 706,153 |
def dot_product(u, v):
"""Computes dot product of two vectors u and v, each represented as a tuple
or list of coordinates. Assume the two vectors are the same length."""
output = 0
for i in range(len(u)):
output += (u[i]*v[i])
return output | 6362776bef32870d3b380aecbb2037483e049092 | 706,154 |
def next_page(context):
"""
Get the next page for signup or login.
The query string takes priority over the template variable and the default
is an empty string.
"""
if "next" in context.request.GET:
return context.request.GET["next"]
if "next" in context.request.POST:
return context.request.POST["next"]
if "next" in context:
return context["next"]
return "" | 6abc1c8ef260366e53f335a27ee42f0356c91b63 | 706,156 |
def add_placeholders(components):
"""Add placeholders for missing DATA/INSTANCE components"""
headers = [s[:2] for s in components]
for prefix in ("CD", "CR"):
if prefix not in headers:
components.append(prefix + ("C" * 11))
return components | 303f1590042acc60aa753e5e317417de01fafafc | 706,161 |
import hashlib
def md5(s, raw_output=False):
"""Calculates the md5 hash of a given string"""
res = hashlib.md5(s.encode())
if raw_output:
return res.digest()
return res.hexdigest() | 238c2a6c6b06a046de86e514698c7ef5622f770b | 706,162 |
def remove_uoms(words):
"""
Remove uoms in the form of e.g. 1000m 1543m3
Parameters
----------
words: list of words to process
Returns
-------
A list of words where possible uom have been removed
"""
returnWords=[]
for word in words:
word=word.replace('.', '', 1)
word=word.replace(',', '', 1)
if word[0:len(word)-1].isnumeric()==False and word[0:len(word)-1].isdecimal()==False:
#we do not have a match on e.g. 1543m
if word[0:len(word)-2].isnumeric()==False and word[0:len(word)-2].isdecimal()==False:
#we do not have a match on e.g. 1543m3
#add it
returnWords.append(word)
return returnWords | cdb2caf274a58b61c57ebe4fba167ec6275ddf6f | 706,165 |
def format_value(v):
"""
Formats a value to be included in a string.
@param v a string
@return a string
"""
return ("'{0}'".format(v.replace("'", "\\'"))
if isinstance(v, str) else "{0}".format(v)) | 8b8d5452ecf938b4e9e9956577f1a3f1102e49bc | 706,166 |
def check_invalid(string,*invalids,defaults=True):
"""Checks if input string matches an invalid value"""
# Checks string against inputted invalid values
for v in invalids:
if string == v:
return True
# Checks string against default invalid values, if defaults=True
if defaults == True:
default_invalids = ['INC','inc','incomplete','NaN','nan','N/A','n/a','missing']
for v in default_invalids:
if string == v:
return True
# For valid strings
return False | 6e9e20beebe8e0b0baed680219fd93453d7f4ce3 | 706,167 |
def get_registry_image_tag(app_name: str, image_tag: str, registry: dict) -> str:
"""Returns the image name for a given organization, app and tag"""
return f"{registry['organization']}/{app_name}:{image_tag}" | 16c71f99ff3a3c2514c24cb417b93f3b88f7cf42 | 706,170 |
def get_output_names(hf):
"""
get_output_names(hf)
Returns a list of the output variables names in the HDF5 file.
Args:
hf: An open HDF5 filehandle or a string containing the HDF5
filename to use.
Returns:
A sorted list of the output variable names in the HDF5 file.
"""
return sorted(map(str, hf['/output/data'].keys())) | 6607197166c9a63d834398b188e996a811b081ce | 706,172 |
def get_all_child_wmes(self):
""" Returns a list of (attr, val) tuples representing all wmes rooted at this identifier
val will either be an Identifier or a string, depending on its type """
wmes = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if wme.IsIdentifier():
wmes.append( (wme.GetAttribute(), wme.ConvertToIdentifier()) )
else:
wmes.append( (wme.GetAttribute(), wme.GetValueAsString()) )
return wmes | fb66aef96ca5fd5a61a34a86052ab9014d5db8a4 | 706,177 |
def has_three_or_more_vowels(string):
"""Check if string has three or more vowels."""
return sum(string.count(vowel) for vowel in 'aeiou') >= 3 | 8b0b683ebe51b18bdc5d6f200b41794a4cb3a510 | 706,180 |
import re
def edit_text_file(filepath: str, regex_search_string: str, replace_string: str):
"""
This function is used to replace text inside a file.
:param filepath: the path where the file is located.
:param regex_search_string: string used in the regular expression to find what has to be replaced.
:param replace_string: the string which will replace all matches found using regex_search_string.
:return: None
:raise RuntimeError: if regex_search_string doesn't find any match.
"""
# open the file and read the content
with open(filepath, "r") as f:
text_file = f.read()
# find all matches
matches = list(re.finditer(regex_search_string, text_file))
if not matches:
raise RuntimeError("No match has been found using the given regex_search_string!")
# replace all matches with replace_string
for match in matches:
text_file = text_file.replace(match.group(0), replace_string)
# overwrite the file
with open(filepath, "w") as f:
f.write(text_file)
return None | e0f5945a96f755a9c289262c3d19552c0e1b40fd | 706,183 |
def read_links(title):
"""
Reads the links from a file in directory link_data.
Assumes the file exists, as well as the directory link_data
Args:
title: (Str) The title of the current wiki file to read
Returns a list of all the links in the wiki article with the name title
"""
with open(f"link_data/{title}", "r") as f:
read_data = f.read()
return read_data.split("\n")[:-1] | 50f128bcf4cd36bc783bc848ab2e6b6280973ea3 | 706,185 |
def get_hyperparams(data, ind):
"""
Gets the hyperparameters for hyperparameter settings index ind
data : dict
The Python data dictionary generated from running main.py
ind : int
Gets the returns of the agent trained with this hyperparameter
settings index
Returns
-------
dict
The dictionary of hyperparameters
"""
return data["experiment_data"][ind]["agent_hyperparams"] | 3734f4cf00564a1aa7c852091d366e6e42b6d55b | 706,189 |
def blend0(d=0.0, u=1.0, s=1.0):
"""
blending function trapezoid
d = delta x = xabs - xdr
u = uncertainty radius of xabs estimate error
s = tuning scale factor
returns blend
"""
d = float(abs(d))
u = float(abs(u))
s = float(abs(s))
v = d - u #offset by radius
if v >= s: #first so if s == 0 catches here so no divide by zero below
b = 0.0
elif v <= 0.0:
b = 1.0
else: # 0 < v < s
b = 1.0 - (v / s)
return b | d501db66c34f28421c1517dcd3052fa7b2ee8643 | 706,190 |
def check_win(mat):
"""
Returns either:
False: Game not over.
True: Game won, 2048 is found in mat
"""
if 2048 in mat: # If won, terminal state is needed for RL agent
return True # Terminal state
else:
return False | 0824bc059cfa32b275c7b63f98d22e8a5b667e06 | 706,191 |
def mtl_to_json(mtl_text):
""" Convert Landsat MTL file to dictionary of metadata values """
mtl = {}
for line in mtl_text.split('\n'):
meta = line.replace('\"', "").strip().split('=')
if len(meta) > 1:
key = meta[0].strip()
item = meta[1].strip()
if key != "GROUP" and key != "END_GROUP":
mtl[key] = item
return mtl | 310be04e9fbf756e9cf5ead60e53aae974d2ed50 | 706,192 |
def str_to_col_grid_lists(s):
"""
Convert a string to selected columns and selected grid ranges.
Parameters:
s: (str) a string representing one solution.
For instance, *3**9 means 2 out of 5 dimensions are selected; the second and the last columns are selected,
and their corresponding grid ranges are 3 and 9. The function will return (1, 4) and (3, 9).
Return:
selected_cols (list): list of columns selected as indicated by the string.
selected_ranges (list): list of grid ranges selected as indicated by the string.
"""
selected_cols, selected_ranges = [], []
for i in range(len(s)):
if s[i] != "*":
selected_cols.append(i)
selected_ranges.append(int(s[i]))
return selected_cols, selected_ranges | 4f5c67afa0dc97070b08223acbe6764010fd213a | 706,194 |
def _join_type_and_checksum(type_list, checksum_list):
"""
Join checksum and their correlated type together to the following format:
"checksums": [{"type":"md5", "checksum":"abcdefg}, {"type":"sha256", "checksum":"abcd12345"}]
"""
checksums = [
{
"type": c_type,
"checksum": checksum,
}
for c_type, checksum in zip(type_list, checksum_list)
]
return checksums | 7f09ee72c6f51ad87d75a9b5e74ad8ef4776323f | 706,195 |
def apparent_attenuation(og, fg):
"""Apparent attenuation
"""
return 100.0 * (float(og) - float(fg)) / float(og) | e22ce07229baa4eacb7388280630d6097e21f364 | 706,203 |
def _p_value_color_format(pval):
"""Auxiliary function to set p-value color -- green or red."""
color = "green" if pval < 0.05 else "red"
return "color: %s" % color | ae58986dd586a1e6cd6b6281ff444f18175d1d32 | 706,207 |
import copy
def filter_parts(settings):
"""
Remove grouped components and glyphs that have been deleted or split.
"""
parts = []
temp = copy.copy(settings['glyphs'])
for glyph in settings['glyphs']:
name = glyph['class_name']
if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
parts.append(glyph)
temp.remove(glyph)
settings['glyphs'] = temp
# Remove from the training glyphs as well
temp2 = copy.copy(settings['training_glyphs'])
for glyph in settings['training_glyphs']:
name = glyph['class_name']
if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
temp2.remove(glyph)
settings['training_glyphs'] = temp2
return parts | f8d6a59eeeb314619fd4c332e2594dee3543ee9c | 706,208 |
def _parse_tree_height(sent):
"""
Gets the height of the parse tree for a sentence.
"""
children = list(sent._.children)
if not children:
return 0
else:
return max(_parse_tree_height(child) for child in children) + 1 | d6de5c1078701eeeb370c917478d93e7653d7f4f | 706,211 |
def create_indices(dims):
"""Create lists of indices"""
return [range(1,dim+1) for dim in dims] | 1a83b59eb1ca2b24b9db3c9eec05db7335938cae | 706,213 |
def destagger(var, stagger_dim, meta=False):
"""Return the variable on the unstaggered grid.
This function destaggers the variable by taking the average of the
values located on either side of the grid box.
Args:
var (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A variable
on a staggered grid.
stagger_dim (:obj:`int`): The dimension index to destagger.
Negative values can be used to choose dimensions referenced
from the right hand side (-1 is the rightmost dimension).
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is False.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`:
The destaggered variable. If xarray is enabled and
the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
var_shape = var.shape
num_dims = var.ndim
stagger_dim_size = var_shape[stagger_dim]
# Dynamically building the range slices to create the appropriate
# number of ':'s in the array accessor lists.
# For example, for a 3D array, the calculation would be
# result = .5 * (var[:,:,0:stagger_dim_size-2]
# + var[:,:,1:stagger_dim_size-1])
# for stagger_dim=2. So, full slices would be used for dims 0 and 1, but
# dim 2 needs the special slice.
full_slice = slice(None)
slice1 = slice(0, stagger_dim_size - 1, 1)
slice2 = slice(1, stagger_dim_size, 1)
# default to full slices
dim_ranges_1 = [full_slice] * num_dims
dim_ranges_2 = [full_slice] * num_dims
# for the stagger dim, insert the appropriate slice range
dim_ranges_1[stagger_dim] = slice1
dim_ranges_2[stagger_dim] = slice2
result = .5*(var[tuple(dim_ranges_1)] + var[tuple(dim_ranges_2)])
return result | 89bb08618fa8890001f72a43da06ee8b15b328be | 706,219 |
from typing import List
from typing import Tuple
def calculateCentroid(
pointCloud : List[Tuple[float, float, float]]
) -> Tuple[float, float, float]:
"""Calculate centroid of point cloud.
Arguments
--------------------------------------------------------------------------
pointCloud (float 3-tuple list) -- list of xyz coordinates.
Returns
--------------------------------------------------------------------------
centroid (float 3-tuple) -- centroid of points in point cloud.
"""
numPoints = len(pointCloud)
x, y, z = [], [], []
for point in pointCloud:
x.append(point[0])
y.append(point[1])
z.append(point[2])
x, y, z = sum(x) / numPoints, sum(y) / numPoints, sum(z) / numPoints
return x, y, z | 0e8d6d578a0a983fe1e68bff22c5cc613503ee76 | 706,220 |
import torch
from typing import Tuple
def get_median_and_stdev(arr: torch.Tensor) -> Tuple[float, float]:
"""Returns the median and standard deviation from a tensor."""
return torch.median(arr).item(), torch.std(arr).item() | d8fca5a97f00d14beecaa4b508442bc7a3637f86 | 706,226 |
def generate_arn(service, arn_suffix, region=None):
"""Returns a formatted arn for AWS.
Keyword arguments:
service -- the AWS service
arn_suffix -- the majority of the arn after the initial common data
region -- the region (can be None for region free arns)
"""
arn_value = "arn"
aws_value = "aws"
region_qualified = region if region else ""
return f"{arn_value}:{aws_value}:{service}:{region_qualified}:{arn_suffix}" | 53dcf55c3fb15784770d1c2d62375d1e750469f8 | 706,229 |
def prod_list(lst):
"""returns the product of all numbers in a list"""
if lst:
res = 1
for num in lst:
res *= num
return res
else:
raise ValueError("List cannot be empty.") | 8179e2906fb4b517d02972fd4647095d37caf6cd | 706,230 |
from typing import Dict
from typing import List
import pathlib
import json
def json_loader(path_to_json_file: str) -> Dict[str, List[str]]:
"""Reads a JSON file and converts its content in a dictionary.
Parameters
----------
path_to_json_file: str
The path to the JSON file.
Returns
-------
Dict[str, List[str]]
A dictionary of source codes with the corresponding lists of instrument symbols of
interest for each source.
"""
with pathlib.Path(path_to_json_file).open('r') as infile:
return json.loads(infile.read()) | d3f26504078e72e1522981a4d8ca5b60c3b8cf23 | 706,233 |
def rucklidge(XYZ, t, k=2, a=6.7):
"""
The Rucklidge Attractor.
x0 = (0.1,0,0)
"""
x, y, z = XYZ
x_dt = -k * x + y * (a - z)
y_dt = x
z_dt = -z + y**2
return x_dt, y_dt, z_dt | 9d10aa89fb684a95474d45399ae09a38b507913c | 706,242 |
def _none_or_int_or_list(val):
"""Input conversion - expecting None, int, or a list of ints"""
if val is None:
return None
elif isinstance(val, list):
return list(map(int, val))
else:
return int(val) | 1958c64175a1cd63f8a42044b40b84d7cf8baed2 | 706,248 |
def hour_of_day(datetime_col):
"""Returns the hour from a datetime column."""
return datetime_col.dt.hour | 18b2f6e16ccbcb488f3863968466fda14f669d8b | 706,249 |