content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---
def _union_all(iterables):
    """Return a set representing the union of all the contents of an
    iterable of iterables.
    """
    out = set()
    for iterable in iterables:
        out.update(iterable)
    return out | 673bc7493007c6cf781d84490023cea7139f1e93 | 8,828 |
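A quick usage sketch for the record above (hypothetical inputs, not part of the dataset):

    _union_all([[1, 2], {2, 3}, (3, 4)])  # -> {1, 2, 3, 4}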
from typing import List
def construct_relative_positions(pos: int, max_length: int) -> List[int]:
    """Construct relative positions to a specified pos
    Args:
        pos: the pos that will be `0`
        max_length: max sequence length
    Returns:
        a list of relative positions
    Raises:
        ValueError: if pos is less than 0 or greater than or equal to max_length
    """
    if pos < 0 or pos >= max_length:
        raise ValueError(f"pos: {pos} is not in [0, {max_length})")
    positions = list(range(0, max_length, 1))
    positions = list(map(lambda x: abs(x - pos), positions))
    return positions | 152c59d288f797ef87f9e0dbf1b415b71f1fe9e7 | 8,829 |
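A quick usage sketch (hypothetical inputs):

    construct_relative_positions(2, 5)  # -> [2, 1, 0, 1, 2]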
import pathlib
def _match_path(p1, p2):
    """Compare two paths from right to left and return True if they could refer
    to the same file.
    As a special case, if the second argument is None, or empty, it is always
    considered a match. This simplifies query logic when the target does not
    have a path component.
    If p2 starts with "./" then the paths must match entirely. This is to allow
    addressing in the case where a path is a prefix of another.
    """
    if not p2:
        return True
    part1 = pathlib.Path(p1).parts
    part2 = pathlib.Path(p2).parts
    if p2.startswith(".") and part2 and not part2[0].startswith("."):
        minlen = 0
    else:
        minlen = min(len(part1), len(part2))
    return part1[-minlen:] == part2[-minlen:] | 7935e4312c444c9e2d0ee62611e1db5d6af210ad | 8,832 |
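A quick usage sketch (hypothetical paths):

    _match_path("/home/user/a/b.txt", "a/b.txt")    # -> True, trailing parts match
    _match_path("/home/user/a/b.txt", "./a/b.txt")  # -> False, "./" demands a full match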
def add_matches(flight_matches, flight_ids):
    """
    Add new matches to the flight_ids dict.
    Returns the number of newly matched flights.
    """
    matches = 0
    for i in flight_matches.index:
        prev_id = flight_matches.loc[i, 'FLIGHT_ID_x']
        next_id = flight_matches.loc[i, 'FLIGHT_ID_y']
        if next_id not in flight_ids:
            flight_ids[next_id] = prev_id
            matches += 1
    return matches | 93d086cd580ac13622c4acaa359fa2a65b718ff3 | 8,845 |
def get_requirements_from_file(requirements_file):
    """
    Get requirements from file.
    :param str requirements_file: Name of file to parse for requirements.
    :return: List of requirements
    :rtype: list(str)
    """
    requirements = []
    with open(requirements_file) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith(("#", "-e")):
                requirements.append(line)
    return requirements | 5ec6ad1f4c2b22aae1cfa7eb3888b3279ffeca31 | 8,846 |
from typing import Tuple
from typing import List
def extract_encoded_headers(payload: bytes) -> Tuple[str, bytes]:
    """This function's purpose is to extract lines that can be decoded using the UTF-8 decoder.
    >>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\n".encode("utf-8"))
    ('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'')
    >>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\nThat IS totally random.".encode("utf-8"))
    ('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'That IS totally random.')
    """
    result: str = ""
    lines: List[bytes] = payload.splitlines()
    index: int = 0
    for index, line in enumerate(lines):
        if line == b"":
            return result, b"\r\n".join(lines[index + 1 :])
        try:
            result += line.decode("utf-8") + "\r\n"
        except UnicodeDecodeError:
            break
    return result, b"\r\n".join(lines[index + 1 :]) | d1f3a371419b81b0e7ede1a7f90401cf7a89559f | 8,847 |
def filter_ignore(annotations, filter_fns):
    """ Set the ``ignore`` attribute of the annotations to **True** when they do not pass the provided filter functions.
    Args:
        annotations (dict or list): Dictionary containing box objects per image ``{"image_id": [box, box, ...], ...}`` or list of annotations
        filter_fns (list or fn): List of filter functions that get applied or single filter function
    Returns:
        (dict or list): boxes after filtering
    """
    if callable(filter_fns):
        filter_fns = [filter_fns]
    if isinstance(annotations, dict):
        for _, values in annotations.items():
            for anno in values:
                if not anno.ignore:
                    for fn in filter_fns:
                        if not fn(anno):
                            anno.ignore = True
                            break
    else:
        for anno in annotations:
            if not anno.ignore:
                for fn in filter_fns:
                    if not fn(anno):
                        anno.ignore = True
                        break
    return annotations | f59e6c481eb744245ae9503ae07ed88d1f3f8253 | 8,850 |
def update_dict_params_for_calibration(params):
    """
    Update some specific parameters that are stored in a dictionary but are updated during calibration.
    For example, we may want to update params['default']['compartment_periods']['incubation'] using the
    parameter params['default']['compartment_periods_incubation'].
    :param params: dict
        contains the model parameters
    :return: the updated dictionary
    """
    if "n_imported_cases_final" in params:
        params["data"]["n_imported_cases"][-1] = params["n_imported_cases_final"]
    for location in ["school", "work", "home", "other_locations"]:
        if "npi_effectiveness_" + location in params:
            params["npi_effectiveness"][location] = params["npi_effectiveness_" + location]
    for comp_type in [
        "incubation",
        "infectious",
        "late",
        "hospital_early",
        "hospital_late",
        "icu_early",
        "icu_late",
    ]:
        if "compartment_periods_" + comp_type in params:
            params["compartment_periods"][comp_type] = params["compartment_periods_" + comp_type]
    return params | 8aaf9cb030076adfddb7c8d5740a2c8cc5c21c06 | 8,851 |
def counting_sort(numbers):
    """Sort given numbers (integers) by counting occurrences of each number,
    then looping over counts and copying that many numbers into output list.
    Running time: O(n + k) where k is the range of numbers, because a very large k affects the run time significantly.
    Memory usage: O(k) because the count array has one slot per value in the range."""
    # create a count array of zeros, sized max(numbers) + 1 so the max value itself is a valid index
    temp_array = [0] * (max(numbers) + 1)
    # loop through numbers
    for num in numbers:
        # if the temp array's slot for that number is 0 - meaning it's empty - then set it to 1
        if temp_array[num] == 0:
            temp_array[num] = 1
        # else there's already something in there so just add one to it
        else:
            temp_array[num] += 1
    numbers = []
    # loop through the temp_array
    for y in range(len(temp_array)):
        # while the count is not 0, append the value and decrement its count
        while temp_array[y] != 0:
            numbers.append(y)
            temp_array[y] -= 1
    return numbers | 7123a215f685ff251c13cfd5210fe1887fe3795f | 8,852 |
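A quick usage sketch (hypothetical inputs):

    counting_sort([4, 1, 3, 1, 0])  # -> [0, 1, 1, 3, 4]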
import math
import statistics
def _get_average_da(streams_da: dict) -> dict:
    """Calculate average data availability among all data streams"""
    total_results = {}
    for k, v in streams_da.items():
        for i, j in v.items():
            if i not in total_results:
                total_results[i] = []
            total_results[i].append(j)
    return {k: math.ceil(statistics.mean(v)) for k, v in total_results.items()} | db2fde9e13b4cbb5ce43d5f3c2d2ff2abd30f487 | 8,857 |
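A quick usage sketch (hypothetical stream data, keyed per period):

    _get_average_da({"s1": {"2021-01": 90, "2021-02": 80}, "s2": {"2021-01": 70}})
    # -> {"2021-01": 80, "2021-02": 80}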
def is_sequencing(lane_info):
    """
    Determine if we are just sequencing and not doing any follow-up analysis
    """
    if lane_info['experiment_type'] in ('De Novo', 'Whole Genome'):
        return True
    else:
        return False | 822125f8603969a4624e07188874aae40f8752d3 | 8,860 |
def get_cluster_name(tags):
    """
    Get the cluster name from the list of specified tags
    :param tags: tags
    :type tags: [str]
    :returns: cluster name
    :rtype: str
    """
    for tag in tags:
        if tag.startswith("storm.cluster.name:"):
            return tag.replace("storm.cluster.name:", "")
    return None | 2b811f32d5c61bb093d6a68fcaecddbdce3be057 | 8,862 |
from typing import BinaryIO
import io
import wave
import struct
def fixture_two_chunk_plain_wav() -> BinaryIO:
    """Creates a fixture WAVE file with two distinct sections.
    The audio is 100Hz mono. Each section is 10 samples long. Samples in the first
    alternate between +/-(1 << 5) and in the second between +/-(1 << 10).
    Returns:
        File-like object with the bytes of the fixture WAVE file, positioned at the
        beginning.
    """
    sample_rate = 100
    chunk_duration_samples = 10
    plain_wav_io = io.BytesIO()
    with wave.open(plain_wav_io, 'wb') as writer:
        writer.setnchannels(1)
        writer.setsampwidth(2)
        writer.setframerate(sample_rate)
        signs = [pow(-1, i) for i in range(chunk_duration_samples)]
        for magnitude in [(1 << 5), (1 << 10)]:
            writer.writeframes(
                struct.pack('<%dh' % len(signs), *[magnitude * s for s in signs]))
    plain_wav_io.seek(0)
    return plain_wav_io | 3c6d06409b40228348c3e5697b8fdc1b9bc73c90 | 8,863 |
def _testProduct_to_dict(product_obj, ctx):
    """
    Returns testProduct instance in dict format.
    Args:
        product_obj (_TestProduct): testProduct instance.
        ctx (SerializationContext): Metadata pertaining to the serialization
            operation.
    Returns:
        dict: product_obj as a dictionary.
    """
    return {"productId": product_obj.product_id,
            "productName": product_obj.name,
            "price": product_obj.price,
            "tags": product_obj.tags,
            "dimensions": product_obj.dimensions,
            "warehouseLocation": product_obj.location} | 7db80ae68cb6966273e53a4f0fb2d9aad52fa119 | 8,869 |
def hxltm_hastag_de_csvhxlated(csv_caput: list) -> list:
    """Convert CSV-style headers to HXL-style hashtags.
    Make this type of conversion:
    - 'item__conceptum__codicem' => '#item+conceptum+codicem'
    - 'item__rem__i_ara__is_arab' => '#item+rem+i_ara+is_arab'
    - '' => ''
    Args:
        csv_caput (list): Array of input items
    Returns:
        [list]: Array of converted items
    """
    resultatum = []
    for item in csv_caput:
        if len(item):
            resultatum.append('#' + item.replace('__', '+').replace('?', ''))
        else:
            resultatum.append('')
    return resultatum | 1ab1503c26c86c969e699236f97842ae74ae0ae5 | 8,870 |
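A quick usage sketch:

    hxltm_hastag_de_csvhxlated(['item__conceptum__codicem', ''])
    # -> ['#item+conceptum+codicem', '']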
import pickle
def load_pickle(file, decompress=True):
    """
    Load a .pickle file.
    :param file: file .pickle to load.
    :param decompress: whether to unpickle the contents; if False, the raw bytes are returned.
    :return: loaded data.
    """
    with open(file, "rb") as f:
        if decompress:
            data = pickle.load(f)
        else:
            data = f.read()
    return data | ce86a034c87ddd3a74de40465d60cb2f55d1089c | 8,877 |
def shp2geojsonOgr(layer):
    """Shapefile to Geojson conversion using ogr."""
    cmd = 'ogr2ogr -f GeoJSON -t_srs'\
          + ' crs:84'\
          + ' {layer}.geojson'\
          + ' {layer}.shp'
    cmd = cmd.format(layer=layer)
    return cmd | a1bbf42d83cf9d26542c02eb1a16da971a7d0a9e | 8,882 |
def lut_canonical_potential_edge(potential_edge):
    """Returns a canonical name of a potential edge, with respect to LUT height.
    Parameters
    ----------
    potential_edge : str
        Instantiated name of the potential edge to be canonicized.
    Returns
    -------
    str
        A canonical potential edge.
    """
    prefix, u, v = potential_edge.split("__")
    lut_span = int(v.split('_')[1]) - int(u.split('_')[1])
    if lut_span < 0:
        offset_str = "lutm%d_" % abs(lut_span)
    else:
        offset_str = "lutp%d_" % abs(lut_span)
    canonical = "__".join([prefix, '_'.join(u.split('_')[2:]),
                           offset_str + '_'.join(v.split('_')[2:])])
    return canonical | ccae7b98de4aa18a2ffa72c0faf6b0fe7b001db0 | 8,883 |
def encode_boolean(value):
    """
    Returns 1 or 0 if the value is True or False.
    None gets interpreted as False.
    Otherwise, the original value is returned.
    """
    if value is True:
        return 1
    if value is False or value is None:
        return 0
    return value | 4a6442438d3a7e85597ac76d2f48ce44ba505be2 | 8,885 |
def associate_node_id(tr, node=""):
    """
    Returns a dictionary with key 'id' and value as the ID associated
    with the node string.
    """
    return {"id": tr.get_uml_id(name=node)} | 5e6eb1076cdeed9abc8b00d1de60a255f6292dd3 | 8,891 |
import re
def find_nonAscii(text):
    """ Return the first appearance of a non-ASCII character (in a `Match` object), or `None`. """
    regex = re.compile(r'([^\x00-\x7F])+')
    return re.search(regex, text) | bc299752eab5088214f9e1f62add388bf0721153 | 8,895 |
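A quick usage sketch:

    m = find_nonAscii("caffé latte")
    m.start() if m else None  # -> 4, the index of 'é'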
def linear_search(arr, x):
    """
    Performs a linear search
    :param arr: Iterable of elements
    :param x: Element to search for
    :return: Index if element found else None
    """
    l = len(arr)
    for i in range(l):
        if arr[i] == x:
            return i
    return None | 2cb03eef6c9bb1d63df97c1e387e9cbfe703769a | 8,896 |
def out_labels(G, q):
    """Returns a list of each of the labels appearing on the edges
    starting at `q` in `G`.
    Parameters
    ----------
    G : labeled graph
    q : vertex in `G`
    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(1, 2, label="a")
    >>> G.add_edge(1, 3, label="a")
    >>> G.add_edge(1, 1, label="b")
    >>> sd.out_labels(G, 1)
    ['a', 'a', 'b']
    """
    return [label for (_, _, label) in G.out_edges(q, data="label")] | 9849b96b562c74259b631907335a40f807e11709 | 8,900 |
def convert_tf_config_to_jax_bert(config):
    """Convert TF BERT model config to be compatible with JAX BERT model.
    Args:
        config: dictionary of TF model configurations
    Returns:
        dictionary of param names and values compatible with JAX BERT model
    """
    unnecessary_keys = ['initializer_range', 'backward_compatible',
                        'embedding_size']
    for key in unnecessary_keys:
        if key in config:
            config.pop(key)
    # change TF parameter names to match JAX parameter names
    mapping = {
        'attention_dropout_rate': 'attention_probs_dropout_prob',
        'hidden_activation': 'hidden_act',
        'dropout_rate': 'hidden_dropout_prob',
        'emb_dim': 'hidden_size',
        'mlp_dim': 'intermediate_size',
        'max_len': 'max_position_embeddings',
        'num_heads': 'num_attention_heads',
        'num_layers': 'num_hidden_layers'
    }
    for jax_key, tf_key in mapping.items():
        config[jax_key] = config.pop(tf_key)
    return config | 2e527dbdbef404ebf3015eca2aa9eea2b9d892e0 | 8,902 |
def find_sequences_before(context, strip):
    """
    Returns a list of sequences that are before the strip in the current context
    """
    return [s for s in context.sequences if s.frame_final_end <= strip.frame_final_start] | d49a950c06c2a92d076d9790055c21d30afdd627 | 8,903 |
def get_percent_alloc(values):
    """
    Determines a portfolio's allocations.
    Parameters
    ----------
    values : pd.DataFrame
        Contains position values or amounts.
    Returns
    -------
    allocations : pd.DataFrame
        Positions and their allocations.
    """
    return values.divide(
        values.sum(axis='columns'),
        axis='rows'
    ) | 7f4ec48b2adbdb812292930e7fda50038b6d5e96 | 8,906 |
def format_latex(ss):
    """
    Formats a string so that it is compatible with Latex.
    :param ss: The string to format
    :type ss: string
    :return: The formatted string
    :rtype: string
    """
    tt = (str(ss).replace('_', ' ')
                 .replace('%', '\\%')
          )
    return tt | 5081e65375faf592f2f1fb52d11b0dcee99fa85f | 8,909 |
def MatchingFileType(file_name, extensions):
    """Returns true if the file name ends with one of the given extensions."""
    return bool([ext for ext in extensions if file_name.lower().endswith(ext)]) | 5fe5121d270cdfc13f6f9f3c72471fc3572b0efe | 8,911 |
def escape_path(value: bytes) -> str:
    """
    Take a binary path value, and return a printable string, with special
    characters escaped.
    """
    def human_readable_byte(b: int) -> str:
        if b < 0x20 or b >= 0x7F:
            return "\\x{:02x}".format(b)
        elif b == ord(b"\\"):
            return "\\\\"
        return chr(b)
    return "".join(human_readable_byte(b) for b in value) | 07a0c28cd531d8e3bd4330afe1d4d51265cd80c4 | 8,912 |
def raw_reward_threshold(threshold):
    """Return a reward processor that cuts off at a threshold."""
    def fn(metadata):
        if metadata['raw_reward'] > threshold:
            return 1.
        elif metadata['raw_reward'] > 0:
            return -1.
        return metadata['raw_reward']
    return fn | 1efbd90c352d99c6e65b05214d8ccb82bb155606 | 8,915 |
import socket
def get_hostname() -> str:
    """
    Get the current hostname, or fall back to localhost.
    """
    try:
        return socket.getfqdn()
    except Exception:
        return 'localhost' | c53bd9fae0fbbae0c0b4f84e64064d7bfd2fd61e | 8,917 |
from typing import Dict
import pkgutil
import io
def _load_categories(filepath: str) -> Dict[str, str]:
    """Load data for domain category matching.
    Args:
        filepath: relative path to csv file containing domains and categories
    Returns:
        Dictionary mapping domains to categories
    """
    data = pkgutil.get_data(__name__, filepath)
    if not data:
        raise FileNotFoundError(f"Couldn't find file {filepath}")
    content = io.TextIOWrapper(io.BytesIO(data), encoding='utf-8')
    categories = {}
    for line in content.readlines():
        domain, category = line.strip().split(',')
        categories[domain] = category
    return categories | 25ec3c4808d4a9624112e277c0597c868a12572c | 8,922 |
def get_execution_platform(command, filename):
    """
    <Purpose>
        Returns the execution platform based on a best-guess approach using
        the specified command, as well as the file's extension. The
        command takes precedence over the file extension. If the extension
        is not recognized, then it will be assumed that it is repyV2.
    <Arguments>
        command: The command that should be parsed.
        filename: The file whose repy version should be returned.
    <Side Effects>
        None
    <Exceptions>
        None
    <Returns>
        A string indicating which version of repy a program is in, based on
        its file extension. This will be either "v1" or "v2".
    """
    if command.endswith('v2'):
        return 'repyV2'
    elif command.endswith('v1'):
        return 'repyV1'
    # Information on extensions for repy programs can be found on #1286.
    if filename.endswith('.r2py'):
        return 'repyV2'
    else:
        return 'repyV1' | ceb6afab191269b032bc6122978f630682cac9ca | 8,928 |
import torch
def bucketize(tensor, bucket_boundaries):
    """Equivalent to numpy.digitize
    Notes
    -----
    Torch does not have a built in equivalent yet. I found this snippet here:
    https://github.com/pytorch/pytorch/issues/7284
    """
    result = torch.zeros_like(tensor, dtype=torch.int32)
    for boundary in bucket_boundaries:
        result += (tensor > boundary).int()
    return result | ee48e11de50e52278ddf940e32c04e330dceed97 | 8,935 |
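A quick usage sketch (hypothetical boundaries):

    t = torch.tensor([0.2, 1.5, 3.7])
    bucketize(t, [1.0, 3.0])  # -> tensor([0, 1, 2], dtype=torch.int32)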
import re
import inspect
def _get_task_path(wrapped, instance) -> str:
    """Get the synthetic URL path for a task, based on the `wrapt` parameters."""
    funcname = wrapped.__name__
    if funcname.startswith("_") and not funcname.endswith("_"):
        funcname = re.sub(r"^_+", repl="", string=funcname, count=1)
    if instance is None:
        return funcname
    else:
        if inspect.isclass(instance):
            return "/".join([instance.__name__, funcname])
        else:
            return "/".join([instance.__class__.__name__, funcname]) | 16ca96d29abddfa104afc5a0ec466e0bd1d202dc | 8,936 |
def make_email(slug):
    """Get the email address for the given slug"""
    return '{}@djangogirls.org'.format(slug) | f07dc679d4ee2d3e13939e5b13897b98766f5037 | 8,939 |
def s_input(prompt : str = ">", accepted_inputs : list = ["break"], case_sensitive : bool = False, fail_message : str = "") -> str:
    """Keeps asking for user input until the answer is acceptable.
    Args:
        prompt (str, optional): User is prompted with this each time. Defaults to ">".
        accepted_inputs (list, optional): List of inputs that allows the user to continue. Defaults to ["break"].
        case_sensitive (bool, optional): Whether or not the input is case sensitive. Defaults to False.
        fail_message (str, optional): The message to print when the input is invalid. Leave blank for no message.
    Returns:
        str: The valid user input. Will be lowercase if case_sensitive is False.
    """
    user_input = ""
    first = True  # For checking if the fail message should print or not
    while user_input not in accepted_inputs:
        if fail_message != "" and not first:
            print(fail_message)  # Prints the assigned fail message if it isn't the first iteration
        user_input = input(prompt)  # Gets user input
        if not case_sensitive:
            user_input = user_input.lower()  # Sets the input to lower if needed
        first = False  # Ensures that it is not the first iteration anymore
    return user_input | 8adda3fefe9111167af387e569d080e88e239e4e | 8,941 |
import requests
from bs4 import BeautifulSoup
def priprav_bs(url, params):
    """BeautifulSoup of the whole page
    url: str
    params: dict
    Returns: bs4.BeautifulSoup
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:80.0) Gecko/20100101 Firefox/80.0'}
    r = requests.get(url, params=params, headers=headers)
    r.encoding = 'utf-8'
    return BeautifulSoup(r.text, 'html.parser') | 30775e56960829413211524d615ca0dea6bc8b0c | 8,945 |
from functools import reduce
def solve(ar):
    """
    Given an array of 5 integers, return the minimal and maximal sum of 4 out of
    5 of the integers.
    """
    # Just sort the list of integers in place and take the sum of the first 4
    # then the last 4.
    ar.sort()
    minSum = reduce((lambda x, y: x + y), ar[0:4])
    maxSum = reduce((lambda x, y: x + y), ar[1:5])
    return (minSum, maxSum) | 68d650c51cbe611c51c0b5754c61b541cb1838f8 | 8,946 |
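A quick usage sketch:

    solve([1, 2, 3, 4, 5])  # -> (10, 14)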
def calculate_border(grid_dims, width, height):
    """Calculate each line in all borders.
    Args:
        grid_dims: tuple of the number of tiles in grid. In format `(row, column)`
        width: float width in pixels
        height: float height in pixels
    Returns:
        list: containing dictionaries keys `(x, y)` and values for the two points for each line in grid
    """
    return [
        {
            'x': [c_idx * width] * 2,
            'y': [0, height * grid_dims[0]],
        } for c_idx in range(grid_dims[1] + 1)
    ] + [
        {
            'x': [0, width * grid_dims[1]],
            'y': [r_idx * height] * 2,
        } for r_idx in range(grid_dims[0] + 1)
    ] | b4ec0e063034547783e871abf1a46d943648df67 | 8,947 |
def _parse_face(face_row):
    """Parses a line in a PLY file which encodes a face of the mesh."""
    face = [int(index) for index in face_row.strip().split()]
    # Assert that number of vertices in a face is 3, i.e. it is a triangle
    if len(face) != 4 or face[0] != 3:
        raise ValueError(
            'Only supports face representation as a string with 4 numbers.')
    return face[1:] | c0cf7472705544c3089a6c1c82190bcb8bd5f463 | 8,948 |
def get_mark(name, task):
    """Getting marks of students for certain student and task"""
    return int(input('Mark for {}, task {} > '.format(name, task))) | 00d8a0bf1ab97f600a3e6d2c8f488563419a95e1 | 8,959 |
def metadata_parser(f):
    """
    Parses a metadata file into dictionary.
    The metadata file is expected to have the following format:
    id;name;dtype
    where:
    - id denotes packet id (unsigned char or 1 byte uint)
    - name is the data channel name (str)
    - dtype is expected datatype (str)
    :param f: A file object with the path to metadata.
    :type f: file object
    :return: metadata, a dict where id and name is one-to-one, and both
        are keywords.
    """
    metadata = {'ids': {}, 'names': {}}
    for line in f:
        line = line.strip().split(';')
        if (line[0] in metadata['ids'] or line[1] in metadata['names']):
            print('Warning: overlapping entry on id %s or name "%s"' % (line[0], line[1]))
        entry = {
            'id': int(line[0]),
            'name': line[1],
            'type': line[2]
        }
        metadata['ids'][line[0]] = entry
        metadata['names'][line[1]] = entry
    return metadata | 91ccec2a0231f35e0693173e67bfda5498f941f5 | 8,969 |
def fio_json_output_with_error(fio_json_output):
    """
    Example of fio --output-format=json output, with io_u error. Based on
    actual test run.
    """
    err_line = (
        "fio: io_u error on file /mnt/target/simple-write.0.0: "
        "No space left on device: write offset=90280222720, buflen=4096"
    )
    return err_line + "\n" + fio_json_output | babfcd242a47091dc9b8acd29f24b6ebb398c679 | 8,970 |
def has_equal_properties(obj, property_dict):
    """
    Returns True if the given object has the properties indicated by the keys of the given dict, and the values
    of those properties match the values of the dict
    """
    for field, value in property_dict.items():
        try:
            if getattr(obj, field) != value:
                return False
        except AttributeError:
            return False
    return True | d96b17124121af5db31c9db096b5010aff01b233 | 8,972 |
def _GetCoveredBuilders(trybot_config):
    """Returns a dict mapping masters to lists of builders covered in config."""
    covered_builders = {}
    for master, builders in trybot_config.items():
        covered_builders[master] = list(builders.keys())
    return covered_builders | e759be62c1c57045dca98e40f83beda6a7ddf7e5 | 8,974 |
def lookup(cubeWithData, cubeWithMap, sharedIndex):
    """
    Returns the value of cubeWithData indexed by the index of cubeWithMap.
    cubeWithData must be indexed by sharedIndex and cubeWithData values must correspond to elements of sharedIndex.
    For example: Let's say you have a cube with an estimated inflation rate by Country ("inflation_rate" is the name of the cube; "country" is the name of the index) and you want to assign it to the corresponding Company depending on its location. On the other hand, there's a many-to-one map where each Company is allocated to a single Country ("country_to_company_allocation"). The sharedIndex, in this case, is Country ("country").
    As a result,
        cp.lookup( inflation_rate , country_to_company_allocation , country )
    will return the estimated inflation rate by Company.
    """
    _final_cube = ((cubeWithMap == sharedIndex) * cubeWithData).sum(sharedIndex)
    return _final_cube | 979e4c3be85be484d1deb3ef48b78dae9f0527cf | 8,979 |
def notimplemented(f):
    """Takes a function f with a docstring and replaces it with a function which
    raises NotImplementedError(f.__doc__). Useful to avoid having to retype
    docstrings on methods designed to be overridden elsewhere."""
    def wrapper(self, *args, **kws):
        raise NotImplementedError(f.__doc__)
    wrapper.__doc__ = f.__doc__
    wrapper.__name__ = f.__name__
    return wrapper | eefdee57d0ebb0727e9238bc7f678d90b36100a6 | 8,980 |
def get_channel_index(image, label):
    """
    Get the channel index of a specific channel
    :param image: The image
    :param label: The channel name
    :return: The channel index (None if not found)
    """
    labels = image.getChannelLabels()
    if label in labels:
        idx = labels.index(label)
        return idx
    return None | 3980e83f61ac755f1fbcadef27964a405a0aaf31 | 8,981 |
def parse_both_2(image_results):
    """ parses the tags and repos from a image_results with the format:
    {
        'image': [{
            'pluginImage': {
                'ibmContainerRegistry': 'internalRepo/name'
                'publicRegistry': 'repo/name'
            },
            'driverImage': {
                'ibmContainerRegistry': 'internalRepo/name'
                'publicRegistry': 'repo/name'
            },
            'pluginBuild': 'X.X.X',
            'driverBuild': 'X.X.X',
            'pullPolicy': 'Always'
        }],
        'pluginImage': [{EXACT SAME CONTENTS AS ABOVE}],
        'driverImage': [{EXACT SAME CONTENTS AS ABOVE}]
    }
    Current known apps with this format:
        ibm-object-storage-plugin
    """
    tags = []
    repos = []
    image_info = image_results['image'][0]
    for k, v in image_info.items():
        if "Build" in k:
            tags.append(v)
        elif "Image" in k:
            repos.append(v['publicRegistry'])
    return tags, repos | 020cde41855d3bca26797cd9786e2733a50b6a00 | 8,983 |
def get_bit(byte, bit_num):
    """ Return bit number bit_num from right in byte.
    @param int byte: a given byte
    @param int bit_num: a specific bit number within the byte
    @rtype: int
    >>> get_bit(0b00000101, 2)
    1
    >>> get_bit(0b00000101, 1)
    0
    """
    return (byte & (1 << bit_num)) >> bit_num | 4f25c4ccdc4c3890fb4b80d42d90bfb94d6799c3 | 8,985 |
def authorize_payment(payment):
    """Activate client's payment authorization page from a PayPal Payment."""
    for link in payment.links:
        if link.rel == "approval_url":
            # Convert to str to avoid Google App Engine Unicode issue
            approval_url = str(link.href)
            return approval_url | e489a1a2029535a8400f427a523665c49c872ef6 | 8,987 |
import ipaddress
def is_ip_address(ipaddr):
    """ Simple helper to determine if given string is an ip address or subnet """
    try:
        ipaddress.ip_interface(ipaddr)
        return True
    except ValueError:
        return False | 56abc5a1a82f6a2e0c7532182867fdfae76a3b89 | 8,992 |
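A quick usage sketch:

    is_ip_address("192.168.0.1/24")  # -> True
    is_ip_address("example.com")     # -> False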
import json
def isjson(value):
    """
    Return whether or not given value is valid JSON.
    If the value is valid JSON, this function returns ``True``, otherwise ``False``.
    Examples::
        >>> isjson('{"Key": {"Key": {"Key": 123}}}')
        True
        >>> isjson('{ key: "value" }')
        False
    :param value: string to validate JSON
    """
    try:
        json.loads(value)
    except ValueError:
        return False
    return True | 0527a07500337c8ce8e39a428c71556d6e91c5dd | 8,993 |
import logging
def get_child_logger(*names: str):
    """Returns a child logger of the project-level logger with the name toolshed.<name>."""
    return logging.getLogger("toolshed." + '.'.join(names)) | a1760d34a620ffa3caf8abaca6cfb911209cf074 | 8,996 |
def find_correct_weight(program_weights, program, correction):
    """Return new weight for node."""
    return program_weights[program] + correction | 994c25efef10fa37971372f444a879e816708830 | 8,998 |
def get_instance_id(finding):
    """
    Given a finding, go find and return the corresponding AWS Instance ID
    :param finding:
    :return:
    """
    for kv in finding['attributes']:
        if kv['key'] == 'INSTANCE_ID':
            return kv['value']
    return None | f4f6826dc02664b95ca8fdc91d89a6429192b871 | 9,002 |
def dVdc_calc(Vdc, Ppv, S, C):
    """Calculate derivative of Vdc"""
    dVdc = (Ppv - S.real)/(Vdc*C)
    return dVdc | 59d2708726e078efb74efce0bac2e397ba846d89 | 9,004 |
def check_order(df, topcol, basecol, raise_error=True):
    """
    Check that all rows are either depth ordered or elevation_ordered.
    Returns 'elevation' or 'depth'.
    """
    assert basecol in df.columns, f'`basecol` {basecol} not present in {df.columns}'
    if (df[topcol] > df[basecol]).all():
        return 'elevation'
    elif (df[topcol] < df[basecol]).all():
        return 'depth'
    elif raise_error:
        raise ValueError('Dataframe has inconsistent top/base conventions')
    else:
        return None | 9b4e7b9938bb2fe14ab99d5c111883a0f6d73337 | 9,005 |
def _get_sdk_name(platform):
    """Returns the SDK name for the provided platform.
    Args:
        platform: The `apple_platform` value describing the target platform.
    Returns:
        A `string` value representing the SDK name.
    """
    return platform.name_in_plist.lower() | 0bc7f446472f44e52ea0b11cda7397e48848f0ef | 9,009 |
import pathlib
def file_exists(file_path):
    """ Returns true if file exists, false if it doesn't """
    file = pathlib.Path(file_path)
    return file.is_file() | d8219f71cf891d2d4e9c95670bd90b957becfdc5 | 9,011 |
import hashlib
import json
def hasher(obj):
    """Returns non-cryptographic hash of a JSON-serializable object."""
    h = hashlib.md5(json.dumps(obj).encode())
    return h.hexdigest() | 967ba4a1513bbe4a191900458dfce7a1001a8125 | 9,012 |
def _root_sort_key(root):
    """
    Allow root comparison when sorting.
    Args:
        root (str or re.Pattern): Root.
    Returns:
        str: Comparable root string.
    """
    try:
        return root.pattern
    except AttributeError:
        return root | 51a7e51b58cbdf8c3277844903950282a5368815 | 9,013 |
def get_layer(keras_tensor):
    """
    Returns the corresponding layer to a keras tensor.
    """
    layer = keras_tensor._keras_history[0]
    return layer | 6b3c950d9bf9c81895c4e7d4d436cd48359143bd | 9,014 |
import random
def subsample_files_in_tree(root, filename_pattern, size):
    """
    Sub-sample list of filenames under root folder.
    This ensures to keep sample balance among folders.
    Arguments:
        root: Root folder to search files from.
        filename_pattern: Wildcard pattern like: '*.png'.
        size:
            (0, 1): size to sub-sample; 0.5 for 50%.
            1 or 1.: 100%.
            integer > 1: Number of samples.
    Returns:
        List of sub-sampled files.
        Note that number of files in a folder could be less than size,
        if original number of files is less than size. No oversampling.
    """
    files = []
    folders = [f for f in root.glob('**') if f.is_dir()]
    for folder in folders:
        candidates = [str(f) for f in folder.glob(filename_pattern)]
        n_sample = int(len(candidates) * size) if size < 1. else \
            len(candidates) if int(size) == 1 else min(size, len(candidates))
        if n_sample <= 0:
            continue
        files.extend(random.sample(candidates, n_sample))
    return files | 6bffdf683071d712f0b1ccb382a145a74f642d24 | 9,016 |
import random
def pick_card(deck_to_pick_from):
    """Returns a random card from the deck"""
    return random.choice(deck_to_pick_from) | 2267058ed9833d7b67dbc3142c98a88a4e3cefb3 | 9,019 |
def split_apt(field):
    """
    Parses the ADDRESS field (<site address>, <apt number> <municipality>) from the CLEMIS CFS Report
    and returns the apartment number.
    """
    if ',' in field:
        f = field.split(', ')
        f = f[1]
        f = f.split(' ')
        apt = f[0]
    else:
        apt = None
    return apt | 881f73ebe3de52ebd3ff31448ad488e2586be5bf | 9,022 |
def get_inverse_transform(transform):
    """Generates a transform which is the inverse of the provided transform"""
    inverse_transform = [0] * len(transform)
    for i in range(len(transform)):
        inverse_transform[transform[i]] = i
    return inverse_transform | 80d577292c98a84eecbcfb84cef935245385b63b | 9,028 |
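A quick usage sketch:

    get_inverse_transform([2, 0, 1])  # -> [1, 2, 0]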
def extract_fields(gh_json, fields):
    """
    extract_fields Extract field from GH API data
    Extract fields from GH API data and standardize name of keys
    Parameters
    ----------
    gh_json : json
        JSON content from Github
    fields : dict
        A list of fields to extract and the name we want to use as standard.
    """
    data = list()
    for entry in gh_json:
        cell = dict()
        for field in fields:
            cell[fields[field]] = entry[field]
        data.append(cell)
    return data | 0c14128c6e400075b982e0eb92eca65d329d6b5d | 9,030 |
from functools import reduce
def sum(l):
    """
    Returns the sum of the items in the container class.
    This is more general than the built-in 'sum' function, because it is not specific for numbers.
    This function uses the '+' operator repeatedly on the items in the container class.
    For example, if each item is a list, this will return the concatenation of all of them
    """
    return reduce(lambda x, y: x + y, l) | c64bc8aec1af669af69494aa37fd515d3d7efad5 | 9,036 |
import re
def get_version(versionfile):
    """Extract the __version__ from a given Python module."""
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(versionfile).read(), re.M)
    if match:
        return match.group(1)
    else:
        raise RuntimeError("Unable to find version string in {file}.".format(file=versionfile)) | f319b575d74e3ecea3895785e1101f72913488ec | 9,039 |
import socket
def hostname_resolves(hostname):
    """Checks to see if hostname is DNS resolvable"""
    try:
        socket.gethostbyname(hostname)
        return 1
    except socket.error:
        return 0 | 7339b03da62863d109c543e85f04eace1261a31e | 9,040 |
import math
def _gain2db(gain):
    """
    Convert linear gain in range [0.0, 1.0] to 100ths of dB.
    Power gain = P1/P2
    dB = 10 log(P1/P2)
    dB * 100 = 1000 * log(power gain)
    """
    if gain <= 0:
        return -10000
    return max(-10000, min(int(1000 * math.log10(min(gain, 1))), 0)) | 1bd602e0db397b3730c4f2b3439aeb351e6bd854 | 9,041 |
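A quick usage sketch:

    _gain2db(1.0)  # -> 0 (unity gain)
    _gain2db(0.5)  # -> -301, i.e. about -3.01 dB
    _gain2db(0.0)  # -> -10000 (floor)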
import random
def generate_int(data_format):
    """
    Generate an integer based on the given data width and sign.
    """
    is_signed = data_format['is_signed']
    width = data_format['width']
    if is_signed:
        result = random.randrange(-2 ** (width - 1) + 1, 2 ** (width - 1) - 1)
    else:
        result = random.randrange(0, 2 ** width - 1)
    return result | 644d8e71b949ff01290d357732509d1f0a62db08 | 9,042 |
def mock_run_applescript(script):
    """Don't actually run any applescript in the unit tests, ya dingbat.
    This function should return whatever type of object
    dialogs._run_applescript returns.
    Returns:
        tuple
    """
    return (1, "", "") | fdcb8e1e0e283963cec55c8fa1d98e745bd5e784 | 9,043 |
import re
def _defaults_to_code(val):
    """
    Make sure that any defaults that are surrounded by << >> are in code quotes so that they render properly.
    e.g.: <<display_name>> converts to `<<display_name>>`
    """
    return re.sub(r"(<{2}.*>{2})", r"`\1`", val) | f98aa716fab13143a29659ff746336913d9d4ee7 | 9,045 |
def total_schedule(schedule):
    """Return the total number of 15 minute windows in which the schedule
    is set to replicate in a week. If the schedule is None it is
    assumed that the replication will happen in every 15 minute
    window.
    This is essentially a bit population count.
    """
    if schedule is None:
        return 84 * 8  # 84 bytes = 84 * 8 bits
    total = 0
    for byte in schedule:
        while byte != 0:
            total += byte & 1
            byte >>= 1
    return total | 9c0231a0f6e2e4617b5c958ea337420f73811309 | 9,047 |
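A quick usage sketch (iterating a bytes object yields ints):

    total_schedule(None)         # -> 672, every window in the week
    total_schedule(b'\x0f\x01')  # -> 5, four set bits plus one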
from typing import List
from functools import reduce
def product(li: List[int]) -> int:
    """Calculates the product of all the numbers in a list.
    doctests:
    >>> product([2, 1, 4])
    8
    >>> product([3, 1, 4, 2, 5, 8])
    960
    """
    x = reduce(lambda a, b: a * b, li)
    return x | 8afe00bb6056accc694ab955a48b6be85d8a30bf | 9,050 |
from typing import Optional
from typing import List
def _convert_names(
    names, max_levels: Optional[int] = None, err_msg: Optional[str] = None
) -> List[str]:
    """Helper function that converts arguments of index, columns, values to list.
    Also performs check on number of levels. If it exceeds `max_levels`,
    raise ValueError with `err_msg`.
    """
    result = None
    if isinstance(names, str):
        result = [names]
    elif isinstance(names, list):
        result = names
    else:
        result = list(names)
    if max_levels and len(result) > max_levels:
        raise ValueError(err_msg)
    return result | d67fb93b039306e7dac973abffe1e08089993c0d | 9,059 |
def is_MC(parcels):
    """
    Dummy for Maricopa County.
    """
    return (parcels.county == 'MC').astype(int) | 6e8af2675f1ba40d642ada0d07e133aeb9dd0d70 | 9,060 |
def objScale(obj, factor):
    """
    Object scaling function: takes an object and a scale factor, and returns a list with the scaled size
    """
    oldSize = obj.get_size()
    newSize = []
    for i in oldSize:
        newSize.append(int(i/float(factor)))
    return newSize | 3104fc4e126299400a5a119fff0d8bc9d3ea32f7 | 9,061 |
def coloring(color, text):
    """Print a text in a specified color"""
    color_sequences = {
        'default': '\033[0m',
        'black': '\033[30m',
        'red': '\033[31m',
        'green': '\033[32m',
        'yellow': '\033[33m',
        'blue': '\033[34m',
        'purple': '\033[35m',
        'lightblue': '\033[36m',
        'white': '\033[37m',
    }
    return color_sequences[color] + text + color_sequences['default'] | 3953d72329a01453f52fd099bb20624c7661aa87 | 9,067 |
import math
def haversine(rad):
    """
    Returns the haversine function of an angle in radians.
    """
    return (1 - math.cos(rad)) / 2 | 294c901795aa499c42f3d67e6d6a3d5efecd46a8 | 9,070 |
import math
def _realroots_quadratic(a1, a0):
    """gives the real roots of x**2 + a1 * x + a0 = 0"""
    D = a1*a1 - 4*a0
    if D < 0:
        return []
    SD = math.sqrt(D)
    return [0.5 * (-a1 + SD), 0.5 * (-a1 - SD)] | ad61307a09b9f5cbf444f0bd75448b39b09b2e96 | 9,074 |
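A quick usage sketch, solving x**2 - 3x + 2 = 0:

    _realroots_quadratic(-3, 2)  # -> [2.0, 1.0]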
def __prepare_line(string, dir_source, replace_string):
    """
    Prepare the line before it is being written into the content file
    """
    if replace_string is not None:
        string = string.replace(dir_source, replace_string)
    return string | cbec6deab5c66960c5e8d57b52392e4ed3cf2b3d | 9,081 |
def find_closest_raster(return_period, aoi_col='AoI_RP{}y_unique', depth_col='RP{}_max_flood_depth'):
    """
    Find the closest AoI and Flood raster column name for given return period
    Arguments:
        *return_period* (float): Return period of the flood for which to find the nearest inundation raster
        *aoi_col* (str): the format of the column name to find, default can be changed to anything in G.es.attributes
        *depth_col* (str): the format of the column name to find, default can be changed in G.es.attributes
    Returns:
        *aoi_col* (str): e.g. 'AoI_RP10y_majority'
        *depth_col* (str) : e.g. 'RP500_max_flood_depth'
    """
    assert return_period > 0
    available_rps = [10, 20, 50, 100, 200, 500]
    nearest = min(available_rps, key=lambda x: abs(x - return_period))
    # note the difference in notation: AoI: 'RP10...', flood: 'RP010...'
    aoi_col = aoi_col.format(nearest)
    if len(str(nearest)) == 2:  # e.g. RP10 -> RP010
        depth_col = depth_col.format('0' + str(nearest))
    elif len(str(nearest)) == 3:  # e.g. RP200 -> RP200
        depth_col = depth_col.format(nearest)
    else:
        raise ValueError('Does not know how to handle value nearest = {}, valid are e.g. 10, 500'.format(nearest))
    return aoi_col, depth_col | 177041afc9a52d4942ab4095b7383cfc8e17652b | 9,083 |
def _is_bn_diff_doctypes(dyad):
    """Check if a dyad is between two different doctypes.
    Args:
        dyad (tuple): two-item tuple where each item is a dict which represents a document
    Returns:
        ind (bool): True if the dyad is between two different doctypes
    """
    if dyad[0]["doctype"] != dyad[1]["doctype"]:
        ind = True
    else:
        ind = False
    return ind | 2480cbca808164b2fec14fd13808cf5ebfb0dcc3 | 9,084 |
def line(char='-', length=48):
    """Generates a string of characters with a certain length"""
    return ''.join([char for _ in range(length)]) | 32de8abb95ab7e73912e2b37f0996361ed181c5b | 9,087 |
import logging
def group_by_size(input_tensors, bytes_per_pack):
    """Groups `input_tensors` into chunks of `bytes_per_pack`.
    The method preserves the original order of `input_tensors`. The grouping is
    best effort, each pack could have more or less bytes than `bytes_per_pack`.
    It only groups values with known shape.
    Args:
        input_tensors: a list of Tensor.
        bytes_per_pack: an integer.
    Returns:
        A list of packs of Tensor. All values are grouped into one pack if
        `bytes_per_pack` is zero or any of the value has unknown shape.
    """
    if bytes_per_pack == 0:
        return [input_tensors]
    packs = []
    last_pack_size = 0
    for value in input_tensors:
        num_elements = value.shape.num_elements()
        if num_elements is None:
            # Can't pack values with unknown shape.
            logging.warning(
                'not packing values due to the unknown or inconsistent shape of %s',
                value)
            return [input_tensors]
        size = num_elements * value.dtype.size
        # Try to keep each pack as close to bytes_per_pack as possible, while each
        # pack is at least bytes_per_pack large. I.E. we err on the side of having
        # few but large packs.
        if not packs or last_pack_size > bytes_per_pack:
            packs.append([])
            last_pack_size = 0
        packs[-1].append(value)
        last_pack_size += size
    return packs | 9ab5805898678b1541f116e5ef5ae1b9a1c42791 | 9,088 |
def adapters(text):
    """
    Parse lines of text into a list of adapters (represented by their joltage),
    supplemented by the outlet (0) and your device (maximum + 3).
    """
    adapters = list(sorted(map(int, text.splitlines())))
    adapters = [0] + adapters + [max(adapters) + 3]
    return adapters | cb5aa44963506e8d0ea6aa0aeb89d094bfbb0bc8 | 9,089 |
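A quick usage sketch:

    adapters("16\n10\n15")  # -> [0, 10, 15, 16, 19]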
def is_empty_line(line: str) -> bool:
    """Checks whether a line is empty."""
    return line.strip("\n").strip("\t").strip() == "" | ad58cc78e5f25353419682343c34c21e2679304d | 9,096 |
def _is_tarfile(filename):
    """Returns true if 'filename' is TAR file."""
    return (filename.endswith(".tar") or filename.endswith(".tar.gz") or
            filename.endswith(".tgz")) | 761b776e0e8078ddd4bee694e0a9d853dd2e31fd | 9,098 |
def distance_diff_catl(ra, dist, gap):
    """
    Computes the necessary distance between catalogues
    Parameters
    -----------
    ra: float
        1st distance
    dist: float
        2nd distance
    gap: float
        gap between the two distances
    Returns
    -----------
    dist_diff: float
        amount of distance necessary between mocks
    """
    ra = float(ra)
    dist = float(dist)
    gap = float(gap)
    ## Calculation of distance between catalogues
    dist_diff = (((ra + gap)**2 - (dist/2.)**2.)**(0.5)) - ra
    return dist_diff | 2a523d1c9c132dc8fcb65bd8d633bf24fcf46f42 | 9,100 |
def short_information(title, index=0):
    """
    Takes in track information and returns everything as a short formatted String.
    Args:
        title (str): track title string
        index (str): optional track number string
    Returns:
        A short formatted string of all track information.
    """
    if " - " in title:
        split_title = str(title).split(" - ", 1)
        if index:
            return "{} {}".format(index, split_title[1])
        else:
            return "{}".format(split_title[1])
    else:
        if index:
            return "{} {}".format(index, title)
        else:
            return title | 6754af1f2327eb5d9f37f4d25aa4f808d4793553 | 9,104 |
def add_css_file_extension(name):
    """
    Appends the CSS file extension to a string.
    :return: name with '.css' appended at the end
    :rType: string
    """
    return '%s.css' % name | fbe4569e4660cc4145bac36a5ea88ae87ec4c319 | 9,105 |
def graph_to_entities_json(g):
    """
    Converts the given graph to entities JSON.
    :param g: a graph
    :return: an array of JSON
    """
    entities = []
    for u, v in g.edges():
        entity = {
            "Device": "",
            "IP": "",
            "Identity": "",
            "Location": "",
            "Cookie": ""
        }
        source = g.nodes()[u]
        target = g.nodes()[v]
        entity[source["label"]] = source["id"]
        entity[target["label"]] = target["id"]
        entities.append(entity)
    return entities | ef790764c9e6ff4f652c41a5af1d5da3e4d98733 | 9,111 |
def add_frame_div_parent(cell_info):
    """
    Adds the frame a cell's parent divides on to cell info.
    Args:
        cell_info (dict): dict that maps cells to cell info
    Returns:
        dict: cell info with added frame_div_parent
    """
    new_info = cell_info.copy()
    for info in new_info.values():
        if info['parent']:
            parent = info['parent']
            info['frame_div_parent'] = new_info[parent]['frame_div']
        else:
            info['frame_div_parent'] = None
    return new_info | 585feeaaf2a353ea2481cda41d547a004ecb8adc | 9,117 |
def validate_metadata(metadata, parameters):
    """validate metadata.
    Ensure metadata references parameter workload_context,
    and that it is a string.
    Return error message string or None if no errors.
    """
    for value in metadata.values():
        if isinstance(value, dict):
            if "get_param" in value:
                if value["get_param"] == "workload_context":
                    wc = parameters.get("workload_context", {})
                    if wc.get("type") == "string":
                        break
                    else:
                        return (
                            'must have parameter "workload_context"' ' of type "string"'
                        )
                    break
    else:
        return None | 177a1133bacd9e7560be9604cd03542eaf5944ff | 9,123 |
def flux(component):
    """Determine flux in every channel
    Parameters
    ----------
    component: `scarlet.Component` or array
        Component to analyze or its hyperspectral model
    """
    if hasattr(component, "get_model"):
        model = component.get_model()
    else:
        model = component
    return model.sum(axis=(1, 2)) | b95b0aa926ee2cc2c78e90c425b47f04bc0a4d4c | 9,127 |
import _ast
def BinOpMap(operator):
    """Maps operator strings for binary operations to their _ast node."""
    op_dict = {
        '+': _ast.Add,
        '-': _ast.Sub,
        '*': _ast.Mult,
        '**': _ast.Pow,
        '/': _ast.Div,
        '//': _ast.FloorDiv,
        '%': _ast.Mod,
        '<<': _ast.LShift,
        '>>': _ast.RShift,
        '|': _ast.BitOr,
        '&': _ast.BitAnd,
        '^': _ast.BitXor,
    }
    return op_dict[operator]() | 0b332b1043b31b123daf8812e6f2ecb4e3974f19 | 9,128 |
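A quick usage sketch:

    isinstance(BinOpMap('+'), _ast.Add)  # -> True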