content (stringlengths 39–14.9k) | sha1 (stringlengths 40–40) | id (int64 0–710k) |
---|---|---|
def can_translate(user):
"""Checks if a user translate a product"""
return user.permissions['perm_translate'] | c6797346d8bd61637927af808bb9355a9220a91e | 702,590 |
import torch
def mask_finished_scores(score, flag):
"""
If a sequence is finished, we only allow one alive branch. This function aims to give one branch a zero score
and the rest -inf score.
Args:
score: A real value array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A real value array with shape [batch_size * beam_size, beam_size].
"""
beam_width = score.size(-1)
zero_mask = torch.zeros_like(flag, dtype=torch.bool)
if beam_width > 1:
unfinished = torch.cat(
(zero_mask, flag.repeat([1, beam_width - 1])), dim=1)
finished = torch.cat(
(flag.bool(), zero_mask.repeat([1, beam_width - 1])), dim=1)
else:
unfinished = zero_mask
finished = flag.bool()
score.masked_fill_(unfinished, -float('inf'))
score.masked_fill_(finished, 0)
return score | 87d5d8fb45a44c54cd690280ce0baf0c4fe8dab5 | 702,597 |
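A minimal usage sketch for mask_finished_scores, with a hypothetical batch_size * beam_size of 2 and beam_size of 3 (requires torch):
import torch

score = torch.zeros(2, 3)               # [batch_size * beam_size, beam_size]
flag = torch.tensor([[True], [False]])  # first hypothesis already finished
print(mask_finished_scores(score, flag))
# tensor([[0., -inf, -inf],    <- finished: one alive branch kept at score 0
#         [0., 0., 0.]])       <- unfinished: left untouched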
import itertools
def args_combinations(*args, **kwargs):
"""
    Given a bunch of arguments that are all a set of types, generate all
    possible combinations of argument types.
args is list of type or set of types
kwargs is a dict whose values are types or set of types
"""
def asset(v):
if isinstance(v, set):
return v
else:
return {v}
keys = list(kwargs.keys())
for curr_args in itertools.product(*[asset(a) for a in args]):
for curr_kwargs in itertools.product(*[asset(kwargs[k]) for k in keys]):
yield curr_args, {k: v for (k, v) in zip(keys, curr_kwargs)} | 8e90ce285322bd17a97e4bdb75e230f7015f4b2d | 702,598 |
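A short usage sketch for args_combinations; since sets are unordered, the order of the yielded combinations may vary:
for args, kwargs in args_combinations({int, float}, flag=bool):
    print(args, kwargs)
# (<class 'int'>,) {'flag': <class 'bool'>}
# (<class 'float'>,) {'flag': <class 'bool'>}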
def RGBStringToList(rgb_string):
    """Convert string "rgb(red,green,blue)" into a list of ints.
    The purple air JSON returns a background color based on the air
    quality as a string. We want the actual values of the components.
    Args:
      rgb_string: A string of the form "rgb(0-255, 0-255, 0-255)".
    Returns:
      list of the 3 ints representing red, green, and blue.
    """
    return [int(component) for component in rgb_string[4:-1].split(',')] | f94650ed977b5a8d8bb85a37487faf7b665f2e76 | 702,600 |
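For example (int() tolerates the spaces left by the split):
print(RGBStringToList("rgb(120, 180, 0)"))  # [120, 180, 0]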
import six
def _metric_value(value_str, metric_type):
"""
Return a Python-typed metric value from a metric value string.
"""
if metric_type in (int, float):
try:
return metric_type(value_str)
except ValueError:
raise ValueError("Invalid {} metric value: {!r}".
format(metric_type.__class__.__name__, value_str))
elif metric_type is six.text_type:
        # In Python 3, decode('unicode_escape') requires bytes, so we need
        # to encode to bytes. This also works in Python 2.
return value_str.strip('"').encode('utf-8').decode('unicode_escape')
else:
assert metric_type is bool
lower_str = value_str.lower()
if lower_str == 'true':
return True
if lower_str == 'false':
return False
raise ValueError("Invalid boolean metric value: {!r}".format(value_str)) | 39a3b0e5bfe2180e1897dd87872f8e08925e8847 | 702,601 |
def getMDistance(plug):
"""
Gets the MDistance value from the supplied plug.
:type plug: om.MPlug
:rtype: om.MDistance
"""
return plug.asMDistance() | 43cd8dfd2c698ad1cc88771c2c69f3b5e502f202 | 702,604 |
def splitFragP(uriref, punct=0):
"""split a URI reference before the fragment
Punctuation is kept.
e.g.
>>> splitFragP("abc#def")
('abc', '#def')
>>> splitFragP("abcdef")
('abcdef', '')
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i:]
else:
return uriref, "" | cc179fd8f064f3e87f18a9968f6f98ff0d584eb6 | 702,608 |
import yaml
import re
def create_kubeconfig_for_ssh_tunnel(kubeconfig_file, kubeconfig_target_file):
"""
Creates a kubeconfig in which the Server URL is modified to use a locally set up SSH tunnel. (using 127.0.0.1 as an address)
Returns a tuple consisting of:
- the original IP/Servername of the K8s API
- the original Port of the K8s API
"""
    with open(kubeconfig_file, "r") as f:
kubeconfig = yaml.load(f.read(), Loader=yaml.FullLoader)
original_server_address = kubeconfig["clusters"][0]["cluster"]["server"]
address_pattern = re.compile('https://([^:]*):([0-9]+)')
match = address_pattern.match(original_server_address)
if not match:
print('Error: No API address found in kubeconfig')
exit(1)
original_api_hostname = match.group(1)
original_api_port = match.group(2)
kubeconfig["clusters"][0]["cluster"]["server"] = f"https://127.0.0.1:{original_api_port}"
    with open(kubeconfig_target_file, "w") as f:
        f.write(yaml.dump(kubeconfig, default_flow_style=False))
return (original_api_hostname, original_api_port) | 39c85681486abda0008a040ad13a37032fc182b5 | 702,611 |
def get_lines_from_file(loc):
"""Reads the file and returns a list with every line.
Parameters:
loc (str): location of the file.
Returns:
list: list containing each of the lines of the file.
"""
    with open(loc) as f:
        result = [line.replace("\n", "") for line in f]
    return result | c05101b94e459346adae553e31d25d46a8475514 | 702,616 |
def GetBuildShortBaseName(target_platform):
"""Returns the build base directory.
Args:
target_platform: Target platform.
Returns:
Build base directory.
Raises:
RuntimeError: if target_platform is not supported.
"""
platform_dict = {
'Windows': 'out_win',
'Mac': 'out_mac',
'Linux': 'out_linux',
'Android': 'out_android',
'NaCl': 'out_nacl'
}
if target_platform not in platform_dict:
        raise RuntimeError('Unknown target_platform: ' + (target_platform or 'None'))
return platform_dict[target_platform] | 0bbbad4de3180c2ea51f5149cc3c2417a22b63e9 | 702,618 |
from pathlib import Path
def dir_files(path, pattern="*"):
"""
Returns all files in a directory
"""
if not isinstance(path, Path):
raise TypeError("path must be an instance of pathlib.Path")
return [f for f in path.glob(pattern) if f.is_file()] | 5dbeeec6fe72b70381afb52dcbbea55613a37d49 | 702,620 |
def _norm_args(norm):
"""
Returns the proper normalization parameter values.
Possible `norm` values are "backward" (alias of None), "ortho",
"forward".
This function is used by both the builders and the interfaces.
"""
if norm == "ortho":
ortho = True
normalise_idft = False
elif norm is None or norm == "backward":
ortho = False
normalise_idft = True
elif norm == "forward":
ortho = False
normalise_idft = False
else:
raise ValueError(f'Invalid norm value {norm}; should be "ortho", '
'"backward" or "forward".')
return dict(normalise_idft=normalise_idft, ortho=ortho) | e781c894c9d333fdbdf326120b1417b44dfc5181 | 702,622 |
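The resulting mapping for each accepted norm value:
print(_norm_args("ortho"))    # {'normalise_idft': False, 'ortho': True}
print(_norm_args(None))       # {'normalise_idft': True, 'ortho': False}
print(_norm_args("forward"))  # {'normalise_idft': False, 'ortho': False}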
import torch
def all_pair_iou(boxes_a, boxes_b):
"""
Compute the IoU of all pairs.
:param boxes_a: (n, 4) minmax form boxes
:param boxes_b: (m, 4) minmax form boxes
:return: (n, m) iou of all pairs of two set
"""
N = boxes_a.size(0)
M = boxes_b.size(0)
max_xy = torch.min(boxes_a[:, 2:].unsqueeze(1).expand(N, M, 2), boxes_b[:, 2:].unsqueeze(0).expand(N, M, 2))
min_xy = torch.max(boxes_a[:, :2].unsqueeze(1).expand(N, M, 2), boxes_b[:, :2].unsqueeze(0).expand(N, M, 2))
inter_wh = torch.clamp((max_xy - min_xy + 1), min=0)
I = inter_wh[:, :, 0] * inter_wh[:, :, 1]
A = ((boxes_a[:, 2] - boxes_a[:, 0] + 1) * (boxes_a[:, 3] - boxes_a[:, 1] + 1)).unsqueeze(1).expand_as(I)
B = ((boxes_b[:, 2] - boxes_b[:, 0] + 1) * (boxes_b[:, 3] - boxes_b[:, 1] + 1)).unsqueeze(0).expand_as(I)
U = A + B - I
return I / U | 1ca948e4a16016efa694d97c4829fcdfbc29e20d | 702,623 |
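A quick sanity check with one 10x10 box against itself and a disjoint box (requires torch; note the +1 pixel convention):
import torch

boxes_a = torch.tensor([[0., 0., 9., 9.]])
boxes_b = torch.tensor([[0., 0., 9., 9.], [10., 10., 19., 19.]])
print(all_pair_iou(boxes_a, boxes_b))  # tensor([[1., 0.]])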
def builddict(fin):
"""
Build a dictionary mapping from username to country for all classes.
Takes as input an open csv.reader on the edX supplied file that lists
classname, country, and username and returns a dictionary that maps from
username to country
"""
retdict = {}
for course, country, username in fin:
if username not in retdict:
retdict[username] = country
return retdict | ddf9272e0da6616abd0495b7b159807a36a83dcc | 702,628 |
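A usage sketch with hypothetical rows standing in for an open csv.reader; the first country seen for a username wins:
rows = [("cs50x", "US", "alice"), ("cs50x", "FR", "bob"), ("mitx", "DE", "alice")]
print(builddict(rows))  # {'alice': 'US', 'bob': 'FR'}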
import collections
def node_degree_counter(g, node, cache=True):
"""Returns a Counter object with edge_kind tuples as keys and the number
of edges with the specified edge_kind incident to the node as counts.
"""
node_data = g.node[node]
if cache and 'degree_counter' in node_data:
return node_data['degree_counter']
degree_counter = collections.Counter()
for node, neighbor, key in g.edges(node, keys=True):
node_kind = node_data['kind']
neighbor_kind = g.node[neighbor]['kind']
edge_kind = node_kind, neighbor_kind, key
degree_counter[edge_kind] += 1
if cache:
node_data['degree_counter'] = degree_counter
return degree_counter | 08c08f240e3170f4159e72bc7e69d99b69c37408 | 702,631 |
async def get_account_id(db, name):
"""Get account id from account name."""
return await db.query_one("SELECT find_account_id( (:name)::VARCHAR, True )", name=name) | 3dd6b46abd8726eb34eb4f8e1850dc56c3632e5c | 702,632 |
from typing import Any
def is_a_string(v: Any) -> bool:
"""Returns if v is an instance of str.
"""
return isinstance(v, str) | f729f5784434ef255ea9b2f0ca7cdfbf726e7539 | 702,634 |
def str_to_dict(
text: str,
/,
*keys: str,
sep: str = ",",
) -> dict[str, str]:
"""
Parameters
----------
text: str
The text which should be split into multiple values.
keys: str
The keys for the values.
sep: str
The separator for the values.
Returns
-------
dict[str, str]
"""
values = text.split(sep)
return {key: value for key, value in zip(keys, values)} | 0b34ea1b47d217929fd9df760231f4786150e661 | 702,640 |
def get_data_type(name):
"""Extract the data type name from an ABC(...) type name."""
return name.split('(', 1)[0] | 7565b30e1e2469de929b377fde1f186d28080f94 | 702,644 |
def moving_average(time_series, window_size=20, fwd_fill_to_end=0):
"""
Computes a Simple Moving Average (SMA) function on a time series
:param time_series: a pandas time series input containing numerical values
:param window_size: a window size used to compute the SMA
:param fwd_fill_to_end: index from which computation must stop and propagate last value
:return: Simple Moving Average time series
"""
    sma = time_series.rolling(window=window_size).mean()
    if fwd_fill_to_end > 0:
        sma[-fwd_fill_to_end:] = sma.iloc[-fwd_fill_to_end]
    # The moving average is NaN for the first window_size - 1 days,
    # so backfill those leading values.
    sma.fillna(method='backfill', inplace=True)
return sma | d71931867c419f306824e8b240a9b1bb3fff2fdd | 702,645 |
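A small sanity check, assuming pandas is installed:
import pandas as pd

ts = pd.Series(range(10), dtype=float)
print(moving_average(ts, window_size=3).tolist())
# [1.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]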
from pathlib import Path
from typing import Sequence
def load_input(path: Path) -> Sequence[int]:
"""Loads the input data for the puzzle."""
with open(path, "r") as f:
depths = tuple(int(d) for d in f.readlines())
return depths | 35472eadcd2deefbbae332b3811be7d731cb2478 | 702,646 |
def set_accuracy_83(num):
"""Reduce floating point accuracy to 8.3 (xxxxx.xxx).
:param float num: input number
:returns: float with specified accuracy
"""
return float("{:8.3f}".format(num)) | fd1818a81ea7a78c296a85adc3621ab77fbad230 | 702,650 |
def load_file(path):
"""Loads file and return its content as list.
Args:
path: Path to file.
Returns:
list: Content splited by linebreak.
"""
with open(path, 'r') as arq:
text = arq.read().split('\n')
return text | 348d57ab3050c12181c03c61a4134f2d43cd93cd | 702,656 |
from typing import Type
import enum
def _enum_help(msg: str, e: Type[enum.Enum]) -> str:
"""
Render a `--help`-style string for the given enumeration.
"""
return f"{msg} (choices: {', '.join(str(v) for v in e)})" | e53762798e0ecb324143ee4a05c4152eaf756aad | 702,657 |
from collections import OrderedDict
def array_remove_duplicates(s):
"""removes any duplicated elements in a string array."""
return list(OrderedDict.fromkeys(s)) | ea5a0d620139e691db99f364c38827abe39a16f5 | 702,658 |
import socket
def get_ipv4_for_hostname(hostname, static_mappings=None):
"""Translate a host name to IPv4 address format.
The IPv4 address is returned as a string, such as '100.50.200.5'.
If the host name is an IPv4 address itself it is returned unchanged.
    You can provide a dictionary with static mappings.
Following mappings are added by default:
'127.0.0.1' => '127.0.0.1'
'localhost' => '127.0.0.1'
'localhost.localdomain' => '127.0.0.1'
Args:
hostname (string): hostname.
        static_mappings (dict): dictionary of static mappings
            ((hostname) string: (ip) string).
Returns:
(string) IPv4 address for the given hostname (None if any problem)
"""
    hostname = hostname.lower()
    static_mappings = dict(static_mappings or {})
    static_mappings.update({'127.0.0.1': '127.0.0.1', 'localhost': '127.0.0.1',
                            'localhost.localdomain': '127.0.0.1'})
if hostname in static_mappings:
return static_mappings[hostname]
try:
return socket.gethostbyname(hostname)
except Exception:
return None | fd28106380c6a6d2c353c0e8103f15df264117ef | 702,660 |
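A quick usage sketch; the second call falls through to a DNS lookup and returns None when it fails:
print(get_ipv4_for_hostname("LOCALHOST"))             # '127.0.0.1'
print(get_ipv4_for_hostname("no-such-host.invalid"))  # None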
def season_ts(ds, var, season):
""" calculate timeseries of seasonal averages
Args: ds (xarray.Dataset): dataset
var (str): variable to calculate
season (str): 'DJF', 'MAM', 'JJA', 'SON'
"""
## set months outside of season to nan
ds_season = ds.where(ds['time.season'] == season)
# calculate 3month rolling mean (only middle months of season will have non-nan values)
ds_season = ds_season[var].rolling(min_periods=3, center=True, time=3).mean()
# reduce to one value per year
ds_season = ds_season.groupby('time.year').mean('time')
# FUTURE: remove first year if it has nan?
return ds_season | 6d5b0ddc39762ceca42de6b9228c38f4bf365cd0 | 702,661 |
def getSubString(string, firstChar, secondChar,start=0):
"""
    Returns a tuple (substring, end_index): the substring of string between firstChar and secondChar, and the index of secondChar. Starts looking from start. If it is unable to find a substring, returns ("", -1).
"""
front = string.find(firstChar,start)
back = string.find(secondChar,front+1)
if front > -1 and back > -1:
return (string[front+1:back],back)
else:
return ("",-1) | c845c3c31abce7ed8064cfd16e455c27f1aac806 | 702,662 |
def factorial_iter(num: int) -> int:
"""
Return the factorial of an integer non-negative number.
Parameters
----------
num : int
Raises
------
TypeError
if num is not integer.
ValueError
if num is less than zero.
Returns
-------
int
"""
if not isinstance(num, int):
raise TypeError("an integer number is required")
if num < 0:
raise ValueError("a non-negative integer number is required")
product = 1
for factor in range(2, num + 1):
product *= factor
return product | 36ec433bf02bdef0770f9f9b86feff9afa995eb3 | 702,663 |
def update_Q(Qsa, Qsa_next, reward, alpha, gamma):
""" updates the action-value function estimate using the most recent time step """
return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa)) | f5f54d8e8b9f67c1145d967fd731ea37a4aa1e57 | 702,664 |
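A worked one-step update with hypothetical values:
print(update_Q(Qsa=1.0, Qsa_next=2.0, reward=1.0, alpha=0.5, gamma=0.9))
# 1.0 + 0.5 * (1.0 + 0.9 * 2.0 - 1.0) = 1.9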
def get_diff(l1, l2):
"""
Returns the difference between two lists.
"""
    diff = list(set(l1) ^ set(l2))
    return diff | 16b2083fcd4f61cb86c563ea6773b64e6fbe6006 | 702,668 |
def identical_prediction_lists(prev_prediction_list, curr_prediction_list):
"""
Check if two predictions lists are the same
:param prev_prediction_list: list Last iterations predictions
:param curr_prediction_list: list current iterations predictions
:return: bool false = not identical, true = identical
"""
    if len(prev_prediction_list) != len(curr_prediction_list):
        return False
    for x, y in zip(prev_prediction_list, curr_prediction_list):
if x != y:
return False
return True | b74140acb3c5529fb804710d4fd3bfd64bb4f009 | 702,669 |
def reverse_domain_from_network(ip_network):
"""Create reverse DNS zone name from network address (ipaddress.IPv4Network)"""
prefixlen = ip_network.prefixlen
if prefixlen % 8:
return ip_network.reverse_pointer # classless
else:
        return ip_network.network_address.reverse_pointer[(4 - (prefixlen // 8)) * 2:] | 0b0b7bb6bc72cae6625e9bd024c9692253d55c88 | 702,682 |
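A usage sketch with the standard library's ipaddress module:
import ipaddress

print(reverse_domain_from_network(ipaddress.IPv4Network("192.0.2.0/24")))
# '2.0.192.in-addr.arpa'
print(reverse_domain_from_network(ipaddress.IPv4Network("192.0.2.0/26")))
# '0/26.2.0.192.in-addr.arpa' (classless form)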
from bs4 import BeautifulSoup
def get_anchor_href(markup):
"""
Given HTML markup, return a list of hrefs for each anchor tag.
"""
soup = BeautifulSoup(markup, 'lxml')
return ['%s' % link.get('href') for link in soup.find_all('a')] | ec75f36e0b14a1d20452a1b6c1233d789c03cd6b | 702,683 |
async def get_asterisk_chan(response_json):
"""Get the Asterisk Channel from the JSON responses"""
if response_json["type"] in ["PlaybackStarted", "PlaybackFinished"]:
return str(response_json.get("playback", {}).get("target_uri")).split(":")[1]
else:
return response_json.get("channel", {}).get("id") | 951ccc3cdea92cfb630eb24bbd9e2f2333a72f1e | 702,684 |
def massage_error_code(error_code):
"""Massages error codes for cleaner exception handling.
Args:
int Error code
Returns:
int Error code. If arg is not an integer, will change to 999999
"""
if type(error_code) is not int:
error_code = 999999
return error_code | c9c42e71aa4684e79078673baa9a7ecfe627a169 | 702,687 |
import random
def randomPartition(elems: list, bin_sizes: list) -> list:
"""
Randomly partition list elements into
bins of given sizes
"""
def shuffleList(elems):
random.shuffle(elems)
return elems
elems = shuffleList(elems)
partition = []
start, end = 0, 0
for bin_size in bin_sizes:
end += bin_size
partition.append(elems[start:end])
start += bin_size
return partition | 52d1d16639fa0a4566423255bb29c123879282cb | 702,688 |
def pre_process(line):
"""
    Return the line with any '#' comment removed and surrounding whitespace stripped.
"""
if '#' in line:
line = line[:line.index('#')]
stripped_data = line.strip()
return stripped_data | 63640048cb07376fb73b62cb6b6d2049adec5c17 | 702,689 |
def file_tag_from_task_file(file: str, cut_num_injs: bool = False) -> str:
"""Returns the file tag from a task output filename.
Args:
file: Filename of task file with or without path.
cut_num_injs: Whether to not include the number of injections per redshift bin.
"""
file_tag = file.replace("_TASK_", "results_").split("results_")[1]
if cut_num_injs:
return file_tag.split("_INJS-PER-ZBIN_")[0]
else:
return file_tag | 2ba0180c1a06f4ae6e47659e3598c2207cca5642 | 702,691 |
import torch
def cmc_score_count(
distances: torch.Tensor, conformity_matrix: torch.Tensor, topk: int = 1
) -> float:
"""
Function to count CMC from distance matrix and conformity matrix.
Args:
distances: distance matrix shape of (n_embeddings_x, n_embeddings_y)
conformity_matrix: binary matrix with 1 on same label pos and 0 otherwise
topk: number of top examples for cumulative score counting
Returns:
cmc score
Examples:
.. code-block:: python
import torch
from catalyst import metrics
metrics.cmc_score_count(
distances=torch.tensor([[1, 2], [2, 1]]),
conformity_matrix=torch.tensor([[0, 1], [1, 0]]),
topk=1,
)
# 0.0
.. code-block:: python
import torch
from catalyst import metrics
metrics.cmc_score_count(
distances=torch.tensor([[1, 0.5, 0.2], [2, 3, 4], [0.4, 3, 4]]),
conformity_matrix=torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
topk=2,
)
# 0.33
"""
perm_matrix = torch.argsort(distances)
position_matrix = torch.argsort(perm_matrix)
conformity_matrix = conformity_matrix.type(torch.bool)
position_matrix[~conformity_matrix] = (
topk + 1
) # value large enough not to be counted
closest = position_matrix.min(dim=1)[0]
k_mask = (closest < topk).type(torch.float)
return k_mask.mean().item() | 5c3d31a6455e1d8c7694a2117637e9e51478bb21 | 702,692 |
import threading
def simple_thread(func, daemon=True):
""" Start function in another thread, discarding return value. """
thread = threading.Thread(target=func)
thread.daemon = daemon
thread.start()
return thread | 870102cf07b92b7cdd56960a7c6da5d8521ee233 | 702,695 |
def add_gms_group(
self,
group_name: str,
parent_pk: str = "",
background_image_file: str = "",
) -> bool:
"""Update appliance group in Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - group
- POST
- /gms/group/new
:param group_name: Name of the group. Must be unique.
:type group_name: str
:param parent_pk: The appliance group identifier,
e.g. ``3.Network``, "" will act as to root group, defaults to ""
:type parent_pk: str, optional
:param background_image_file: Image filename for group,
defaults to ""
:type background_image_file: str, optional
:return: Returns True/False based on successful call.
:rtype: bool
"""
data = {
"name": group_name,
"parentId": parent_pk,
"backgroundImage": background_image_file,
}
return self._post(
"/gms/group/new",
data=data,
expected_status=[204],
return_type="bool",
) | c92cd932070913af2b98608822ad03ff4a59506e | 702,697 |
def func_split_token(str_token):
"""
    Splits a VCF info token while guaranteeing 2 tokens
* str_token : String token to split at '='
: String
* return : List of 2 strings
"""
if str_token:
lstr_pieces = str_token.split("=")
i_pieces = len(lstr_pieces)
if i_pieces == 1:
return(lstr_pieces + [""])
if i_pieces == 2:
return lstr_pieces
elif i_pieces > 2:
return [lstr_pieces[0], "=".join(lstr_pieces[1:])]
return ["", ""] | 581b7729ab8dba6f3032b6f1f3bf9bc7dd17c67e | 702,698 |
def populate_sd_dict(herbivore_list):
"""Create and populate the stocking density dictionary, giving the stocking
density of each herbivore type."""
stocking_density_dict = {}
for herb_class in herbivore_list:
stocking_density_dict[herb_class.label] = herb_class.stocking_density
return stocking_density_dict | 87a0b9375a5a419557443506522ab2cf8d34c308 | 702,701 |
from typing import List
import pathlib
def get_files(
catalog: str, recursive: bool, suffix_markup: str, extension: str
) -> List[str]:
"""
    Searches the given directory and returns a list of paths to the files found.
    By default (recursive=False), only the directory itself is searched, not subdirectories.
    Files whose names end with suffix_markup + extension are excluded from the result.
"""
if recursive:
files = pathlib.Path(catalog).rglob(f"*{extension}")
else:
files = pathlib.Path(catalog).glob(f"*{extension}")
return [
str(i) for i in list(files) if not str(i).endswith(f"{suffix_markup+extension}")
] | 99f6d591b0548aafb654d05b94271a6d9ae243b6 | 702,703 |
def pnchunk(darray, maxsize_4d=1000**2, sample_var="u", round_func=round, **kwargs):
""" Chunk `darray` in time while keeping each chunk's size roughly
around `maxsize_4d`. The default `maxsize_4d=1000**2` comes from
xarray's rule of thumb for chunking:
http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
"""
chunk_number = darray[sample_var].size / maxsize_4d
chunk_size = int(round_func(len(darray[sample_var].time) / chunk_number))
return darray.chunk(dict(time=chunk_size)) | 770bc7465168d89bf85800e01d1fcbb8fa0af663 | 702,708 |
import errno
def _convert_errno_parm(code_should_be):
""" Convert the code_should_be value to an integer
If code_should_be isn't an integer, then try to use
the code_should_be value as a errno "name" and extract
the errno value from the errno module.
"""
try:
code = int(code_should_be)
except ValueError:
try:
code = getattr(errno, code_should_be)
except AttributeError:
raise AssertionError("code_should_be '%s' is not a valid errno name" % code_should_be)
return code | 949c9f17539d885a0fc4a51f3358fc3695c42e22 | 702,711 |
def _add(x):
"""
Add all elements of a list.
:param x: List to sum
:return: Sum of all elements in the input list
"""
return sum(x) | 19d42b51dbd07a992f3256b8b0d69c1f4043fa89 | 702,715 |
def _assign_link_resids(res_link, match):
"""
Given a link at residue level (`res_link`) and
a dict (`match`) specifying to which resids
the res_link nodes map, create a correspondence
dict that maps the higher resolution nodes to
    the resids specified in `match`. Each `res_link`
node by definition can only map to one resid provided
in `match`. The lower resolution nodes associated
to a particular res_link node are stored in the 'graph'
attribute of res_link.
    Note that the nodes in that graph are consecutive
    and each uniquely corresponds to a single node in the
    graph specifying all residues at higher resolution.
Parameters:
-----------
res_link: :class:`nx.Graph`
must have a 'graph' attribute for each node
that itself is a graph of the atoms represented
match: dict
dict matching a resid to the node_key of res_link
Returns:
--------
:type:dict
correspondence of high resolution nodes to resid
"""
link_node_to_resid = {}
for resid, link_node in match.items():
for node in res_link.nodes[link_node]['graph']:
link_node_to_resid[node] = resid
return link_node_to_resid | cfb315a15ebba6e086f340dc7e6db27eadee1d2b | 702,716 |
def checkrows(sudoku):
"""
Checks if each row contains each value only once
"""
    size = len(sudoku)
    for row in sudoku:
        numbercontained = [False for x in range(size)]
        for value in row:
            # values outside range(size) are placeholders; skip them
            if value in range(size):
                if numbercontained[value]:
                    return False
                else:
                    numbercontained[value] = True
    return True | 3266eb936b0f3f1e22bd16cd40fbf61753876ce1 | 702,720 |
def list_is_unique(ls):
"""Check if every element in a list is unique
Parameters
----------
ls: list
Returns
-------
bool
"""
return len(ls) == len(set(ls)) | a0bc92c2e00b48d80af39020622f31059845fc94 | 702,721 |
def compute_transpose(x):
""" given a matrix, computes the transpose """
xt=[([0]*len(x)) for k in x[0]]
for i, x_row in enumerate(x):
for j, b in enumerate(x_row):
            xt[j][i] = b
return xt | 22988bc9802deaf1bc07182a5e85c56d54c94436 | 702,724 |
def extract_mocha_summary(lines):
"""
Example mocha summary lines (both lines can be missing if no tests passed/failed):
✔ 3 tests completed
✖ 1 test failed
"""
passes, fails = 0, 0
for line in lines:
if line and line[0] == '✔':
passes = int(line.split()[1])
elif line and line[0] == '✖':
fails = int(line.split()[1])
return {
'total_tests': passes + fails,
'passed_tests': passes,
'failed_tests': fails
} | 53d671c3d18f0421cbb512835a84f71a9588f947 | 702,727 |
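For example:
lines = ['✔ 3 tests completed', '✖ 1 test failed']
print(extract_mocha_summary(lines))
# {'total_tests': 4, 'passed_tests': 3, 'failed_tests': 1}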
def factorial(n):
""" Returns the factorial of n.
e.g. factorial(7) = 7x6x5x4x3x2x1 = 5040
"""
answer = 1
for i in range(n, 1, -1):
answer = answer * i
return answer | 98c0530e4e0f1de8c1c0f622e7d62a5feb042bcb | 702,730 |
def vfun(self, parr="", func="", par1="", con1="", con2="", con3="",
**kwargs):
"""Performs a function on a single array parameter.
APDL Command: *VFUN
Parameters
----------
parr
The name of the resulting numeric array parameter vector. See *SET
for name restrictions.
func
Function to be performed:
Arccosine: ACOS(Par1). - Arcsine: ASIN(Par1).
Par1 is sorted in ascending order. *VCOL, *VMASK, *VCUM, and *VLEN,,NINC do not apply. *VLEN,NROW does apply. - Arctangent: ATAN(Par1).
Compress: Selectively compresses data set. "True" (*VMASK) values of Par1 (or row positions to be considered according to the NINC value on the *VLEN command) are written in compressed form to ParR, starting at the specified position. - Copy: Par1 copied to ParR.
Cosine: COS(Par1). - Hyperbolic cosine: COSH(Par1).
Direction cosines of the principal stresses (nX9). Par1 contains the nX6 component stresses for the n locations of the calculations. - Par1 is sorted in descending order. *VCOL, *VMASK, *VCUM, and *VLEN,,NINC do
not apply. *VLEN,NROW does apply.
Euler angles of the principal stresses (nX3). Par1 contains the nX6 component stresses for the n locations of the calculations. - Exponential: EXP(Par1).
Expand: Reverse of the COMP function. All elements of Par1 (starting at the position specified) are written in expanded form to corresponding "true" (*VMASK) positions (or row positions to be considered according to the NINC value on the *VLEN command) of ParR. - Natural logarithm: LOG(Par1).
Common logarithm: LOG10(Par1). - Nearest integer: 2.783 becomes 3.0, -1.75 becomes -2.0.
Logical complement: values 0.0 (false) become 1.0 (true). Values > 0.0 (true) become 0.0 (false). - Principal stresses (nX5). Par1 contains the nX6 component stresses for the n
locations of the calculations.
Power function: Par1**CON1. Exponentiation of any negative number in the vector Par1 to a non-integer power is performed by exponentiating the positive number and prepending the minus sign. For example, -4**2.3 is -(4**2.3). - Sine: SIN(Par1).
Hyperbolic sine: SINH(Par1). - Square root: SQRT(Par1).
Tangent: TAN(Par1). - Hyperbolic tangent: TANH(Par1).
Tangent to a path at a point: the slope at a point is determined by linear interpolation half way between the previous and next points. Points are assumed to be in the global Cartesian coordinate system. Path points are specified in array Par1 (having 3 consecutive columns of data, with the columns containing the x, y, and z coordinate locations, respectively, of the points). Only the starting row index and the column index for the x coordinates are specified, such as A(1,1). The y and z coordinates of the vector are assumed to begin in the corresponding next columns, such as A(1,2) and A(1,3). The tangent result, ParR, must also have 3 consecutive columns of data and will contain the tangent direction vector (normalized to 1.0); such as 1,0,0 for an x-direction vector. - Normal to a path and an input vector at a point: determined from the cross-
product of the calculated tangent vector (see
TANG) and the input direction vector (with the i,
j, and k components input as CON1, CON2, and
CON3). Points are assumed to be in the global
Cartesian coordinate system. Path points are
specified in array Par1 (having 3 consecutive
columns of data, with the columns containing the
x, y, and z coordinate locations, respectively,
of the points). Only the starting row index and
the column index for the x coordinates are
specified, such as A(1,1). The y and z
coordinates of the vector are assumed to begin in
the corresponding next columns, such as A(1,2)
and A(1,3). The normal result, ParR, must also
have 3 consecutive columns of data and will
contain the normal direction vector (normalized
to 1.0); such as 1,0,0 for an x-direction vector.
Transforms global Cartesian coordinates of a point to the coordinates of a specified system: points to be transformed are specified in array Par1 (having 3 consecutive columns of data, with the columns containing the x, y, and z global Cartesian coordinate locations, respectively, of the points). Only the starting row index and the column index for the x coordinates are specified, such as A(1,1). The y and z coordinates of the vector are assumed to begin in the corresponding next columns, such as A(1,2) and A(1,3). Results are transformed to coordinate system CON1 (which may be any valid coordinate system number, such as 1,2,11,12, etc.). The transformed result, ParR, must also have 3 consecutive columns of data and will contain the corresponding transformed coordinate locations. - Transforms specified coordinates of a point to global Cartesian coordinates:
points to be transformed are specified in array
Par1 (having 3 consecutive columns of data, with
the columns containing the local coordinate
locations (x, y, z or r, θ, z or etc.) of the
points). Only the starting row index and the
column index for the x coordinates are specified,
such as A(1,1). The y and z coordinates (or θ
and z, or etc.) of the vector are assumed to
begin in the corresponding next columns, such as
A(1,2) and A(1,3). Local coordinate locations
are assumed to be in coordinate system CON1
(which may be any valid coordinate system number,
such as 1,2,11,12, etc.). The transformed
result, ParR, must also have 3 consecutive
columns of data, with the columns containing the
global Cartesian x, y, and z coordinate
locations, respectively.
par1
Array parameter vector in the operation.
con1, con2, con3
Constants (used only with the PWR, NORM, LOCAL, and GLOBAL
functions).
Notes
-----
Operates on one input array parameter vector and produces one output
array parameter vector according to:
ParR = f(Par1)
where the functions (f) are described below. Functions are based on
the standard FORTRAN definitions where possible. Out-of-range function
results (or results with exponents whose magnitudes are approximately
greater than 32 or less than -32) produce a zero value. Input and
output for angular functions may be radians (default) or degrees
[*AFUN]. ParR may be the same as Par1. Starting array element numbers
must be defined for each array parameter vector if it does not start at
the first location. For example, *VFUN,A,SQRT,B(5) takes the square
root of the fifth element of B and stores the result in the first
element of A. Operations continue on successive array elements [*VLEN,
*VMASK] with the default being all successive elements. Absolute
values and scale factors may be applied to all parameters [*VABS,
*VFACT]. Results may be cumulative [*VCUM]. Skipping array elements
via *VMASK or *VLEN for the TANG and NORM functions skips only the
writing of the results (skipped array element data are used in all
        calculations). See the *VOPER command for details.
        This command is valid in any processor.
        Menu Paths:
        Utility Menu > Parameters > Array Operations > Vector Functions
"""
command = f"*VFUN,{parr},{func},{par1},{con1},{con2},{con3}"
return self.run(command, **kwargs) | dba87cb721ba79a797c357160a2ebed425b3239f | 702,731 |
def reverse_colors(frame):
"""
Reverse the order of colors in a frame from RGB to BGR and from BGR to
RGB.
"""
return frame[:, :, ::-1] | 210deca283c373d02c0d114daad2e23ba822b3ab | 702,732 |
from typing import Optional
def algo_parts(name: str) -> tuple[str, Optional[int]]:
"""Return a tuple of an algorithm's name and optional number suffix.
Example::
>>> algo_parts('rot13')
('rot', 13)
>>> algo_parts('whirlpool')
('whirlpool', None)
"""
base_algo = name.rstrip('0123456789')
try:
bits = int(name[len(base_algo):])
except ValueError:
bits = None
return base_algo, bits | c3231da2bc3f96091b6f07d38fdd2caa414e59a1 | 702,734 |
def calculate_rsi(analysis_df, column, window):
"""
Calculates relative stength index.
Args:
analysis_df: Pandas dataframe with a closing price column
column: String representing the name of the closing price column
window: Integer representing the number of periods used in the RSI calculation
Returns:
Pandas dataframe containing RSI
"""
    delta = analysis_df[column].diff()
up_periods = delta.copy()
up_periods[delta<=0] = 0.0
down_periods = abs(delta.copy())
down_periods[delta>0] = 0.0
rs_up = up_periods.rolling(window, min_periods=1).mean()
rs_down = down_periods.rolling(window, min_periods=1).mean()
rsi = 100 - 100/(1+rs_up/rs_down)
# Impute nan rows
rsi = rsi.fillna(method="bfill")
return rsi | 7c32751bc4aeb5583caa69397f1c19b88a208039 | 702,736 |
def has_slide_type(cell, slide_type):
"""
Select cells that have a given slide type
:param cell: Cell object to select
:param slide_type: Slide Type(s): '-', 'skip', 'slide', 'subslide', 'fragment', 'notes'
:type slide_type: str / set / list
:return: a bool object (True if cell should be selected)
"""
if isinstance(slide_type, str):
slide_type = {slide_type}
return all(f(cell) for f in [lambda c: 'slideshow' in c.metadata,
lambda c: 'slide_type' in c.metadata['slideshow'],
lambda c: c.metadata['slideshow']['slide_type'] in slide_type]) | edb1323331317d53502179fe357c151a5b59af0b | 702,737 |
import sqlite3
def query(x,db,v=True):
""" A function that takes in a query and returns/ prints the result"""
conn = sqlite3.connect(db)
curs = conn.cursor()
my_result = list(curs.execute(x).fetchall())
    curs.close()
    conn.commit()
    conn.close()
if v is True:
print(my_result)
return my_result | 8b914224429bdfd8d167327bf6cc0e3afaf94b3c | 702,739 |
def build_url(selector, child_key, parent_key):
"""Return url string that's conditional to `selector` ("site" or "area")."""
if selector == "site":
return "https://%s.craigslist.org/" % child_key
return "https://%s.craigslist.org/%s/" % (parent_key, child_key) | f8f00d4f9d20c3312f2b36d014365dbcf08242bc | 702,740 |
def sieve_of_eratosphenes(n: int) -> list[int]:
"""
Finds prime numbers <= n using the sieve of Eratosphenes algorithm.
:param n: the upper limit of sorted list of primes starting with 2
:return: sorted list of primes
Integers greater than 2 are considered good input that gives
meaningful results:
>>> sieve_of_eratosphenes(17)
[2, 3, 5, 7, 11, 13, 17]
>>> sieve_of_eratosphenes(16)
[2, 3, 5, 7, 11, 13]
Other integers are considered relatively good input that
results in empty list:
>>> sieve_of_eratosphenes(0)
[]
>>> sieve_of_eratosphenes(-7)
[]
Other types cause TypeError and so are considered bad input:
>>> sieve_of_eratosphenes("m")
Traceback (most recent call last):
...
TypeError: can only concatenate str (not "int") to str
>>> sieve_of_eratosphenes(2.3)
Traceback (most recent call last):
...
TypeError: can't multiply sequence by non-int of type 'float'
"""
sieve = [True] * (n+1)
    for i in range(2, n+1):
        if sieve[i] and i*i <= n:
            for k in range(i*i, n+1, i):
                sieve[k] = False
prime_numbers = []
for i in range(2, n+1):
if sieve[i]:
prime_numbers.append(i)
return prime_numbers | 4dbfdffe0ff6e360361daccdcd39fb7fb3d09a03 | 702,743 |
def recovery_secret_to_ksk(recovery_secret):
"""Turn secret and salt to the URI.
>>> recovery_secret_to_ksk("0123.4567!ABCD")
    'KSK@babcom-recovery-0123.4567!ABCD'
"""
if isinstance(recovery_secret, bytes):
recovery_secret = recovery_secret.decode("utf-8")
# fix possible misspellings (the recovery_secret is robust against them)
for err, fix in ["l1", "I1", "O0", "_-", "*+"]:
recovery_secret = recovery_secret.replace(err, fix)
ksk = "KSK@babcom-recovery-" + recovery_secret
return ksk | a8832a9e970e4728dcb5f779fd4463abf142e69e | 702,746 |
import click
from datetime import datetime
def validate_end_date(ctx, param, value):
"""
    Validator for the 'click' command line interface. Ensures that the 'end_date'
    argument is not earlier than 'start_date' and not later than the current date.
"""
# option when the user enters the '--end_date' parameter earlier than '--start_date'
if ctx.params.get('start_date') is not None:
if ctx.params.get('start_date') > value:
raise click.BadParameter(f'the date value must be equal or later than: '
f'{(ctx.params.get("start_date")).date()}')
if value > datetime.now():
raise click.BadParameter(f'the date value must be equal or earlier than {(datetime.now().date())}')
return value | b1416fdc614aad53ffab537275b7ae2cb15eddce | 702,747 |
def string_pair_list_to_dictionary_no_json(spl):
"""
    Convert a mongodb_store_msgs/StringPairList into a dictionary, ignoring content
:Args:
| spl (StringPairList): The list of (key, value) to pairs convert
:Returns:
| dict: resulting dictionary
"""
return dict((pair.first, pair.second) for pair in spl) | fdcaa23eed195d389456a6ceb0fdde2d06d932d6 | 702,749 |
import re
def sanitize(inStr):
"""Hide any sensitive info from the alert"""
# Key-value pairs of patterns with what to replace them with
patterns = {
"https\:\/\/oauth2\:[\d\w]{64}@gitlab\.pavlovia\.org\/.*\.git": "[[OAUTH key hidden]]" # Remove any oauth keys
}
# Replace each pattern
for pattern, repl in patterns.items():
inStr = re.sub(pattern, repl, inStr)
return inStr | 62c55f7d0af7c458d66e426fc9366c8ec84379df | 702,750 |
def get_techniques_of_tactic(tactic, techniques):
"""Given a tactic and a full list of techniques, return techniques that
appear inside of tactic
"""
techniques_list = []
for technique in techniques:
for phase in technique['kill_chain_phases']:
if phase['phase_name'] == tactic['x_mitre_shortname']:
techniques_list.append(technique)
techniques_list = sorted(techniques_list, key=lambda k: k['name'].lower())
return techniques_list | 15efe8788bc4e45170f9d02c452482422ec8cf9f | 702,751 |
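A usage sketch with minimal hypothetical STIX-like dicts:
tactic = {'x_mitre_shortname': 'persistence'}
techniques = [
    {'name': 'B', 'kill_chain_phases': [{'phase_name': 'persistence'}]},
    {'name': 'a', 'kill_chain_phases': [{'phase_name': 'execution'}]},
]
print([t['name'] for t in get_techniques_of_tactic(tactic, techniques)])  # ['B']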
import torch
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
mask = tensor.ne(padding_idx).long()
return torch.cumsum(mask, dim=1) * mask + padding_idx | f59fad86a23ff76f184c0dd6a21a92722f4817f5 | 702,755 |
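A small usage sketch, with 1 as the hypothetical padding index (requires torch):
import torch

tensor = torch.tensor([[7, 7, 1], [7, 1, 1]])
print(make_positions(tensor, padding_idx=1))
# tensor([[2, 3, 1],
#         [2, 1, 1]])  <- positions start at padding_idx + 1; padding keeps padding_idx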
import typing
def ark(row: typing.Mapping[str, str]) -> str:
"""The item ARK (Archival Resource Key)
Args:
row: An input CSV record.
Returns:
The item ARK.
"""
ark_prefix = "ark:/"
if row["Item ARK"].startswith(ark_prefix, 0):
return row["Item ARK"]
return ark_prefix + row["Item ARK"] | ea428fcddf26a5bfdad3ec1c4906e765b72b1f47 | 702,758 |
import math
def is_mc_multiplier(multiplier, modulus):
"""
Checks if multiplier is a MC multiplier w.r.t. modulus.
:param multiplier: an integer in (0, modulus).
:param modulus: a prime number.
:return: True if multiplier is a MC multiplier w.r.t. modulus.
"""
return (modulus % multiplier) < math.floor(modulus / multiplier) | 6d210f8de081ae0a468692b2f93e5145170917e9 | 702,760 |
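For instance, the classic minimal-standard generator parameters pass the check (this is the condition that makes Schrage's trick applicable):
print(is_mc_multiplier(16807, 2**31 - 1))  # True  (2836 < 127773)
print(is_mc_multiplier(5, 7))              # False (7 % 5 == 2, which is not < 7 // 5 == 1)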
def readToTuple(f_path):
"""Reads in a two-col file (tab-delim) and returns a list of tuples"""
    ls = []
    with open(f_path) as f:
        for l in f:
            if l.startswith("#"):
                continue
            ls.append(tuple(l.strip().split("\t")))
    return ls | 726d1b4a4682c4e11afbf59e342340e0cf5ccc63 | 702,761 |
def increment_ctr(ctr):
"""
Increments one the counter.
Parameters
----------
ctr : string
Counter
Returns
-------
incremented_counter : string
Incremented Counter
"""
ctr_inc_int = int.from_bytes(bytes.fromhex(ctr), byteorder="big") + 1
return bytes.hex(ctr_inc_int.to_bytes(length=16, byteorder="big")) | 0ef04e10283f02b6b7df46cf196493a3ad4a95c8 | 702,766 |
def himmelblauConstraintOne(solution):
"""First restriction
Args:
solution (Solution): Candidate solution
Returns:
bool: True if it meets the constraint, False otherwise
"""
return (26 - (solution[0] - 5) ** 2 - solution[1] ** 2) >= 0 | f5878d2573559b78fee3b434d537a380abb5e2c8 | 702,772 |
def create_ip_list(addr0, n_addrs):
"""Creates list of IP multicast subscription addresses.
Args:
addr0 (str): first IP address in the list.
n_addrs (int): number of consecutive IP addresses for subscription.
Returns:
addr_list (list): list of IP addresses for subscription.
"""
prefix, suffix0 = addr0.rsplit('.', 1)
addr_list = [addr0]
for i in range(1, n_addrs):
addr_list.append(prefix + '.{}'.format(i + int(suffix0)))
return addr_list | e29b5c4b9f9ec0dc46916977e4a54bb77a9e74a6 | 702,775 |
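For example:
print(create_ip_list("239.0.0.0", 3))
# ['239.0.0.0', '239.0.0.1', '239.0.0.2']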
from collections import OrderedDict
def get_form_errors(form):
"""
Django form errors do not obey natural field order,
this template tag returns non-field and field-specific errors
:param form: the form instance
"""
return {
'non_field': form.non_field_errors(),
'field_specific': OrderedDict(
(field, form.errors[field.name])
for field in form
if field.name in form.errors
)
} | 056597492d24dc406c9d952f5cb56c14d0a75fff | 702,786 |
from typing import List
from typing import Dict
def get_vrf_group(files_list: List[str]) -> Dict[str, List[str]]:
"""
Group files by VRF name.
"""
groups = {}
for filename_path in files_list:
filename_path = filename_path.replace("\\", "/")
# print(filename_path)
if "show" in filename_path \
or "display" in filename_path:
continue
"""
Example)
"switch-001_192.168.252.201_20191028-142654_vrf_10.log"
vrf_name = "_vrf_10.log"
"""
pos1 = filename_path.rfind("_")
pos2 = filename_path[:pos1].rfind("_")
vrf_name = filename_path[pos2:]
# Group by vrf_name and store a list of file names in the list.
# print(vrf_name)
if not vrf_name in groups.keys():
groups[vrf_name] = []
groups[vrf_name].append(filename_path)
return groups | 348f1c10f4bd054ca2f45f36b5420a971b52e4cf | 702,788 |
def scr_total(
bscr,
scr_op
):
""" This function simply adds the SCR_Op to the BSCR """
return bscr + scr_op | 7d1711f75abae59b79cf62f6e64daeb7e4c556eb | 702,789 |
import re
import six
def load_tff_dat(fname, processor=None):
"""Read a tff.dat or dff.dat files generated by tff command
Parameters
----------
fname : file or str
File, or filename
processor: callable or None
A final output processor, by default a tuple of tuples is returned
Returns
-------
Whathever the processor return or a tuple of tuples
"""
processor = processor or tuple
def gen(fp):
buff = []
for line in fp:
            line = re.sub(r"\*{2,}", "nan", line)  # replace all runs of **** with nan
# new source?
if buff and not line.startswith(" "):
yield tuple(buff)
buff = []
buff.extend(line.strip().split())
if buff:
yield tuple(buff)
if isinstance(fname, six.string_types):
with open(fname) as fp:
generator = gen(fp)
return processor(generator)
generator = gen(fname)
return processor(generator) | 55f9ba3915c2d31cb83b8ea26de996f8f29e5e43 | 702,790 |
from typing import List
def is_luhn(string: str) -> bool:
"""
Perform Luhn validation on input string
Algorithm:
* Double every other digit starting from 2nd last digit.
* Subtract 9 if number is greater than 9.
    * Sum the numbers.
>>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713,
... 79927398714, 79927398715, 79927398716, 79927398717, 79927398718,
... 79927398719]
>>> test_cases = list(map(str, test_cases))
>>> list(map(is_luhn, test_cases))
[False, False, False, True, False, False, False, False, False, False]
"""
check_digit: int
_vector: List[str] = list(string)
__vector, check_digit = _vector[:-1], int(_vector[-1])
vector: List[int] = [*map(int, __vector)]
vector.reverse()
for idx, i in enumerate(vector):
if idx & 1 == 0:
doubled: int = vector[idx] * 2
if doubled > 9:
doubled -= 9
check_digit += doubled
else:
check_digit += i
if (check_digit) % 10 == 0:
return True
return False | 92253489a18efc902198d5eb3fb93a06a74a3246 | 702,791 |
import glob
def patternMatch(pattern, dir='./'):
"""
:pattern: A file pattern to match the desired output. Input to a glob, so use traditional unix wildcarding.
:dir: The directory to search.
:returns: list of matching files in the target directory
"""
    return glob.glob(dir + pattern) | d5e9b1d531cdfa3ebca3baea2b8e273621df3357 | 702,793 |
def removeBottomMargin(image, padding):
"""Remove the bottom margin of width = padding from an image
Args:
image (PIL.Image.Image): A PIL Image
padding (int): The padding in pixels
Returns:
PIL.Image.Image: A PIL Image
"""
return image.crop((0, 0, image.width, image.height - padding)) | 69cd12d6c3ed0b857bae3f42c34e9754fa3620f3 | 702,794 |
def get_syst ( syst , *index ) :
"""Helper function to decode the systematic uncertainties
Systematic could be
- just a string
- an object with index: obj [ibin]
- a kind of function: func (ibin)
"""
if isinstance ( syst , str ) : return syst
elif syst and hasattr ( syst , '__getitem__' ) : return str ( syst [ index ] )
elif syst and callable ( syst ) : return str ( syst ( *index ) )
elif syst : raise AttributeError("Invalid systematic %s/%s" % ( syst , type( syst ) ) )
return '' | 37b2b39245587da16345752e02759d2c94c93415 | 702,800 |
def find_projects(company_name, project_list):
"""returns list of projects associated with company_name
:param company_name: name of company to return projects for
:type company_name: str
:param project_list: list of projects as dictionaries
:type project_list: list
:return: list
"""
result = []
for project in project_list:
if project['company'] == company_name:
result.append(project)
return result | e2e193aa103bec6620fb17679ff02de92c3f299e | 702,803 |
def test_retrieve_and_encode_simple(test_client, test_collection_name):
"""Test retrieving documents and encoding them with vectors.
"""
VECTOR_LENGTH = 100
def fake_encode(x):
return test_client.generate_vector(VECTOR_LENGTH)
# with TempClientWithDocs(test_client, test_collection_name, 100) as client:
test_client.insert_documents(test_collection_name, test_client.create_sample_documents(100))
results = test_client.retrieve_and_encode(test_collection_name,
models={'country': fake_encode})
assert list(test_client.collection_schema(test_collection_name)['country_vector_'].keys())[0] == 'vector'
assert len(results['failed_document_ids']) == 0
assert 'country_vector_' in test_client.collection_schema(test_collection_name)
docs = test_client.retrieve_documents(test_collection_name)['documents']
assert len(docs[0]['country_vector_']) == VECTOR_LENGTH | 4fb6b1ea0278575ff53778dbefe8fb4f12a9abc2 | 702,810 |
def scale_from_internal(vec, scaling_factor, scaling_offset):
"""Scale a parameter vector from internal scale to external one.
Args:
        vec (np.ndarray): Parameter vector on the internal scale.
scaling_factor (np.ndarray or None): If None, no scaling factor is used.
scaling_offset (np.ndarray or None): If None, no scaling offset is used.
Returns:
np.ndarray: vec with external scale
"""
if scaling_factor is not None:
vec = vec * scaling_factor
if scaling_offset is not None:
vec = vec + scaling_offset
return vec | c7f2471d2a7776f8756d709d0288163aab3594ae | 702,816 |
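A quick numeric check, assuming numpy:
import numpy as np

vec = scale_from_internal(np.array([1.0, 2.0]), np.array([10.0, 10.0]), np.array([5.0, 0.0]))
print(vec)  # [15. 20.]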
def get_nth_combination(
iterable,
*,
items: int,
index: int,
):
"""
Credit to:
https://docs.python.org/3/library/itertools.html#itertools-recipes
Examples:
>>> wallet = [1] * 5 + [5] * 2 + [10] * 5 + [20] * 3
>>> get_nth_combination(wallet, items=3, index=454)
(20, 20, 20)
>>> get_nth_combination(wallet, items=3, index=-1)
(20, 20, 20)
>>> get_nth_combination(wallet, items=3, index=455)
Traceback (most recent call last):
...
IndexError: Index 455 out of bounds
>>> get_nth_combination(wallet, items=3, index=-454)
(1, 1, 1)
>>> get_nth_combination(wallet, items=len(wallet), index=0)
(1, 1, 1, 1, 1, 5, 5, 10, 10, 10, 10, 10, 20, 20, 20)
>>> get_nth_combination(wallet, items=len(wallet), index=1)
Traceback (most recent call last):
...
IndexError: Index 1 out of bounds
"""
space = tuple(iterable)
n = len(space)
if items < 1:
msg = f'Argument must be positive'
raise ValueError(msg)
if items > n:
msg = f'Sample space has {n} items; Argument items cannot exceed that'
raise ValueError(msg)
c = 1
k = min(items, n - items)
for i in range(1, k + 1):
c = c * (n - k + i) // i
orig_index = index
if index < 0:
index += c
if index < 0 or index >= c:
msg = f'Index {orig_index} out of bounds'
raise IndexError(msg)
result = []
while items:
c, n, items = c * items // n, n - 1, items - 1
while index >= c:
index -= c
c, n = c * (n - items) // n, n - 1
result.append(space[-(n + 1)])
return tuple(result) | 6ed186d260ca86c0f16d383576e69402d079ed9b | 702,820 |
def capped(value, minimum=None, maximum=None, key=None, none_ok=False):
"""
Args:
value: Value to cap
minimum: If specified, value should not be lower than this minimum
maximum: If specified, value should not be higher than this maximum
key (str | None): Text identifying 'value' (ValueError is raised if provided and `value` is not within bounds)
none_ok (bool): True if `None` value is considered OK
Returns:
`value` capped to `minimum` and `maximum` (if it is outside of those bounds)
"""
if value is None:
if none_ok:
return None
if key and not none_ok:
raise ValueError("'None' is not acceptable for '%s'" % key)
return minimum if minimum is not None else maximum
if minimum is not None and value < minimum:
if key:
raise ValueError("'%s' value %s is lower than minimum %s" % (key, value, minimum))
return minimum
if maximum is not None and value > maximum:
if key:
raise ValueError("'%s' value %s is greater than maximum %s" % (key, value, maximum))
return maximum
return value | 663a63041699f4e4f52886adbd49423bf52c0282 | 702,828 |
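For example:
print(capped(15, minimum=0, maximum=10))      # 10
print(capped(-3, minimum=0))                  # 0
print(capped(None, minimum=0, none_ok=True))  # None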
def row(ctx):
"""Get this cell's row."""
return ctx["cell"].row | 4cfc89daa3ca771359acd762d716316209ca0eb4 | 702,829 |
def above_the_line(x_array, x1, x2):
"""
Return states above a specified line defined by (x1, x2).
We assume that a state has only two coordinates.
Parameters
----------
x_array: `np.array`
A 2-d matrix. Usually, an embedding for data points.
x1: `np.array`
A list or array of two entries.
x2: `np.array`
A list or array of two entries.
Returns
-------
A boolean array.
"""
return (x_array[:, 1] - x1[1]) > ((x2[1] - x1[1]) / (x2[0] - x1[0])) * (
x_array[:, 0] - x1[0]
) | d20b5d462b7254a93f7896b592ae25eae26075a7 | 702,831 |
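A quick check against the line y = x, assuming numpy:
import numpy as np

pts = np.array([[0.0, 2.0], [2.0, 0.0]])
print(above_the_line(pts, [0.0, 0.0], [1.0, 1.0]))  # [ True False]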
import torch
def _add_embedding_layer(model_1, model_2):
"""
Returns an embedding layer with a weight matrix
of the follwing structure:
[MODEL_1 EMBEDDING MATRIX ; MODEL_2 EMBEDDING MATRIX]
"""
result_layer = torch.nn.Embedding(
model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim
)
result_layer.weight = torch.nn.Parameter(
torch.cat((model_1.weight.data, model_2.weight.data), dim=1)
)
return result_layer | 2b4f4f3e36d56c57302cdcbf07c6cbbdb5165e11 | 702,833 |
def to_undirected(graph):
"""Returns an undirected view of the graph `graph`.
Identical to graph.to_undirected(as_view=True)
Note that graph.to_undirected defaults to `as_view=False`
while this function always provides a view.
"""
return graph.to_undirected(as_view=True) | 96ceb4e2d7dbe2a9c120b8e1ac7cad0ef2b2c6ae | 702,836 |
from typing import MutableMapping
from typing import Any
def remove_keys(
_dict: MutableMapping[str, Any] | None, keys: list[str]
) -> MutableMapping[str, Any]:
"""Remove keys from a dictionary."""
if not _dict:
return {}
new = dict(_dict)
for key in keys:
new.pop(key, None)
return new | 7a0ee8482eea69b0be7f7ecfd41355206adcf01c | 702,841 |
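For example:
print(remove_keys({"a": 1, "b": 2}, ["a", "c"]))  # {'b': 2}
print(remove_keys(None, ["a"]))                   # {}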
import struct
def read64(f):
"""Read 8 bytes from a file and return as an 64-bit unsigned int (little endian).
"""
return struct.unpack("<Q", f.read(8))[0] | 4a055188bd9db074ca3807771d779eccb25e5484 | 702,843 |
import re
def contain_static(val):
"""
Check if URL is a static resource file
- If URL pattern ends with
"""
if re.match(r'^.*\.(jpg|jpeg|gif|png|css|js|ico|xml|rss|txt).*$', val, re.M|re.I):
# Static file, return True
return True
else:
# Not a static file, return False
return False | 9b69c0e8c69f9a97abbea82855d0c387de2a381a | 702,844 |
def get_min(statistical_series):
"""
Get minimum value for each group
:param statistical_series: Multiindex series
:return: A series with minimum value for each group
"""
return statistical_series.groupby(level=0).agg('min') | c032f2f834cfe298a6c9f98c9116aaf354db0960 | 702,847 |
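A small usage sketch with a two-level MultiIndex, assuming pandas:
import pandas as pd

s = pd.Series([3, 1, 5, 2],
              index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]))
print(get_min(s).to_dict())  # {'a': 1, 'b': 2}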
import unicodedata
import re
def unaccented_letters(s: str) -> str:
"""Return the letters of `s` with accents removed."""
s = unicodedata.normalize('NFKD', s)
s = re.sub(r'[^\w -]', '', s)
return s | e75b8929f8bd800ad4c79ae5688dea1067c351c5 | 702,859 |
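For example (combining accents left by NFKD decomposition are not word characters, so the regex strips them):
print(unaccented_letters("Crème brûlée"))  # Creme brulee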
import glob
def get_lists_in_dir(dir_path):
"""
Function to obtain a list of .jpg files in a directory.
Parameters:
- dir_path: directory for the training images (from camera output)
"""
image_list = []
for filename in glob.glob(dir_path + '/*.jpg'):
image_list.append(filename)
return image_list | c59701c5c8327569a5efe68c2751b0568de0498e | 702,872 |
def walk_links_for_node(node, callback, direction, obj=None):
"""
Walks the each link from the given node. Raising a StopIteration will terminate the
traversal.
:type node: treestruct.Node
:type callback: (treestruct.Node, treestruct.Node, object) -> ()
:type direction: int
:type obj: Any
:return: Returns `obj` (or None if no `obj` is supplied).
:rtype: Any
"""
try:
visited = set()
queue = [node]
while queue:
node = queue.pop(0)
if node in visited:
continue
visited.add(node)
for connected_node in node.direction(direction):
callback(node, connected_node, obj)
queue.append(connected_node)
except StopIteration:
pass
return obj | a92cb831945a537c55ff4c014eedcb17b26ddd96 | 702,873 |
import configparser
def get_config_parser(filepath):
"""Create parser for config file.
:param filepath: Config file path
:type filepath: str
:return: configparser.ConfigParser instance
"""
config_parser = configparser.ConfigParser(interpolation=None)
# use read_file() instead of read() to catch possible OSError
with open(filepath) as cfg_file:
config_parser.read_file(cfg_file)
return config_parser | 224f524c60161bc45b1324b26eeb6d4715c43054 | 702,874 |
def load_id_map(id_file):
""" Load a ids file in to a barcode -> coordinate dictionary.
"""
id_map = {}
with open(id_file, "r") as fh:
for line in fh:
bc, x, y = line.split("\t")
id_map[bc] = (int(x), int(y))
return id_map | cd2c0635496209c3e19597fa45ae4f486779ceac | 702,875 |