def _edge_is_between_selections(edge, selection_a, selection_b):
"""
    Returns ``True`` if the edge has one end in each selection.
Parameters
----------
edge: tuple[int, int]
selection_a: collections.abc.Container[collections.abc.Hashable]
selection_b: collections.abc.Container[collections.abc.Hashable]
Returns
-------
bool
"""
return (
(edge[0] in selection_a and edge[1] in selection_b)
or (edge[1] in selection_a and edge[0] in selection_b)
) | 808ee767b44a05fb8258a2bef5621d22131e6467 | 20,050 |
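A quick usage sketch (the selections below are hypothetical, not part of the record):
selection_a = {1, 2}
selection_b = {3, 4}
_edge_is_between_selections((2, 3), selection_a, selection_b)  # True
_edge_is_between_selections((1, 2), selection_a, selection_b)  # False: both ends in selection_a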
from pathlib import Path
def get_file_path(path: Path, fn: str) -> Path:
"""
Find an available path for a file, using an index prefix.
Parameters
----------
path: Path
file path
fn: str
filename
    Returns
    -------
    Path
        An available file path of the form ``{index}_{fn}``.
"""
paths = path.glob(f"*_{fn}")
max_index = max((int(p.name.split("_")[0]) for p in paths), default=-1) + 1
return path / f"{max_index}_{fn}" | 54182f627cded53f5f6dabb5b32965bd1693a4ac | 20,058 |
import logging
def get_logger(name=None):
"""Return a logger to use.
Parameters
----------
name : None or str, optional
Name of the logger. Defaults to None.
Returns
-------
logging.Logger
logger object.
"""
return logging.getLogger("bids-schema" + (".%s" % name if name else "")) | e1bf5e385615a26391b5121752300e53d8e288d5 | 20,063 |
def _binary_search(array, elt):
"""Modified binary search on an array."""
start = 0
end = len(array) - 1
while start <= end:
mid = (start + end) // 2
if elt == array[mid]:
return mid + 1
if elt < array[mid]:
end = mid - 1
else:
start = mid + 1
    # The loop always exits with start > end; start is the insertion point.
    return start | 857e7d0d32522b11f30027eff4294efd6b5f5ac0 | 20,064 |
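A worked example of the modified behaviour (hypothetical inputs):
_binary_search([1, 3, 5, 7], 5)  # 3: index of the match plus one
_binary_search([1, 3, 5, 7], 4)  # 2: insertion point when the element is absent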
def dynamic_filter(func):
"""Function decorator that sets the wrapped function's 'dynamic_filter'
attribute to True.
"""
func.dynamic_filter = True
return func | a59cae5d20c367a1966cb35ca1c5b80ccd9d895a | 20,068 |
def actual_svg(pathname: str) -> str:
"""Read SVG image from disk."""
with open(pathname, "r") as file:
svg = file.read()
return svg | ba7ae52d3bdbae1d3112a183de2f484f4bcc066d | 20,073 |
def squash_flags(flags):
"""Remove lowercase flags if the respective uppercase flag exists
>>> squash_flags('abc')
'abc'
>>> squash_flags('abcC')
'ab'
>>> squash_flags('CabcAd')
'bd'
"""
exclude = ''.join(f.upper() + f.lower() for f in flags if f == f.upper())
return ''.join(f for f in flags if f not in exclude) | 1484a7f4b1764e1c48dc4eb07e15db98a7bb9881 | 20,075 |
def empty_stack(stack, graph):
"""
    Empties the stack. Any word that does not yet have a head is assigned
    the ROOT head.
:param stack:
:param graph:
:return:
"""
for word in stack:
if word['id'] not in graph['heads']:
graph['heads'][word['id']] = '0'
graph['deprels'][word['id']] = 'ROOT'
stack = []
return stack, graph | 323a841f359d3a9823bd6e43d3de7540b3f2a6df | 20,084 |
import logging
def render_disassembly(dis, match_offset, match_len, context_lines=4):
"""
Accepts a DecodeGenerator from distorm and returns a string that will be directly rendered in the ICE yara results page
dis: DecodeGenerator from distorm.Decode()
    match_offset: offset into the file where the match occurred
match_len: Length of yara match
context_lines: How many lines of disassembly to return before and after the matching lines
"""
    lines = []
    first_line = None
    last_line = None
    for i in range(len(dis)):
        instr = dis[i]
        asm = "0x{:08X} {:<20}{}".format(instr[0], instr[3], instr[2])
        if match_offset <= instr[0] < match_offset + match_len:
            lines.append("<b>{}</b>".format(asm))
            if first_line is None:  # 'is None', so a match at index 0 is recorded
                first_line = i
        else:
            lines.append(asm)
            if first_line is not None and last_line is None:
                last_line = i
    if last_line is None:  # match ran to the end of the disassembly
        last_line = len(lines)
    lines = (
        lines[:first_line][-context_lines - 1 :]
        + lines[first_line:last_line]
        + lines[last_line:][:context_lines]
    )
    logging.debug("Rendered disassembly: {}".format("\n".join(lines)))
return "\n".join(lines) | ce719252bae1f5833e788922832cf11207f63deb | 20,088 |
from typing import Counter
from typing import Tuple
from typing import List
def order_counts(counts: Counter[Tuple[int, ...]]) -> List[int]:
"""
Helper method for organising two-qubit correlations.
:param counts: Counter object as returned by BackendResult, giving two qubit counts for desired correlation
:type counts: Counter[Tuple[int, ...]]
:return: A four element list, giving counts for the (0,0), (0,1), (1,0) and (1,1) states in order.
:rtype: List[int]
"""
    # Counter returns 0 for missing keys, so no explicit membership checks are needed.
    ordered_counts = [counts[(0, 0)], counts[(0, 1)], counts[(1, 0)], counts[(1, 1)]]
    return ordered_counts | 7c538da5655d00da4399e0b33ededf4c86978848 | 20,091 |
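A usage sketch with a hand-built Counter standing in for a BackendResult:
from collections import Counter
counts = Counter({(0, 0): 48, (1, 1): 40, (0, 1): 7})
order_counts(counts)  # [48, 7, 0, 40]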
def boundary_subtraction(
*,
boundary: tuple[tuple[int, ...], ...],
subtracted: tuple[tuple[int, ...], ...],
) -> tuple[tuple[int, ...], ...]:
"""From a boundary composed of tuples of node number segments, subtracts the subset
of those segments contained in the subtracted tuple.
Arguments:
boundary (tuple[tuple[int, ...], ...]): A tuple of tuples of ints, where each
integer is a node number in sequence along a discrete boundary.
subtracted (tuple[tuple[int, ...], ...]): A subset of the boundary.
Returns:
tuple[tuple[int, ...], ...]: The difference of boundary less subtracted.
"""
output = tuple(
boundary_i for boundary_i in boundary if boundary_i not in subtracted
)
return output | 67047475ddff113c8b34637cb4686fb37b8377ee | 20,093 |
from datetime import datetime
from typing import Optional
def get_daily_filename(date: Optional[datetime] = None) -> str:
"""Returns the filename for the given date in the format:
'yyyy-mm-dd-nnn-day.md'
:param date [datetime.datetime] : Date for filename, defaults to datetime.today().
:return (str) : Filename
"""
    # Evaluate the default at call time; datetime.today() in the signature
    # would be frozen once at import time.
    if date is None:
        date = datetime.today()
    iso_date = date.strftime("%Y-%m-%d")  # ISO date
doy = date.strftime("%j") # Day of year
dow = date.strftime("%a").lower() # Day of week
return f"{iso_date}-{doy}-{dow}.md" | 0e954aec6de6914842417315b43a36ecbca9acdf | 20,094 |
def get_num_pages(page_souped):
"""
This gets the number of pages to search through.
:param page_souped: <class 'bs4.BeautifulSoup'>, a page that has been passed through BeautifulSoup().
:return: int, the number of pages to search through
"""
span_parsed = page_souped.find_all(lambda tag: tag.name == 'span' and tag.get('class') == ['pageRange'])
try:
span_parsed_contents_list = span_parsed[0].contents[0].split(' ')
except IndexError:
return 0
return int(span_parsed_contents_list[-1]) | 6930feb28b8a264433cbce93ed573b0c6172250b | 20,096 |
def copy_worksheet(workbook, worksheet):
"""
Creates a copy of the worksheet.
:param workbook: The workbook the worksheet is from.
:param worksheet: Worksheet to copy.
:return: A copy of the worksheet.
"""
return workbook.copy_worksheet(worksheet) | d650f9f056c1fa951dc1de28c362015726798f26 | 20,105 |
def get_answer(current_row):
"""Returns the answer text value og HTML element"""
return current_row.find_all("td", class_="risp")[0].text.strip() | 14e91c250d6f28b98534fe7839e47b3650750152 | 20,106 |
def _split_scene(images, cameras, top_down, captions):
"""Splits scene into query and target.
Args:
images: A tensor containing images.
cameras: A tensor containing cameras.
top_down: A tensor containing the scene seen from top.
captions: A tensor containing captions.
Returns:
A tuple query, target. The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
"""
context_image = images[:-1, :, :, :]
context_camera = cameras[:-1, :]
context_caption = captions[:-1]
target_image = images[-1, :, :, :]
target_camera = cameras[-1, :]
target_caption = captions[-1]
query = ((context_image, context_camera, context_caption), target_camera)
target = (target_image, target_caption, top_down)
return query, target | cb1aa58dcd3f3bd33f113fef7b38399effa484a7 | 20,108 |
import requests
from pathlib import Path
def download_url(url, download_path):
    """
    Download a URL
    url: url to download
    download_path: path where to save the downloaded url
    """
    # Only issue the request if the destination does not already exist.
    if not Path(download_path).exists():
        print("Downloading file {}".format(url))
        r = requests.get(url, stream=True)
        with open(download_path, "wb") as dest_file:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    dest_file.write(chunk)
        return 1
    else:
        return 0 | 2d95aeb5dbad228fa8a09279b5084f750c5b4362 | 20,110 |
def truncate(message, from_start, from_end=None):
"""
    Truncate the string *message* to at most *from_start* characters and
    insert an ellipsis (`...`) in place of the additional content. If *from_end*
    is specified, the same is applied to the end of the string.
"""
if len(message) <= (from_start + (from_end or 0) + 3):
return message
part1, part2 = message[:from_start], ''
if from_end and len(message) > from_end:
if len(message) - from_start < from_end:
from_end -= len(message) - from_start
part2 = message[-from_end:]
return part1 + '...' + part2 | 44e37164aff5912d37a3dc27217e498b1c14efff | 20,112 |
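A usage sketch (hypothetical message and limits):
truncate("The quick brown fox jumps over the lazy dog", 10)
# 'The quick ...'
truncate("The quick brown fox jumps over the lazy dog", 10, 8)
# 'The quick ...lazy dog'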
def get_key_padding_mask(padded_input, pad_idx):
"""Creates a binary mask to prevent attention to padded locations.
    Arguments
    ---------
    padded_input: torch.Tensor
        Padded input tensor.
pad_idx:
idx for padding element.
Example
-------
>>> a = torch.LongTensor([[1,1,0], [2,3,0], [4,5,0]])
>>> get_key_padding_mask(a, pad_idx=0)
tensor([[False, False, True],
[False, False, True],
[False, False, True]])
"""
if len(padded_input.shape) == 4:
bz, time, ch1, ch2 = padded_input.shape
padded_input = padded_input.reshape(bz, time, ch1 * ch2)
key_padded_mask = padded_input.eq(pad_idx).to(padded_input.device)
# if the input is more than 2d, mask the locations where they are silence
# across all channels
    if len(padded_input.shape) > 2:
        key_padded_mask = key_padded_mask.float().prod(dim=-1).bool()
    return key_padded_mask.detach() | 234d5f947b7042c5edad68e9b8162e4bbf6963f3 | 20,113 |
from typing import Iterable
import torch
from typing import Dict
from typing import List
def cluster_strings(strings: Iterable[str]) -> torch.Tensor:
"""
given a list of strings, assigns a clustering, where
each pair of identical ground truth strings is in the same
cluster
return a torch.LongTensor containing the cluster id of
each ground truth
"""
cluster_id_by_truth: Dict[str, int] = {}
cluster_l: List[int] = []
for n, truth in enumerate(strings):
cluster_id = cluster_id_by_truth.setdefault(truth, len(cluster_id_by_truth))
cluster_l.append(cluster_id)
return torch.tensor(cluster_l, dtype=torch.int64) | 7821fa946e7a07be13411f54913ee71f1e67dc3a | 20,116 |
def format_conditions(**kwargs):
"""Converts an arbitrary number of lists to a list of dictionaries. Useful
for specifying the conditions in pmutt.io.chemkin.write_EA
Parameters
----------
kwargs - keyword arguments
Lists of the conditions where each index corresponds to a run
Returns
-------
conditions - list of dict
A list where each element is a dictionary containing the conditions
for a specific run
"""
conditions = []
for cond_name, cond_values in kwargs.items():
for i, cond_value in enumerate(cond_values):
try:
conditions[i][cond_name] = cond_value
except (IndexError, KeyError):
conditions.append({cond_name: cond_value})
return conditions | a7ee7dcb6abff418c9795c9a1e604cab2bfbbe19 | 20,118 |
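A usage sketch with made-up temperature and pressure runs:
format_conditions(T=[300, 400], P=[1, 2])
# [{'T': 300, 'P': 1}, {'T': 400, 'P': 2}]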
def prefix(values, content):
"""
Discover start and separate from content.
:param list[str] values:
Will scan through up to the one ``content`` starts with.
:param str content:
The value to scan, will separate from the start if found.
:raises:
:class:`ValueError` if no start matches.
.. code-block:: py
>>> prefix(('-', '.', '!'), './echo')
        ('.', '/echo')
"""
for value in values:
if content.startswith(value):
break
else:
raise ValueError('invalid start')
size = len(value)
content = content[size:]
return (value, content) | 2bce07c6a33925434768e339e27ad44bbe543a26 | 20,125 |
from typing import List
def get_row(row: List[str], cells_per_row: int, cell_tag: str):
"""
:param row: list of cell contents
:param cells_per_row: how many cells per row
:param cell_tag: tag name for the cell, td and th being the possibilities known.
:return: html describing the row
"""
html_row = "\n<tr>\n"
    for i, cell in enumerate(row):
        if i > 0 and i % cells_per_row == 0:
            # sub-divide natural row width at every multiple, not just the first:
            html_row += "\n</tr>\n<tr>"
html_row += "<{}>".format(cell_tag) + cell + "</{}>".format(cell_tag)
return html_row + "\n</tr>" | cb919c311af3314e2c1eb25bb0949c836312f96c | 20,129 |
def gen(symbol, s1, s2, s3):
"""
Generate one quaternary formula.
:param symbol: Mask object
:param s1: Mask object
:param s2: Mask object
:param s3: Mask object
"""
result = '(' + symbol.outer
for s in [s1, s2, s3]:
result += ','
if len(s.outer) == 0:
result += '_'
else:
result += s.inner
result += ')'
return result | a3817b32521d420caa90f0b3c6511395030e48a9 | 20,131 |
def comp(z1, z2, tol):
"""Return a bool indicating whether the error between z1 and z2 is <= tol.
If z2 is non-zero and ``|z1| > 1`` the error is normalized by ``|z1|``, so
if you want the absolute error, call this as ``comp(z1 - z2, 0, tol)``.
"""
if not z1:
z1, z2 = z2, z1
if not z1:
return True
diff = abs(z1 - z2)
az1 = abs(z1)
if z2 and az1 > 1:
return diff/az1 <= tol
else:
return diff <= tol | 6c51adbae99f9d642b7e46010c96b3b1cf8ba44d | 20,135 |
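A few illustrative calls (values chosen by hand):
comp(1000.0, 1000.5, 1e-3)  # True: relative error 5e-4 <= tol since |z1| > 1
comp(1.0, 1.1, 1e-3)        # False: absolute error 0.1 > tol
comp(0.0005, 0, 1e-3)       # True: absolute error is used when z2 == 0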
def _as_dict(module):
"""
Returns publicly names values in module's __dict__.
"""
    try:
        return {k: v for k, v in module.__dict__.items() if not k.startswith('_')}
    except AttributeError:
        return {} | 5904d111040219f7f4b4c2f82e4d9f86cbe1c401 | 20,137 |
from typing import List
def keys_remove_dollar_suffixes(keys : List[str]) -> List[str]:
"""Removes dollar suffixes from keys."""
result = []
for key in keys:
i = key.find("$")
if i != -1:
result.append(key[:i])
else:
result.append(key)
return result | 355f521b04fd00a27130bf714559208b062fe79a | 20,150 |
def _get_or_create_personal_context(user):
"""Get or create personal context for user"""
personal_context = user.get_or_create_object_context(
context=1,
name='Personal Context for {0}'.format(user.id),
description='')
return personal_context | 6dc3cce9a0073db480608d1766e4892dfa66e310 | 20,152 |
import numbers
def all_numeric(data):
"""
Tests if all values in an iterable are numeric.
Args:
data: An iterable claiming to contain numbers
Returns:
A list containing a boolean indicating whether all of the
values were numbers and then a list of the genuine numeric values
"""
nums = [dt for dt in data if isinstance(dt, numbers.Number)]
return [len(nums) == len(data), nums] | b7ed6f5c37cb5cf41a3c5a337e686a4144d531dc | 20,157 |
def create_names(instances, Ns, str_format):
"""
Create a list of names for spectra loaded from task instances.
:param instances:
A list of task instances where spectra were loaded. This should
be length `N` long.
:param Ns:
A list containing the number of spectra loaded from each task instance.
This should have the same length as `instances`.
:param str_format:
A string formatting for the names. The available keywords include
all parameters associated with the task, as well as the `star_index`
and the `spectrum_index`.
:returns:
A list with length `sum(Ns)` that contains the given names for all
the spectra loaded.
"""
names = []
for star_index, (instance, N) in enumerate(zip(instances, Ns)):
kwds = instance.parameters.copy()
kwds.update(star_index=star_index)
for index in range(N):
kwds["spectrum_index"] = index
names.append(str_format.format(**kwds))
return names | f7d734e712769d86c3ca31483790c6a1e46c3962 | 20,160 |
def fluid_saturation(vol_fluid=50, vol_pore=100):
"""Returns the fluid saturation given the fluid volume and pore volume."""
return float(vol_fluid/vol_pore) | 30d608e384076ae94626a7f4351e58fe3d0e05bc | 20,163 |
def get_signed_polygon_area(points):
"""
Get area 2d polygon
:param points: list[DB.UV]
:type points: list[DB.UV]
:return: Area
:rtype: float
"""
area = 0
j = points[len(points) - 1]
for i in points:
area += (j.U + i.U) * (j.V - i.V)
j = i
return area / 2 | 960d5a3e5bff125fb560580f81b5103fa8147789 | 20,174 |
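A usage sketch with a namedtuple standing in for the DB.UV type assumed by the signature:
from collections import namedtuple
UV = namedtuple("UV", ["U", "V"])
square = [UV(0, 0), UV(1, 0), UV(1, 1), UV(0, 1)]
get_signed_polygon_area(square)  # -1.0: the sign encodes the winding direction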
def sec_to_exposure_decimation(sec):
"""
Convert seconds to exposure and decimation.
The algorithm is limited since it multiplies decimation by 10 until the
resulting exposure is less than 65_535. This is not perfect because it
limits decimation to 10_000 (the next step would be 100_000 which is
    bigger than the max decimation of 65_535).
The max theoretical value is ~497 days. This algorithm is limited to
~75 days. If it is not enough for you feel free to improve it :-)
(max theoretical = datetime.timedelta(seconds = 2**16 * 2**16 * 10E-3))
"""
decimation = 1
deci_millis = sec * 100
while (2 ** 16 * decimation) < deci_millis:
decimation *= 10
exposure = round(deci_millis / decimation)
return exposure, decimation | c880a371bc3aa9420de094df4815a876bb504b33 | 20,176 |
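A worked example: one hour only fits after decimation is raised to 10.
sec_to_exposure_decimation(3600)  # (36000, 10): 3600 s = 360_000 deci-millis
sec_to_exposure_decimation(1)     # (100, 1)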
def color_int_to_rgb(value: int) -> tuple[int, int, int]:
"""Return an RGB tuple from an integer."""
return (value >> 16, (value >> 8) & 0xFF, value & 0xFF) | ff68ff63032470d09c9222a7883b7c8848770985 | 20,178 |
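A quick check with an orange-ish color:
color_int_to_rgb(0xFF8000)  # (255, 128, 0)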
def generate_fibonacci_series(n):
"""Generate fibonacci series from 0 to nth term
args:
        n -> nth term of the fibonacci series
    Returns:
        A list of numbers of the fibonacci series up to the nth term
"""
n1 = 0
n2 = 1
count = 0
fibonacci_series = []
if not isinstance(n, int):
raise ValueError('nth term must be an integer')
elif n <= 0:
raise ValueError("nth term must be postive")
elif n == 1:
fibonacci_series.append(n1)
return fibonacci_series
else:
while count < n:
            fibonacci_series.append(n1)  # append the int, not its string form
nth = n1 + n2
n1, n2 = n2, nth
count += 1
return fibonacci_series | 8796ff1bc97a944b644ee7fe869e4b2407b6994e | 20,192 |
def generate_catalog_mags(instrument_mags, color, model):
"""
Generate catalog magnitudes from instrumental magnitudes
given a model that relates the two.
"""
return instrument_mags + model(color) | 0b39a7dae5eb1f573c62b25b7053acebf28e91d2 | 20,194 |
from typing import List
def generateBasisIndexList(basisStrList: List[str], sysLevel: int) -> List[int]:
"""
Return a list of integers which indicates the basis indices according to the input basis string list.
For example, ``generateBasisIndexList(['00', '01', '10', '11'], 3)`` will return:
``[0, 1, 3, 4]``
:param basisStrList: basis string list
:param sysLevel: the energy level of qubits in the system.
:return: basis indices list
"""
strLen = [len(item) for item in basisStrList]
assert max(strLen) == min(strLen), "All input digital strings should have same length."
digLen = max(strLen)
def translateStrToInt(strN: str) -> int:
""" Translate a string to int """
intNum = 0
for digIndex, charN in enumerate(strN):
dig = int(charN)
assert dig < sysLevel, f"Digit '{dig}' is greater than sysLevel '{sysLevel}'."
intNum += (sysLevel ** (digLen - digIndex - 1)) * dig
return intNum
basisIntList = []
for strNum in basisStrList:
basisIntList.append(translateStrToInt(strNum))
return basisIntList | aa11cd27a134b5ec432e957578908a64e2c1cc9e | 20,197 |
def get_urn_from_raw_update(raw_string):
"""
Return the URN of a raw group update
Example: urn:li:fs_miniProfile:<id>
Example: urn:li:fs_updateV2:(<urn>,GROUP_FEED,EMPTY,DEFAULT,false)
"""
return raw_string.split("(")[1].split(",")[0] | fa96086f79462354f70a19e4475da9e62a3e0046 | 20,198 |
def close_window(driver, w):
"""Close the window associated with the given window handle."""
driver.switch_to.window(w)
return driver.close() | f0b8cc5abd6703f5a1a056ffb24925d1d4e2c8e0 | 20,202 |
def disable(f):
"""Mark a test as disabled."""
f.__test__ = False
return f | afd3851496472d65748ea67a1f3e4860f379451c | 20,206 |
from typing import List
from typing import Tuple
def tuple_zip(list1: List, list2: List) -> List[Tuple]:
"""Creates tuples of elements having same indices from two lists.
doctests:
>>> tuple_zip([1, 2], ['x', 'y'])
[(1, 'x'), (2, 'y')]
>>> tuple_zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'])
[(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]
"""
    return list(zip(list1, list2)) | dbd3a162dc55ea70122483591c82c187fe4f1411 | 20,212 |
def NoneSafeType(_type):
"""
A hack for a "None-safe" typecaster. Given a type, it casts all
values to that type as the type would, except that None is always
cast to None.
"""
def caster(value):
if value is None:
return None
else:
return _type(value)
return caster | d8b763ec10ba16faf151dc20acf68fcb35286197 | 20,213 |
def isTruthy(value):
"""Converts any value to a boolean value; just uses the 'bool' built-in function,
except that strings like 'FALSE', 'false' and 'False', and strings that are
numeric values equal to 0, return False.
"""
if str(value).lower() == 'false':
return False
    try:
        return bool(float(value))
    except (TypeError, ValueError):
        return bool(value) | bfc7b3547c77f1d8642c0078c80740d632813b45 | 20,225 |
def p2a(p, m1, m2):
"""
It computes the separation (Rsun) given m1 (Msun), m2 (Msun) and p (days).
"""
    yeardy = 365.24
    AURsun = 214.95
    p = p / yeardy
    a = AURsun * (p * p * (m1 + m2)) ** (1. / 3.)
return a | a0c5d8c0d7b961e8017217f22f54aa2a70daf5a0 | 20,231 |
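A sanity check with the Earth-Sun pair (m2 taken as 0 for simplicity):
p2a(365.24, 1.0, 0.0)  # 214.95, i.e. 1 AU expressed in solar radii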
def calculate_bootstrap(bootstrap_size, length):
"""
Calculate the bootstrap size for the data of given length.
Parameters
----------
bootstrap_size : int, float, default=None
Bootstrap size for training. Must be one of:
- int : Use `bootstrap_size`.
- float : Use `bootstrap_size * n_samples`.
- None : Use `n_samples`.
length : int
Length of the data to be bootstrapped.
Returns
-------
bootstrap : int
Actual bootstrap size.
"""
if bootstrap_size is None:
return length
elif isinstance(bootstrap_size, int) and bootstrap_size > 0:
return bootstrap_size
elif isinstance(bootstrap_size, float) and 0 < bootstrap_size <= 1:
return int(bootstrap_size * length)
    else:
        raise ValueError("Bootstrap Size must be None, a positive int or float in (0,1]") | f6d0856322ac43638fd75b94838f9b04a6acabd1 | 20,235 |
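Usage sketch covering the three accepted forms:
calculate_bootstrap(None, 100)  # 100
calculate_bootstrap(50, 100)    # 50
calculate_bootstrap(0.8, 100)   # 80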
def totM(m1, m2):
"""The total mass shows up in Kepler formulae, m1+m2
"""
    return m1 + m2 | 1be75e653582e7f9eaafaf07d1f946735eb6f66e | 20,241 |
from pathlib import Path
def tmp_dir(tmp_path) -> Path:
"""
Returns `Path` to the temporary directory.
    If it does not exist, it is created first.
"""
dir_path = tmp_path / "tmp_dir"
if not dir_path.exists():
dir_path.mkdir()
return dir_path | 39c61278f942cc9cbfd531bf3e7be296ca012d77 | 20,247 |
def generate_batch_spec(mode, batch_size):
""" Generates a spec describing how to draw batches
Args:
mode: one of ['train', 'test', 'val']
"""
assert mode in ['train', 'test', 'val']
# on a more complicated dataset this would include useful arguments
# such as whether to augment the data, what data source to draw from....
# this example is simple enough to not need a spec so this is just for illustration purposes
batch_spec = {
'mode': mode,
'batch_size': batch_size
}
return batch_spec | 232991fc65e037f5c860f8ca9dc2fa832b7062e8 | 20,250 |
def get_star_column_number(line):
""" For a line in a star file describing a column entry (e.g., '_rlnEstimatedResolution #5'), retrieve the value of that column (e.g. 5)
"""
column_num = int(line.split()[1].replace("#",""))
return column_num | 3cd4e981b1486167fdad0e6cbfeb5b36e88c4a1a | 20,251 |
from typing import Iterable
from typing import List
def flatten_lists(list_of_lists:Iterable) -> List:
""" Flatten a list of iterables into a single list
This function does not further flatten inner iterables.
Parameters
----------
list_of_lists : typing.Iterable
The iterable to flatten
Returns
-------
flattened_list: typing.List
The flattened list
"""
return [item for sublist in list_of_lists for item in sublist] | 5ba8f5add9f8f1fd7fad50fbaea765655b183718 | 20,253 |
def _convert_float32_to_float64(data):
"""
Converts DataArray values of float32 to float64
:param data: Xarray dataset of coverage data
:returns: Xarray dataset of coverage data
"""
for var_name in data.variables:
if data[var_name].dtype == 'float32':
og_attrs = data[var_name].attrs
data[var_name] = data[var_name].astype('float64')
data[var_name].attrs = og_attrs
return data | 1a66c6de0de7ff2c79d7e03c017b79b78cb43639 | 20,256 |
def _pisano_period_len(modulo):
"""
In number theory, the nth Pisano period, written π(n),
is the period with which the sequence of Fibonacci numbers taken modulo n repeats.
Args:
modulo: modulo
Returns:
length of Pisano period
"""
init_array = [0, 1]
idx = 1
    while True:
idx += 1
init_array.append(init_array[idx - 1] % modulo + init_array[idx - 2] % modulo)
if init_array[idx] % modulo == 1 and init_array[idx - 1] % modulo == 0:
return len(init_array) - 2 | 776db9e0e8fd2af28159b9616018ff000dd55154 | 20,265 |
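Two known Pisano periods as a sanity check:
_pisano_period_len(3)   # 8
_pisano_period_len(10)  # 60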
def potency_tensor(normal, slip):
"""
Given a fault unit normal and a slip vector, return a symmetric potency
tensor as volume components (W11, W22, W33), and shear components
(W23, W31, W12).
"""
v = [
normal[0] * slip[0],
normal[1] * slip[1],
normal[2] * slip[2],
]
s = [
0.5 * (normal[1] * slip[2] + normal[2] * slip[1]),
0.5 * (normal[2] * slip[0] + normal[0] * slip[2]),
0.5 * (normal[0] * slip[1] + normal[1] * slip[0]),
]
return [v, s] | 87c1a08d74e8e2dfdd51f32a19ee44dd543a4c4c | 20,266 |
def extract_doi_suffix(protocol_doi):
"""
DOIs come in a format like 'dx.doi.org/10.17504/protocols.io.bazhif36'.
We just need the 'protocols.io.bazhif36' element to form our query url.
"""
return protocol_doi.split("/")[2] | 9dbb9d44b9159bd9b3168169e17c6a0d66fe95d8 | 20,268 |
import re
def fenced_bootstrap(block):
"""Set up a fenced block for bootstrap prettify highlighting."""
pattern = re.compile(r'```(?P<lang>\w+)?(?P<code>.*?)```', re.MULTILINE|re.DOTALL)
match = pattern.match(block)
if not match:
return block
lang = match.groupdict().get('lang', None)
code = match.groupdict().get('code', '')
return '''<pre class="prettyprint linenums">%s</pre>''' % code | 34140706556e0aa4fa80b946eb2f4fbbb3018d02 | 20,270 |
def find_token_by_position(tokens, row, column):
"""Given a list of tokens, a specific row (linenumber) and column,
a two-tuple is returned that includes the token
found at that position as well as its list index.
If no such token can be found, ``None, None`` is returned.
"""
for index, tok in enumerate(tokens):
if (
tok.start_row <= row <= tok.end_row
and tok.start_col <= column < tok.end_col
):
return tok, index
return None, None | d5493b596761bc7620aac2f54c0ccd9cc8982d7b | 20,271 |
def read_txt_file(file_name):
"""Read the content of a text file with name `file_name` and return content as a list of lines"""
with open(file_name, 'r') as file:
lines = file.readlines()
return lines | 0231bd41327ae9c082502d9cf5c712978d88df26 | 20,272 |
from typing import List
import math
def chunks(arr, m) -> List[list]:
"""分割列表,但是子list元素个数尽可能平均
Args:
arr: 待分割的list
m: 分成几份
Returns:
分割后的每个子list都是返回结果list的一个元素
"""
n = int(math.ceil(len(arr) / float(m)))
return [arr[i : i + n] for i in range(0, len(arr), n)] | 040b75647fcdd72cac561bb43019c50c64dec51d | 20,279 |
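A usage sketch; note the ceiling split front-loads the chunks:
chunks([1, 2, 3, 4, 5, 6, 7], 3)  # [[1, 2, 3], [4, 5, 6], [7]]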
import torch
def flatten_parameters_wg(model):
"""
Flattens parameters of a model but retains the gradient
    :return: 1D torch tensor with size N, with N the number of model parameters
"""
return torch.cat([p.view(-1) for p in model.parameters()]) | 1bdf6779099e37dce5179ce1fe5e63472ea72fbf | 20,280 |
def rpc_plugins_list(handler):
"""
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
"""
plugin_manager = handler.server.plugin_manager
plugins = {}
for _, plugin in plugin_manager:
plugins[plugin.name] = {
'description': plugin.formatted_description,
'name': plugin.name,
'title': plugin.title,
'version': plugin.version
}
return plugins | d534e3ba9379947821fc2a49b0439b4a4269d9ff | 20,289 |
def list_instruments(snap):
"""
List instruments from a snapshot
"""
return (list(snap['station']['instruments'].keys()) +
list(snap['station']['components'].keys())) | a4abe5bc9884d80b014bbf6eb4688349114ed664 | 20,291 |
def insertion_sort(some_list):
"""
https://en.wikipedia.org/wiki/Insertion_sort
Split the array into a "sorted" and "unsorted" portion. As we go through the unsorted portion we will backtrack
through the sorted portion to INSERT the element-under-inspection into the correct slot.
O(N^2)
"""
iters = 0
# We get to ignore the first element of the unsorted portion, as it becomes the first element of the sorted portion.
for i in range(1, len(some_list)):
iters += 1
# Keep track of where we are in the unsorted portion of the list.
elem = some_list[i]
hole_pos = i # hole_pos is index, in unsorted portion, of the hole
# We're iterating right to left. We want to stop iterating when the element to the left of our hole position is
# less than the element we're trying to insert.
while (hole_pos > 0) and (some_list[hole_pos - 1] > elem):
iters += 1
# Shift each element one space to the right. Keeps a clear space for our insertion.
some_list[hole_pos] = some_list[hole_pos - 1]
# Continue to move left.
hole_pos = hole_pos - 1
# Insert the element into the sorted portion of the list where the hole is.
some_list[hole_pos] = elem
return iters, some_list | ce5be31c03aa925f567c9880cd81281cf3c5af96 | 20,292 |
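A usage sketch; the iteration count depends on how unsorted the input is:
iters, result = insertion_sort([5, 2, 4, 6, 1, 3])
result  # [1, 2, 3, 4, 5, 6]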
def Nop(x):
"""empty function for tree traversal"""
return True | 120feb2ba4e1eaa291eb8db4a5586bfd8478f8f2 | 20,294 |
def doNothing(rawSolutions):
"""
Contrary to its name, this function returns its input argument in a list structure, whose sole element is said input argument.
"""
return [rawSolutions] | 8a59f34dba4bcd00a0d69eb2875d8b5407fb737f | 20,297 |
from typing import Dict
from typing import Any
from typing import List
def validate_required_keys_for_add_asset(valid_json: Dict[str, Any], required_keys: List[str]) -> bool:
"""
Check if the required keys for adding an asset are present or not
:param valid_json: The valid input asset JSON
:param required_keys: The required keys for creating an asset
:return: True if the required values are present else false
"""
    return all(key in valid_json for key in required_keys) | e6dd5fe20891af30fa997e19b8c08716340fe7b5 | 20,298 |
def create_user(txn, email, password):
"""Creates a user node."""
query = """
MERGE (a:User {email: $email,
password: $password})
"""
return txn.run(
query,
email=email,
password=password) | 0ce6800de8ea4be5891ae38017bcc6b1e97e7ddd | 20,313 |
from datetime import datetime
def isday(yyyymmdd):
"""Is the yyyymmdd formatted as 'YYYY-MM-DD' such as '2020-03-18'"""
try:
datetime.strptime(yyyymmdd, '%Y-%m-%d')
return True
except ValueError:
return False | 9ec31aa0cc4d924d5ae0df2510fb6258df6668a2 | 20,315 |
import re
def parse_variable_declaration(srcline):
"""Return (name, decl) for the given declaration line."""
# XXX possible false negatives...
decl, sep, _ = srcline.partition('=')
if not sep:
if not srcline.endswith(';'):
return None, None
decl = decl.strip(';')
decl = decl.strip()
m = re.match(r'.*\b(\w+)\s*(?:\[[^\]]*\])?$', decl)
if not m:
return None, None
name = m.group(1)
return name, decl | ce84f493845fe7a8d72b62bfb69a34ee25114e18 | 20,318 |
def get_exception_name(node):
"""
Find the name of an exception
Args:
node: Parso node containing the raises statement
Returns:
str: The exception name
"""
name = node.children[1]
while not name.type == 'name':
name = name.children[0]
return name.value | 0ab7841a4a1044c990fc8d8490d2ff040bd45d05 | 20,321 |
def calculate_depth_loss(est_depths, gt_depths, loss_type="l1"):
"""Calculate loss between estimated depthmap and GT depthmap
Args:
est_depths: [B,1,H,W]
gt_depths: [B,1,H,W]
loss_type: Choose loss type from ['l1','l2']
"""
assert est_depths.dim() == gt_depths.dim(), "inconsistent dimensions"
assert loss_type in ["l1", "l2"], "loss_type should be l1/l2"
valid_mask = (gt_depths > 0).detach()
diff = est_depths - gt_depths
if loss_type == "l1":
return diff[valid_mask].abs().mean()
elif loss_type == "l2":
return (diff[valid_mask] ** 2).mean() | fbe65b8c9f5e8546e0f633f07b65e22be84df67d | 20,324 |
def _intersect_point2_circle(P, C):
"""
    Returns True if point P lies within circle C.
@type P : Point2 instance
@type C : Circle instance
"""
return abs(P - C.c) <= C.r | 58522af7eed88e90c6377be5393da82d8ff99529 | 20,329 |
def reshape_pivot(df_in):
"""
Reduce df to crucial subset then pivot on cases and genes.
"""
df = (df_in[['case_barcode', 'Hugo_Symbol', 'mutation_count']]
.copy()
.pivot(index='case_barcode', columns='Hugo_Symbol', values='mutation_count')
.fillna(0)
.astype(int))
return df | 1f95f1bb496f2a3734cb32d10f748c4ccb936cbc | 20,332 |
from datetime import datetime
from dateutil import tz
def date_iso(date):
"""Convert a datetime string into ISO 8601 format.
HTML date format agrees with ISO 8601 (see also, :RFC:`3339`), ie::
YYYY[-MM[-DD]][Thh[:mm[:ss[.s]]]T]
For more information:
* `Date and Time Formats:
<https://www.w3.org/TR/NOTE-datetime>`_
* `Date formats:
<https://www.w3.org/International/questions/qa-date-format>`_
:param date: Datetime object
:type date: datetime.datetime
:return: Datetime formatted as ISO 8601, or empty string if invalid
:rtype: str
.. note::
If the datetime object is timezone naïve, it'll be localized to
UTC, so the feed parser, and any other function that requires
timezone aware datetime objects, do not raise an exception.
"""
if date and not datetime.strftime(date, '%z'):
date = date.replace(tzinfo=tz.tzutc())
return datetime.isoformat(date, timespec='minutes') if date else '' | 0fa1e5ee9087caeaea302e6dc4bfe70262c9507a | 20,334 |
def kill_process(device, process="tcpdump"):
"""Kill any active process
:param device: lan or wan
:type device: Object
:param process: process to kill, defaults to tcpdump
:type process: String, Optional
    :return: Console output of sync sendline command after kill process
:rtype: string
"""
device.sudo_sendline("killall %s" % process)
device.expect(device.prompt)
device.sudo_sendline("sync")
device.expect(device.prompt)
return device.before | 8b892b79c07feff586ddd0576cc18ab92551c322 | 20,336 |
import json
def read_test_file(path):
"""Read a test file.
Parameters
----------
path : str
The path to a test file.
Returns
-------
typing.Tuple[str, typing.List[str]]
"""
with open(path) as f:
dct = json.load(f)
return dct['text'], dct['sentences'] | a416a4031ff355134004dde233d741899a35b28b | 20,339 |
def make_xml_name(attr_name):
""" Convert an attribute name to its XML equivalent by replacing
all '_' with '-'. CamelCase names are retained as such.
:param str attr_name: Name of the attribute
:returns: Attribute name in XML format.
"""
return attr_name.replace('_', '-') | f1463c4592edd40c20f030cbd9add83a3cf93577 | 20,342 |
import typing
def query_bt_info(
start: typing.Dict[str, typing.Any],
composition_key: str,
wavelength_key: str,
default_composition: str = "Ni"
) -> dict:
"""Query the necessary information for the PDFGetter."""
if composition_key in start:
composition = start[composition_key]
if isinstance(composition, dict):
composition_str = "".join(["{}{}".format(k, v) for k, v in composition.items()])
elif isinstance(composition, str):
composition_str = composition
else:
raise ValueError("Cannot parse composition: {}".format(type(composition)))
else:
composition_str = default_composition
if wavelength_key in start:
wavelength = float(start[wavelength_key])
else:
wavelength = None
return {
"composition": composition_str,
"wavelength": wavelength
} | 520f006a9fdf479089230f6af123b2a9e8838de9 | 20,343 |
def other(player):
""" Returns the given player's opponent.
"""
if player == 'X':
return 'O'
return 'X' | e23ed765c5331ae47d284e71835b6c0897612b5c | 20,345 |
def get_module(module_name: str):
"""
Retrieves a module.
"""
try:
# import importlib
# module = importlib.import_module(module_name)
# requiring parameter `fromlist` to get specified module
module = __import__(module_name, fromlist=[''])
return module
except ImportError:
return None | 76d9ebd5b7a8a2450f4a740720152f85bf56ef76 | 20,346 |
import re
def check_package_name(package_name):
"""Check that package name matches convention
Args:
package_name: The package name to validate
Returns:
A boolean determining whether the package name is valid or not
"""
    m = re.fullmatch('[a-z0-9_]{3,30}', package_name)
    return m is not None | 28d22f69d3926152aabbb835ada8bb8d31553a01 | 20,347 |
import random
def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
"""Generate a random IP address.
You can also specify an IP address prefix if you are interested in
local network address generation, etc.
:param bool ip3: Whether to generate a 3 or 4 group IP.
:param bool ipv6: Whether to generate IPv6 or IPv4
:param list prefix: A prefix to be used for an IP (e.g. [10, 0, 1]). It
must be an iterable with strings or integers. Can be left
unspecified or empty.
:returns: An IP address.
:rtype: str
:raises: ``ValueError`` if ``prefix`` would lead to no random fields at
all. This means the length that triggers the ``ValueError`` is 4 for
regular IPv4, 3 for IPv4 with ip3 and 8 for IPv6. It will be raised in
any case the prefix length reaches or exceeds those values.
"""
# Set the lengths of the randomly generated sections
if ipv6:
rng = 8
elif ip3:
rng = 3
else:
rng = 4
prefix = [str(field) for field in prefix]
# Prefix reduces number of random fields generated, so subtract the length
# of it from the rng to keep the IP address have correct number of fields
rng -= len(prefix)
if rng == 0:
raise ValueError(
'Prefix {} would lead to no randomness at all'.format(
repr(prefix)))
elif rng < 0:
raise ValueError(
'Prefix {} is too long for this configuration'.format(
repr(prefix)))
random.seed()
if ipv6:
# StackOverflow.com questions: generate-random-ipv6-address
random_fields = [
'{0:x}'.format(random.randint(0, 2 ** 16 - 1)) for _ in range(rng)]
ipaddr = ':'.join(prefix + random_fields)
else:
random_fields = [str(random.randrange(0, 255, 1)) for _ in range(rng)]
ipaddr = '.'.join(prefix + random_fields)
if ip3:
ipaddr = ipaddr + '.0'
return ipaddr | 173551883ab17f033f043dbbf5fe7f2c8e87c2ff | 20,349 |
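Illustrative calls (outputs are random; the ones shown are just examples):
gen_ipaddr(prefix=[10, 0])               # e.g. '10.0.56.204'
gen_ipaddr(ip3=True, prefix=[192, 168])  # e.g. '192.168.4.0'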
def determine_layers_below_above(layers, values, elevation):
"""Determine the layers below and above the current elevation
Args:
layers [<str>]: All the pressure layers to load for each parameter
values [<LayerInfo>]: All the interpolated layers information
elevation <float>: The elevation to determine the layers for
Returns
<str>: Below layer ID
<str>: Above layer ID
<int>: Index to the above layer
"""
index = 0
index_below = 0
index_above = len(layers) - 1
for layer in layers:
if values[layer].hgt >= elevation:
index_below = index - 1
index_above = index
break
index += 1
# Only need to check for the low height condition
# We should never have a height above our highest elevation
if index_below < 0:
index_below = 0
index_above = 1
return (layers[index_below], layers[index_above], index_above) | bc0b0846037fd9a415c091264213684ee088174c | 20,357 |
def get_suffix_side(node, matcher):
"""Check if (not) adding char node to right syllable would cause creating word suffix.
Return 1 if char node should be added to right,
-1 if to left,
0 if cannot determine."""
if node.next.syllable is None or node.next.syllable.is_last() is False:
return 0
left_score = matcher.is_suffix(str(node.next.syllable))
right_score = matcher.is_suffix(str(node) + str(node.next.syllable))
return right_score - left_score | c46ed8605c0eef77059804802e7151b75b0388b0 | 20,358 |
def _sanitize_git_config_value(value: str) -> str:
"""Remove quotation marks and whitespaces surrounding a config value."""
return value.strip(" \n\t\"'") | 8945955e6ac4811a637d9df6eac0f7f29a5e55eb | 20,360 |
def is_urn(s: str):
"""Test if is uuid string in urn format"""
    return isinstance(s, str) and s.startswith("urn:uuid:") | 7f5bbf7dad8e86a687230c29a50f3218198a8286 | 20,366 |
def get_snirf_measurement_data(snirf):
"""Returns the acquired measurement data in the SNIRF file."""
return snirf["nirs"]["data1"]["dataTimeSeries"][:] | 2781c241aac66167e94f6873492d62a200081688 | 20,372 |
import json
def parse_sw(sw_file):
""" Parse stopword config. """
with open(sw_file, "r", encoding="utf-8") as sw_stream:
sw = json.load(sw_stream)
assert isinstance(sw["wordlist"], list)
for word in sw["wordlist"]:
assert isinstance(word, str)
stopword = sw["wordlist"]
return stopword | 8dc46a61c195f0ff47bbc825dd816c780ef6f0b5 | 20,373 |
def num_ints(lst):
"""
Returns: the number of ints in the list
Example: num_ints([1,2.0,True]) returns 1
Parameter lst: the list to count
Precondition: lst is a list of any mix of types
"""
result = 0 # Accumulator
for x in lst:
if type(x) == int:
result = result+1
return result | cba6c06bc1618dae1b7a6f515e7f9b42ca88187b | 20,375 |
def without_keys(d: dict, *rm_keys):
"""Returns copy of dictionary with each key in rm_keys removed"""
return {k: v for k, v in d.items() if k not in rm_keys} | 256597224426708c38369ba635b4fa1df15591be | 20,377 |
def retry_if(predicate):
"""
Create a predicate compatible with ``with_retry``
which will retry if the raised exception satisfies the given predicate.
:param predicate: A one-argument callable which will be called with the
raised exception instance only. It should return ``True`` if a retry
should be attempted, ``False`` otherwise.
"""
    def should_retry(exc_type, value, traceback):
        if predicate(value):
            return None
        # Python 3 re-raise, preserving the original traceback.
        raise value.with_traceback(traceback)
    return should_retry | e02401ba7eb9af88565e06c0013135bfcf711521 | 20,379 |
def next_pow2(n):
""" Return first power of 2 >= n.
>>> next_pow2(3)
4
>>> next_pow2(8)
8
>>> next_pow2(0)
1
>>> next_pow2(1)
1
"""
if n <= 0:
return 1
n2 = 1 << int(n).bit_length()
if n2 >> 1 == n:
return n
else:
return n2 | a94ee5064ae2f3540b65b03b85ab82290c26addd | 20,388 |
def merge_annotations(interaction_dataframe, columns):
"""Merges the annotations for a protein-protein interaction.
Given two columns of a protein-protein interaction dataframe, each of
which contains a type of annotation data, this function returns the merged
set of those annotations.
E.g. Column 1: {IPR011333, IPR003131, IPR000210}
Column 2: {GO:0046872}
Result: {GO:0046872, IPR011333, IPR003131, IPR000210}
Parameters
----------
interaction_dataframe : DataFrame
DataFrame containing protein-protein interactions and annotations.
columns : list
A list of annotation columns to merge. Expects the column contents to
be array-like, not strings.
Returns
-------
pandas Series of sets
Array containing a set of annotations for each interaction (row).
"""
# replace NaNs with empty sets
for i in columns:
interaction_dataframe.loc[interaction_dataframe[
i].isnull(), i] = interaction_dataframe.loc[interaction_dataframe[
i].isnull(), i].apply(lambda x: set())
# join all sets in each supplied column
# the unpacking operator can accept an array of lists or sets
merged_annotations = interaction_dataframe[columns].apply(
lambda x: set().union(*x), axis=1)
""" Example usage
Row entry:
interpro_xref_A {IPR029199, IPR026667}
interpro_xref_B {IPR027417, IPR001889, IPR013672}
Name: 1, dtype: object
Lambda result:
{'IPR001889', 'IPR013672', 'IPR026667', 'IPR027417', 'IPR029199'}
"""
return merged_annotations | e5dcacc645e1110c1515f5630cd9197a8b7656bd | 20,389 |
import math
def angle_between(pos1, pos2):
""" Computes the angle between two positions. """
diff = pos2 - pos1
# return np.arctan2(diff[1], diff[0])
return math.atan2(diff[1], diff[0]) | ee69333fbd188fe977ac64f91275962b34246ed4 | 20,394 |
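A usage sketch; the positions must support subtraction, e.g. numpy arrays:
import numpy as np
angle_between(np.array([0.0, 0.0]), np.array([1.0, 1.0]))  # ~0.7854 rad (45 degrees)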
def not_none(elem):
"""Check if an element is not None."""
return elem is not None | f91a28efc42e3d50515bb783d3f16a64cdcf9490 | 20,398 |
def format_weather_header_for_HELP(itype, iunits, city, lat=None):
"""
Prepare the header for the precipitation, air temperature and
global solar radiation input weather datafile for HELP. The format of the
header is defined in the subroutine READIN of the HELP Fortran source code.
"""
fheader = [['{0:>2}'.format(itype)], # 3: data was entered by the user.
['{0:>2}'.format(iunits)], # 1 for IP and 2 for SI
['{0:<40}'.format(city[:40])],
]
if lat is not None:
# Append the latitude if the data are solar radiation.
fheader.append(['{0:>6.2f}'.format(lat)])
else:
fheader.append([])
return fheader | 7d848481b7316cef4094c2d24b9978665a1c2e1d | 20,399 |
def wilight_to_hass_hue(value):
"""Convert wilight hue 1..255 to hass 0..360 scale."""
return min(360, round((value * 360) / 255, 3)) | 5a9021185f7bbb9bf1351b2df55207063ee49f9a | 20,407 |
def convert_triggers(events, return_event_ids=False):
"""Function to convert triggers to failed and successful inhibition.
Trigger codes:
stop_signal: 11,
go: 10,
response: 1,
stop_signal_only: 12,
failed_inhibition_response: 2,
failed_inhibition: 37,
successful_inhibition: 35
Parameters
----------
events : numpy array
The original events
    return_event_ids : bool
        If true, return an event_id dict that matches the new triggers.
Returns
-------
converted_events
The converted events.
"""
events_tmp = events.copy()
for idx, line in enumerate(events_tmp):
if line[2] == 20:
if events_tmp[idx + 1][2] == 1:
events_tmp[idx][2] = 30 # go_before_stop
elif (events_tmp[idx + 1][2] == 11) and (events_tmp[idx + 2][2] != 1):
events_tmp[idx][2] = 35 # successful inhibition
elif (events_tmp[idx + 1][2] == 11) and (events_tmp[idx + 2][2] == 1):
events_tmp[idx][2] = 37 # failed inhibition
events_tmp[idx + 2][2] = 2 # failed inhibition response
event_id = {
"stop_signal": 11,
"go": 10,
"response": 1,
"stop_signal_only": 12,
"failed_response": 2,
"failed_inhibition": 37,
"successful_inhibition": 35,
}
if return_event_ids:
return (
events_tmp,
event_id,
)
else:
return events_tmp | 69eca81afe81c4f161ee69e3a02f2e5706c1c5ee | 20,408 |
import random
def intRndPct(n, pct=20):
"""
Randomize an integer
Parameters
----------
n : int
pct : int, optional
Randomization factor in %. The default is 20.
Returns
-------
int
Randomized integer
"""
return int(n * (100.0 + random.uniform(-pct, pct)) / 100.0) | 5ef784b83d7e9e5c033932ec41d5426b775ece35 | 20,410 |
def user_index_by_id(slack_id, users):
"""
Returns index of user in users list, based on it's Slack id
:param slack_id:
:param users:
:return: Index in users or False
"""
found_users = [user for user in users if user['id'] == slack_id]
if len(found_users) == 0:
# not found
return False
elif len(found_users) == 1:
return users.index(found_users[0])
else:
# found more than one, this should never happen
return False | ec5fb9b382f71f1df4fa7778285679307c634c39 | 20,413 |
from pathlib import Path
import json
def load_json(path: Path):
"""
    Loads authentication keys/tokens.
:param path: JSON file to load the keys from
:return: Dictionary containing the key-file information
"""
with open(path, mode='r', encoding='utf8') as file:
return json.load(file) | e7368431970682451669603098d56c1c991750a5 | 20,416 |