content (string, lengths 35-416k) | sha1 (string, length 40) | id (int64, 0-710k)
---|---|---
def convert_datetime_to_timestamp(dt):
"""Convert pandas datetime to unix timestamp"""
return int(dt.timestamp()) | f9cf6223bfabfa54c00835b56bdce2d5b268afe7 | 702,533 |
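A hedged usage sketch (assumes pandas is available; a tz-aware timestamp keeps the expected value unambiguous):
>>> import pandas as pd
>>> convert_datetime_to_timestamp(pd.Timestamp("1970-01-01 00:00:10", tz="UTC"))
10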
def get_indicator_type_value_pair(field):
"""
Extracts the type/value pair from a generic field. This is generally used on
fields that can become indicators such as objects or email fields.
The type/value pairs are used in indicator relationships
since indicators are uniquely identified via their type/value pair.
This function can be used in conjunction with:
crits.indicators.handlers.does_indicator_relationship_exist
Args:
field: The input field containing a type/value pair. This field is
generally from custom dictionaries such as from Django templates.
    Returns:
        A (type, value) tuple extracted from the field, or (None, None) if
        extraction is not supported for this field type.
    """
    # this is an object
    if field.get("name") is not None and field.get("type") is not None and field.get("value") is not None:
        name = field.get("name")
        type = field.get("type")
        value = field.get("value").lower().strip()
        full_type = type
        if type != name:
            full_type = type + " - " + name
        return (full_type, value)
    # this is an email field
    if field.get("field_type") is not None and field.get("field_value") is not None:
        return (field.get("field_type"), field.get("field_value").lower().strip())
# otherwise the logic to extract the type/value pair from this
# specific field type is not supported
return (None, None) | 1750558782b91017061176176dda94c83c3dee6a | 702,534 |
def doc_arg(name, brief):
"""argument of doc_string"""
return " :param {0}: {1}".format(name, brief) | 8a341c7ef0b437ba9ec035001e71952e3793eea8 | 702,535 |
def example_project(tmp_path):
""" a minimal project
"""
my_module = tmp_path / "my_module"
starter_content = my_module / "starter_content"
starter_content.mkdir(parents=True)
(tmp_path / "README.md").write_text("# My Module\n")
    (my_module / "__init__.py").write_text("__version__ = '0.0.0'\n")
(starter_content / "example.txt").write_text("123")
return tmp_path | 9063c3f317abc66116bd79656c4fd16e96095e4f | 702,536 |
def foo():
"""
example function documentation
an example doctest is included below
returns: None
>>> x = foo()
>>> x
'foo'
"""
return "foo" | 81300580771ac0fa31b8c701d322a48997683c9c | 702,537 |
def string_date(mnthDay, year):
"""Return a string date as 'mm/dd/yyyy'.
Argument format:
'mm/dd' string
'yyyy'"""
return(mnthDay + '/' + str(year)) | e85bf9f0e72735be04009c6b685f2788a5c46d47 | 702,538 |
def check_who_queued(user):
"""
Returns a function that checks if the song was requested by user
"""
def pred(song):
if song.requested_by and song.requested_by.id == user.id:
return True
return False
return pred | e53a1434077ec7b97e237d1ff8bcc8c2454c4015 | 702,539 |
def _get_lines(filename):
"""Returns a list of lines from 'filename', joining any line ending in \\
with the following line."""
with open(filename, "r") as f:
lines = []
accum = ""
for line in f:
if line.endswith("\\\n"):
accum += line[:-2]
else:
lines.append(accum + line)
accum = ""
        if accum:
            # flush a dangling continuation if the file ends with a backslash line
            lines.append(accum)
        return lines | 83b2184eedfb21d27f310f9f2229d05d69ac8b92 | 702,540 |
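A hedged usage sketch (writes a throwaway file; "joined.txt" is a hypothetical name). The backslash-newline pair is stripped, so the joined line keeps both surrounding spaces:
>>> with open("joined.txt", "w") as f:
...     _ = f.write("first \\\n continued\nsecond\n")
>>> _get_lines("joined.txt")
['first  continued\n', 'second\n']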
import re
def replace_punctuation_and_whitespace(text):
"""Replace occurrences of punctuation (other than . - _) and any consecutive white space with ."""
rx = re.compile(r"[^\w\s\-.]|\s+")
return rx.sub(".", text) | b539ec796c1b69176e0da132ee88f9695b745fb2 | 702,541 |
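A hedged usage sketch (input is illustrative): each punctuation character and each whitespace run becomes a single dot, while . - _ survive:
>>> replace_punctuation_and_whitespace("my file (v2), final draft.txt")
'my.file..v2...final.draft.txt'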
import re
def remove_symbols(text):
""" Removes all symbols and keep alphanumerics """
whitelist = []
return [re.sub(r'([^a-zA-Z0-9\s]+?)',' ',word) for word in text if word not in whitelist] | 54e3706a275f5c5ff58a924a04249066d014f393 | 702,542 |
def filter_out_length_of_one(word):
"""
Filters out all words of length 1
:param word: Input word
    :return: False if word is of length 1, True otherwise
    """
    return len(word) > 1 | c89fda6b560811a178a5a2d2c242d54fba27d071 | 702,543 |
def bmesh_join(list_of_bmeshes, list_of_matrices, *, normal_update=False, bmesh):
"""takes as input a list of bm references and outputs a single merged bmesh
allows an additional 'normal_update=True' to force _normal_ calculations.
"""
bm = bmesh.new()
add_vert = bm.verts.new
add_face = bm.faces.new
add_edge = bm.edges.new
for bm_to_add, matrix in zip(list_of_bmeshes, list_of_matrices):
bm_to_add.transform(matrix)
offset = len(bm.verts)
for v in bm_to_add.verts:
add_vert(v.co)
bm.verts.index_update()
bm.verts.ensure_lookup_table()
if bm_to_add.faces:
for face in bm_to_add.faces:
add_face(tuple(bm.verts[i.index + offset] for i in face.verts))
bm.faces.index_update()
if bm_to_add.edges:
for edge in bm_to_add.edges:
edge_seq = tuple(bm.verts[i.index + offset] for i in edge.verts)
try:
add_edge(edge_seq)
except ValueError:
# edge exists!
pass
bm.edges.index_update()
if normal_update:
bm.normal_update()
return bm | 07d7f3401d170ed4afc3f6a795258710fe263aba | 702,544 |
import json
def read_json(jsonfile):
"""Read a json file into a dictionary
Args:
jsonfile: the name of the json file to read
Returns:
the contents of the JSON file as a dictionary
>>> from click.testing import CliRunner
>>> test = dict(a=1)
>>> with CliRunner().isolated_filesystem():
... filename = 'tmp.json'
... save_json(filename, test)
... assert test == read_json(filename)
"""
with open(jsonfile, 'r') as filepointer:
dict_ = json.load(filepointer)
return dict_ | da5b7bddc42b14a6547071fe528a1c051d35356c | 702,545 |
def zero_float(string):
"""Try to make a string into a floating point number and make it zero if
it cannot be cast. This function is useful because python will throw an
error if you try to cast a string to a float and it cannot be.
"""
try:
return float(string)
    except (ValueError, TypeError):
        return 0.0 | 075a49b53a0daf0f92072a5ec33f4b8240cc6885 | 702,546 |
import re
def _special_att_handling(attype, col_args): # noqa: C901
""" laundry list of special handling that sqlalchemy
does to convert between a Postgres type and a Sqlalchemy type """
kwargs = {}
if attype == 'uuid':
kwargs = {'as_uuid': True}
args = ()
elif attype == 'numeric':
if col_args:
prec, scale = col_args
args = (int(prec), int(scale))
else:
args = ()
elif attype == 'double precision':
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
'time with time zone',
'datetime with time zone'):
attype = attype.replace('datetime', 'timestamp')
kwargs['timezone'] = True
args = ()
elif attype in ('timestamp without time zone',
'time without time zone',
'datetime without time zone',
'datetime',
'time'):
attype = attype.replace('datetime', 'timestamp')
kwargs['timezone'] = False
args = ()
elif attype == 'bit varying':
kwargs['varying'] = True
if col_args: # pragma: no cover
assert len(col_args) == 1
args = (col_args[0],)
else:
args = ()
elif attype == 'varchar':
attype = 'character varying'
if col_args:
assert len(col_args) == 1
args = (col_args[0],)
else: # pragma: no cover
args = ()
elif attype == 'char':
attype = 'character'
args = col_args
elif attype.startswith('interval'):
field_match = re.match(r'interval (.+)', attype, re.I)
if col_args: # pragma: no cover
assert len(col_args) == 1
kwargs['precision'] = col_args[0]
if field_match: # pragma: no cover
kwargs['fields'] = field_match.group(1)
attype = "interval"
args = ()
else:
args = col_args or ()
return attype, args, kwargs | e9feabb792001c66a9679a3a5931cb00c96dcf83 | 702,547 |
def checksum(data) -> int:
"""
Found on: http://www.binarytides.com/raw-socket-programming-in-python-linux/. Modified to work in python 3.
The checksum is the 16-bit ones's complement of the one's complement sum
of the ICMP message starting with the ICMP Type (RFC 792).
:param data: data to built checksum from.
:return: 16-bit int checksum
"""
s = 0
for i in range(0, len(data), 2):
tmp = data[i]
if i + 1 < len(data):
tmp += (data[i + 1] << 8)
s += tmp
s = (s >> 16) + (s & 0xffff)
s += (s >> 16)
s = ~s & 0xffff
return s | af8ba70fa53f95514bc6e8118440ba607c17d794 | 702,548 |
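A hedged worked example (a two-byte payload, paired little-endian exactly as the loop implements):
>>> checksum(b"\x01\x02")
65022
>>> hex(checksum(b"\x01\x02"))
'0xfdfe'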
def _number_convert(match):
"""
Convert number with an explicit base
to a decimal integer value:
- 0x0000 -> hexadecimal
- 16'h0000 -> hexadecimal
- 0b0000 -> binary
- 3'b000 -> binary
- otherwise -> decimal
"""
prefix, base, number = match.groups()
if prefix is not None:
return str(match.group(0))
if base in "xh":
return str(int(number, 16))
if base == "b":
return str(int(number, 2))
return str(int(number, 10)) | adef8f8f80342fbcd79c461068eb04f99427f88c | 702,549 |
import requests
import sys
def get_scientific_name(taxon_id):
"""Get scientific name for input taxon_id.
:param taxon_id: NCBI taxonomy identifier
:return scientific_name: scientific name of sample that distinguishes its taxonomy
"""
# endpoint for scientific name
url = 'http://www.ebi.ac.uk/ena/taxonomy/rest/tax-id'
session = requests.Session()
session.trust_env = False
r = session.get(f"{url}/{str(taxon_id).strip()}")
    try:
        scientific_name = r.json()['scientificName']
        return scientific_name
    except (ValueError, KeyError):
        msg = f'Oops, no scientific name available for {taxon_id}. Is it a valid taxon_id?'
        sys.exit(msg) | 5cab933a3cc6ab2602e680c3db2ed129c3f85b94 | 702,550 |
def rating_calc(item, ocurrences, last_ocurrences, total_ocurrences):
""" Calculates the rating of the target language.
"""
rating = ocurrences / total_ocurrences
if item in last_ocurrences:
rating *= 2
if last_ocurrences and item == last_ocurrences[-1]:
rating *= 4
return rating | fa93bd9c44b612231cd7dd486909fa2654e34288 | 702,551 |
import hashlib
def makeKey(password, salt):
"""make master key"""
if not hasattr(password, 'decode'):
password = password.encode('utf-8')
if not hasattr(salt, 'decode'):
salt = salt.lower()
salt = salt.encode('utf-8')
# Here we use 100,000 iterations since that is the default that the
# bitwarden web vault uses. In the future, we can parameterize this, or at
# least change it if updating it becomes necessary.
#
# I don't know where we got a dklen of 32 from.
return hashlib.pbkdf2_hmac('sha256', password, salt, 100000, dklen=32) | 59819fc96f3ad5e4627c5acccf3ba2acf99ee49d | 702,552 |
def run_episode(environment, agent, is_training=False):
"""Run a single episode."""
timestep = environment.reset()
while not timestep.last():
action = agent.step(timestep, is_training)
new_timestep = environment.step(action)
if is_training:
agent.update(timestep, action, new_timestep)
timestep = new_timestep
episode_return = environment.episode_return
return episode_return | dcec7609b33cf2f13ca6753c2dfd614252189b51 | 702,553 |
def pandas_df_to_temporary_csv(tmp_path):
"""Provides a function to write a pandas dataframe to a temporary csv file with function scope."""
def _pandas_df_to_temporary_csv(pandas_df, sep=",", filename="temp.csv"):
temporary_csv_path = tmp_path / filename
pandas_df.to_csv(temporary_csv_path, sep=sep, header=True, index=False, na_rep="")
return temporary_csv_path
return _pandas_df_to_temporary_csv | 5ec9b3072928e3cdbe067dfcb33010b2a51a267b | 702,554 |
def df(n):
"""Gives the double factorial of *n*"""
return 1.0 if n <= 0 else 1.0 * n * df(n - 2) | 71fcc2445db94b5686d4c9b8d85e9bdc1dc2bbb4 | 702,555 |
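A hedged worked example: the recursion bottoms out at 1.0, so df(7) = 7*5*3*1 and df(8) = 8*6*4*2, both as floats:
>>> df(7)
105.0
>>> df(8)
384.0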
def cli(ctx, job_id):
"""Resume a job if it is paused.
Output:
dict containing output dataset associations
.. note::
This method is only supported by Galaxy 18.09 or later.
"""
return ctx.gi.jobs.resume_job(job_id) | 956ee7cbc0b251718311fae88b2bf53482d88b23 | 702,556 |
def queue_exists(queue_id):
"""Returns true if the queue ID belongs to a valid queue."""
return True | 24bb477842cedb5443eb06217374deec49f99398 | 702,557 |
import re
def track_num_to_int(track_num_str):
""" Convert a track number tag value to an int.
This function exists because the track number may be
something like 01/12, i.e. first of 12 tracks,
so we need to strip off the / and everything after.
If the string can't be parsed as a number, -1 is returned. """
if track_num_str == '':
return -1
if '/' in track_num_str:
track_num_str = re.sub('(/(.*)$)', '', track_num_str)
try:
track_num = int(track_num_str)
except ValueError:
track_num = -1
return track_num | af6e878bad7c3e26c61ad2ad4a759cb8c1dc4224 | 702,558 |
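A hedged usage sketch (tag values are illustrative):
>>> track_num_to_int('01/12')
1
>>> track_num_to_int('not-a-number')
-1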
def elt1(list, context):
"""return second member"""
return list[1] | 40d59eaac5b9eeb86b511ef2a4df4b68893a6248 | 702,559 |
def sort_list(player_data_list):
"""Sort list based on qualifer.
Args:
player_data_list: player data list
Returns:
player data list properly sorted
"""
return sorted(player_data_list, key=lambda x: x[-1]) | 7164d2851ca6c7557e8a9e10c45a25243254180d | 702,560 |
def is_alive(worker_dict):
""" Test if worker is alive and running | None --> Bool """
return worker_dict['worker'].is_alive() | c3979d4722d27c8a3ceefe38b141313f4cdef881 | 702,561 |
def sort(array: list[int]) -> list[int]:
"""Naive bubble sort implementation.
"""
for i in range(len(array)):
for j in range(len(array) - 1, i, -1):
if array[j] < array[j - 1]:
array[j], array[j - 1] = array[j - 1], array[j]
return array | f3121f84b238d82ea3cc7d87bfa993c2a9339786 | 702,562 |
def box_teamnames(page):
"""
A @ H always
"""
teams = page.find_all("span", {"class": "short-name"})
destinations = page.find_all("span", {"class": "long-name"})
names = [team.text for team in teams]
cities = [destination.text for destination in destinations]
if not names or not cities:
return None
a_team, h_team = [dest + " " + name for (dest, name) in zip(cities, names)]
return a_team, h_team | f116fdb86fa347c5dcbaef40771b3f49d2b67f7a | 702,563 |
def _call_bool_filter(context, value):
"""Pass a value through the 'bool' filter.
:param context: Jinja2 Context object.
:param value: Value to pass through bool filter.
:returns: A boolean.
"""
return context.environment.call_filter("bool", value, context=context) | 181a51d7d436cf0eaf2c0fe9d2f04ab4030f010c | 702,564 |
def get_attr(attrs, key):
""" Get the attribute that corresponds to the given key"""
path = key.split('.')
d = attrs
for p in path:
if p.isdigit():
p = int(p)
# Let it raise the appropriate exception
d = d[p]
return d | eec6878b19413c54008eb930af21cd40decff2cc | 702,566 |
def get_wbo(offmol, dihedrals):
"""
Returns the specific wbo of the dihedral bonds
Parameters
----------
offmol: openforcefield molecule
dihedrals: list of atom indices in dihedral
Returns
-------
bond.fractional_bond_order: wiberg bond order calculated using openforcefield toolkit for the specific dihedral central bond
"""
offmol.assign_fractional_bond_orders(bond_order_model="am1-wiberg-elf10")
bond = offmol.get_bond_between(dihedrals[1], dihedrals[2])
return bond.fractional_bond_order | 82aa84036d3078826fedaec40e599ffd783a422c | 702,567 |
import re
def _GetLogTag(filename):
"""Get the tag of a log."""
    regex = re.compile(r'\d+-\d+(?:-(?P<tag>.+))?\.h5')
match = regex.search(filename)
if match and match.group('tag'):
return match.group('tag')
elif filename.endswith('.h5'):
return filename[:-3]
else:
return '' | 4be40e744478d4f0b8c5ffc7ea866537802b1f25 | 702,568 |
import numpy
def l2_norm(x: numpy.ndarray, y: numpy.ndarray) -> numpy.ndarray:
"""Euclidean distance between two batches of points stacked across the first dimension."""
return numpy.linalg.norm(x - y, axis=1) | 248f004276d5459e7b6ce8906abc7bf950a9b1a3 | 702,569 |
def ilarisHitProb(at,vt):
"""
at: AT of attacking character
vt: VT of defending character
return: probability that attacking character will hit defending character
"""
    # 47.5 is the hit chance in percent when AT equals VT (no bonus)
    # 47.5 - ((y - 41) * y) / 8 is the hit chance for an advantage of y points
    # (at > vt) adds one point because an equal result counts as a win
if at-vt>20:
return 1
elif vt-at>20:
return 0
else:
y = lambda y: 47.5-((y-41)*y)/8
return y(at-vt+(at>vt))/100 if at>=vt else 1-y(vt-at+(vt>at))/100 | 488668075154a509ed87345a17fac31b88c86d69 | 702,571 |
import string
import random
def random_string() -> str:
"""Return a random string of characters."""
letters = string.ascii_letters
return "".join(random.choice(letters) for i in range(32)) | dc828f6f89f1e20ee6cea5ebcbbecb38b0b07aa6 | 702,572 |
def bbox_structure_to_square(bbox):
"""Function to turn from bbox coco struture to square.
[x,y,width,height] -> [min_height, min_width, max_height, max_width]
"""
x,y,width,height = bbox
sq = [y,x,y+height,x+width]
return sq | ff3147c89ad6d14ad22126bdc0cf119883d82db9 | 702,573 |
from typing import Pattern
def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
"""Replace `regex` with `replacement` twice on `original`.
This is used by string normalization to perform replaces on
overlapping matches.
"""
return regex.sub(replacement, regex.sub(replacement, original)) | 6bada9c7b349ba3a5da840e131577dd354f0b9eb | 702,574 |
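A hedged illustration of why one pass is not enough: re.sub only consumes non-overlapping matches, so the overlap left behind needs a second pass:
>>> import re
>>> re.compile(r"aa").sub("a", "aaaa")
'aa'
>>> sub_twice(re.compile(r"aa"), "a", "aaaa")
'a'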
import pickle
def load_pickle(filename):
"""
Loads a serialized object
Parameters:
-----------
filename : string
"""
    with open(filename, 'rb') as f:
        return pickle.load(f) | 6525b73e211947ea0b2f76ec5f071d46bb806f06 | 702,575 |
def get_properties(value):
"""
Converts the specified value into a dict containing the NVPs (name-value pairs)
:param value:
:return:
"""
rtnval = None
if isinstance(value, dict):
# value is already a Python dict, so just return it
rtnval = value
elif '=' in value:
# value contains '=' character(s), so split on that to
# come up with NVPs
        properties = value.split('=', 1)
if properties:
rtnval = {properties[0]: properties[1]}
if rtnval is None:
# Couldn't tell what the delimiter used in value was, so just
# return a dict with value as the value, and 'Value' as the key
rtnval = {'Value': value}
return rtnval | 03f75e85e54a08b75b8d6e25b5d1dd65f7e987db | 702,577 |
def get_pdm_terms(site_index, n, adj_sites=4, shift=0):
"""
inputs:
site_index (int): center site index
n (int): number of sites in lattice (for enforcing periodic boundary conditions)
adj_site (int): how many adjacent sites to collect pop and coherence values from
shift (int): return row indices shifted by this value (when collecting from matrix w/ compound index
returns:
ind_list (list of ints): all site indices to collect
shift_ind_list (list of ints): site indices shifted down i*n rows to collect the correct sample
coh_list (list of ints): coherences in fragment with central site
shift_coh_list (list of ints): coherences shifted down i*n rows to select particular sample
"""
# if a term in sites is out of bounds, subtract bound length
ind_list, shift_ind_list = [site_index], [site_index+shift]
coh_list, shift_coh_list = [], []
#print("Site: ", site_index)
for ind in range(site_index - adj_sites, site_index + adj_sites + 1):
        if ind != site_index: # we've already added the target site population to ind_list and shift_ind_list
if ind < 0: ind += n
elif ind >= n: ind -= n
else: pass
#print(ind)
ind_list.append(ind)
shift_ind_list.append(ind+shift) # shift down to specific matrix in set we are selecting
coh_list.append(ind)
shift_coh_list.append(ind+shift)
return ind_list, shift_ind_list, coh_list, shift_coh_list | 34d7914ddd751cb75b04e855d68edb156c1defda | 702,579 |
import ast
def ast_name_node(**props):
"""
creates a name ast node with the property names and values
as specified in `props`
"""
node = ast.Name()
for name, value in props.items():
setattr(node, name, value)
return node | 880d9527d63c7b2c97a4bc616bf245dab6583f81 | 702,581 |
def buildX(traj_file, t, X):
"""
Builds the node attribute matrix for a given time step.
Inputs:
traj_file : string indicating the location of the ground truth trajectory data
t : scalar indicating current time step
X : empty node attribute matrix in shape [n_nodes, n_features]
Outputs:
X : node attribute matrix with values added from traj_file in shape [n_nodes, n_features]
"""
# read through the file to find the current time step from "t = 100" etc.
with open(traj_file) as f:
lines = f.readlines()
i = -1
# find the line that contains the time step information
for line in lines:
i += 1
if line[0:4] == "t = " and int(line[4:]) == t:
# print("found time {0} in trajectory data".format(t))
count = 0
for line in lines[i:]:
# print("count", count)
# extract these lines and make a graph
if count < 3:
count += 1
continue
if ((count >= 3) and (count < (X.shape[0] + 3))):
# X(i,1:-1) = all data in the current row of .oxdna file
j = 1
my_str = ""
for k in range(len(line)):
if line[k] != " " and line[k] != "\n":
my_str += line[k]
if line[k] == " " or line[k] == "\n":
X[(count-3),j] = float(my_str)
j += 1
my_str = ""
count += 1
else:
break
return X | 3b0d3b18d9897364e1140103d7f049d5502d302a | 702,582 |
def move_left(board, row):
"""Move the given row to one position left"""
board[row] = board[row][1:] + board[row][:1]
return board | a0dc74f65abd5560db7c2fe602186d15b8fda3d2 | 702,583 |
import re
import torch
def embed_seq(sequence, tokenizer, prottrans_model, device="cpu"):
"""
Embed a single sequence from the tokenizer and prottrans model.
"""
sequences = [sequence]
sep_sequences = []
for seq in sequences:
sep_sequences.append(" ".join([x for x in seq]))
#print(sep_sequences)
clean = [re.sub(r"[UZOB]", "X", sequence) for sequence in sep_sequences]
#print(clean)
ids = tokenizer.batch_encode_plus(clean, add_special_tokens=True, pad_to_max_length=True)
input_ids = torch.tensor(ids['input_ids']).to(device)
attention_mask = torch.tensor(ids['attention_mask']).to(device)
with torch.no_grad():
embedding = prottrans_model(input_ids=input_ids,attention_mask=attention_mask)[0]
print(embedding.shape)
return embedding | 2c8182e67aa2fe8fa1bf0dff332d8bd8a25a5fb2 | 702,584 |
def get_quadrant(x, y):
"""
Returns the quadrant as an interger in a mathematical positive system:
1 => first quadrant
2 => second quadrant
3 => third quadrant
4 => fourth quadrant
None => either one or both coordinates are zero
Parameters
----------
x : float
x coordinate.
y : float
y coordinate.
Returns
-------
int or None
Quadrant number.
"""
if x > 0 and y > 0:
return 1
elif x < 0 and y > 0:
return 2
elif x < 0 and y < 0:
return 3
elif x > 0 and y < 0:
return 4
else:
return None | 8650edb7a3e854eed0559f633dcf5cd0c7310db4 | 702,585 |
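A hedged usage sketch (coordinates are illustrative; anything on an axis yields None):
>>> get_quadrant(1.0, -2.5)
4
>>> get_quadrant(0.0, 3.0) is None
True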
def days_in_month(year, month):
"""
Inputs:
year - an integer between datetime.MINYEAR and datetime.MAXYEAR
representing the year
month - an integer between 1 and 12 representing the month
Returns:
The number of days in the input month.
"""
leapyear=0
if(year%4 ==0):
if(year%100!=0):
leapyear=1
elif(year%400==0):
leapyear=1
if(month == 1 or month == 3 or month == 5 or month == 7 or
month == 8 or month == 10 or month == 12):
return 31
elif(month == 4 or month == 6 or month == 9 or month == 11):
return 30
elif(leapyear == 0 and month == 2):
return 28
elif(leapyear == 1 and month == 2):
return 29
else:
print("Please Enter a valid month and Year") | b2988e9a6b1413ef0957413c10e5e4f5ef225d8b | 702,586 |
import requests
import json
def _get_token(user, password, host, port):
""" Gets auth token. """
token_url = "{}:{}/api/token/".format(host, port)
response = requests.post(token_url, json={
"username": user,
"password": password
})
if response.ok:
return json.loads(response.text).get("access")
else:
return "" | 701bb7c1a740cfeca5c321bb6662b8f4e701cd81 | 702,587 |
import random
def random_point():
"""Returns a random point on a 100x100 grid."""
return (random.randrange(100), random.randrange(100)) | 6a2a2c3a4bc3f8347d538984354c2d9688989e14 | 702,588 |
def repeat(s, n):
""" (str, int) -> str
Return s repeated n times; if n is negative, return empty string.
>>> repeat('yes', 4)
'yesyesyesyes'
    >>> repeat('no', 0)
''
"""
return (s * n) | b44b56b9e69783c7f57e99b61cc86e975c575a59 | 702,589 |
def can_translate(user):
"""Checks if a user translate a product"""
return user.permissions['perm_translate'] | c6797346d8bd61637927af808bb9355a9220a91e | 702,590 |
def read_xml_file_line_basis(xml_file, element):
"""
Read the xml file and capture only the elements we need.
"""
    start_tag = f'<{element}>'
    end_tag = f'</{element}>'
    start_tag_identified = False
    captured_records = list()
    captured_line = ''
    with open(xml_file) as f:
        for line in f:
            if start_tag in line:  # element opens here: start capturing
                start_tag_identified = True
            if start_tag_identified:
                captured_line += line
            if end_tag in line:  # element closes: store the record and reset
                captured_records.append(captured_line)
                start_tag_identified = False
                captured_line = ''
return '{:,.0f}'.format(len(captured_records)) | be45d97bd3ca051f06c775b21184c158472cd824 | 702,591 |
def xyz_string(labels, coords):
""" .xyz format string for this cartesian geometry
    :param labels: atom symbols labeling the beginning of each atom line, by index
    :type labels: sequence of str
"""
assert len(labels) == len(coords)
dxyz = '\n'.join(
'{:s} {:s} {:s} {:s}'.format(asymb, *map(repr, xyz))
for asymb, xyz in zip(labels, coords))
return dxyz | be2d34fe41a3468c0764bbfd9d6960dc377375cc | 702,592 |
import pandas
def Protein_translation_Amb(t, y, data, mRNAData):
"""
Defines ODE function Conversion of Amb_mRNA to protein
p1,p2,p3....: Protein concentrations for all ODEs
It will have list of all parameter values for all my ODEs, so 36 values : L, U, D for each mRNA to protein conversion equation
"""
data = pandas.read_csv(data, sep="\t")
L = data["L"].tolist() # protein synthesis rate per day
U = data["U"].tolist() # protein degradation rate per day
D = data["D"].tolist() # factor affecting feedback from protein
# concentration to rate of protein synthesis from mRNA
mRNAData = pandas.read_csv(mRNAData, sep="\t")
mRNA = mRNAData["mRNA_Amb"].tolist()
# Output from ODE function must be a COLUMN vector, with n rows
dydt = []
for x in range(0,len(L)):
temp = ((L[x]*mRNA[x])/(1+y[x]/D[x]))-(U[x]*y[x])
dydt.append(temp)
return dydt | 1730cf15d57566dc9ec461f3631fe0feeddd4b49 | 702,593 |
import os
def _get_abs_path(path):
"""Return the absolute path for a given path.
:param path: ``str`` $PATH to be created
:returns: ``str``
"""
return os.path.abspath(
os.path.expanduser(
path
)
) | 3420a9624470c9a2129865356277864a6d930046 | 702,594 |
def square(num):
"""
Return the square values of the input number.
The input number must be integer.
"""
return num ** 2 | e4f88e5f00de7c469d372d9ce1a7e539941b3857 | 702,595 |
from typing import OrderedDict
def apply_address_fixups(address: OrderedDict[str, str]) -> OrderedDict[str, str]:
"""Sometimes the usaddress parser makes mistakes. It's an imperfect world.
This function applies transformations to the parsed address to correct specific
known errors.
"""
# Fixup: At least one address has "WI, USA" in the "StateName" component.
# Strip non-state components
address["StateName"] = address["StateName"].partition(",")[0]
# Fixup: (OrderedDict([('AddressNumber', '1019'),
# ('StreetNamePreDirectional', 'S.'),
# ('StreetName', 'Green Bay Road'),
# ('StreetNamePostType', 'Mount'),
# ('PlaceName', 'Pleasant'),
# ('StateName', 'WI'),
# ('ZipCode', '53406')]),
# 'Street Address'),
#
# The correct name of the town is "Mount Pleasant"
if (
address.get("StreetNamePostType") == "Mount"
and address.get("PlaceName") == "Pleasant"
):
del address["StreetNamePostType"]
del address["PlaceName"]
address["PlaceName"] = "Mount Pleasant"
# Fixup:
# (OrderedDict([('AddressNumber', '3111'),
# ('StreetNamePreDirectional', 'S.'),
# ('PlaceName', 'Chicago South Milwaukee'),
# ('StateName', 'WI'),
# ('ZipCode', '53172')]),
# 'Street Address'),
#
# 'Chicago' is a 'StreetName', located in the town of 'South Milwaukee'
if address.get("PlaceName") == "Chicago South Milwaukee":
del address["PlaceName"]
address["StreetName"] = "Chicago"
address["PlaceName"] = "South Milwaukee"
# Fixup:
# (OrderedDict([('AddressNumber', '2490'),
# ('StreetName', 'Bushwood'),
# ('PlaceName', 'Dr.Elgin'),
# ('StateName', 'IL')]),
# 'Street Address'),
#
# 'Dr.Elgin' is a typographical error. Create a 'StreetNamePostType' of 'Dr.' and a new 'PlaceName' of 'Elgin'
if address.get("PlaceName") == "Dr.Elgin":
del address["PlaceName"]
address["StreetNamePostType"] = "Dr."
address["PlaceName"] = "Elgin"
return address | 735f7e035b352b21b9b7998b8aa6f5ad7ac826d7 | 702,596 |
import torch
def mask_finished_scores(score, flag):
"""
If a sequence is finished, we only allow one alive branch. This function aims to give one branch a zero score
and the rest -inf score.
Args:
score: A real value array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A real value array with shape [batch_size * beam_size, beam_size].
"""
beam_width = score.size(-1)
zero_mask = torch.zeros_like(flag, dtype=torch.bool)
if beam_width > 1:
unfinished = torch.cat(
(zero_mask, flag.repeat([1, beam_width - 1])), dim=1)
finished = torch.cat(
(flag.bool(), zero_mask.repeat([1, beam_width - 1])), dim=1)
else:
unfinished = zero_mask
finished = flag.bool()
score.masked_fill_(unfinished, -float('inf'))
score.masked_fill_(finished, 0)
return score | 87d5d8fb45a44c54cd690280ce0baf0c4fe8dab5 | 702,597 |
import itertools
def args_combinations(*args, **kwargs):
"""
Given a bunch of arguments that are all a set of types, generate all
possible possible combinations of argument type
args is list of type or set of types
kwargs is a dict whose values are types or set of types
"""
def asset(v):
if isinstance(v, set):
return v
else:
return {v}
keys = list(kwargs.keys())
for curr_args in itertools.product(*[asset(a) for a in args]):
for curr_kwargs in itertools.product(*[asset(kwargs[k]) for k in keys]):
yield curr_args, {k: v for (k, v) in zip(keys, curr_kwargs)} | 8e90ce285322bd17a97e4bdb75e230f7015f4b2d | 702,598 |
import copy
def coordinate_tensor(dim):
"""
:param dim: list of lengths for each dimension
:return: a list of every possible coordinate within the dimension system
"""
# recurrent function
def recurrent_dim_filler(dim_current, dim_higher, coordinate_higher, coordinates):
# processing the lowest dimensions
if len(dim_current) == 1:
# loop over the space in this dimension
for i in range(dim_current[0]):
# concatenate the higher dimensions' coordinates with the lowest dimension iterator
new_element = coordinate_higher + [i]
# add the element to the master coordinate list
coordinates.append(new_element)
# still in higher dimensions
else:
# pop an element off of the current dimension list onto the higher dimension list
dim_higher.extend([dim_current.pop(0)])
# add a placeholder value to the higher dimension coordinate list
coordinate_higher.extend([-1])
# loop over the most recently added higher dimension
for i in range(dim_higher[-1]):
# update placeholder value
coordinate_higher[-1] = i
# recursively call this function
coordinates = recurrent_dim_filler(dim_current, dim_higher, coordinate_higher, coordinates)
# reverse the pop
dim_current.extend([dim_higher.pop(-1)])
# eliminate place holder value
coordinate_higher.pop(-1)
return coordinates
# initialize lists
    dim_current = copy.deepcopy(list(dim))  # work on a copy so the input dims are not mutated
dim_higher = []
coordinate_higher = []
coordinates = []
# run coordinate generator
coordinates = recurrent_dim_filler(dim_current, dim_higher, coordinate_higher, coordinates)
return coordinates | 8a5e2f11b5157cb2500748ea149305a21b562b77 | 702,599 |
def RGBStringToList(rgb_string):
"""Convert string "rgb(red,green,blue)" into a list of ints.
The purple air JSON returns a background color based on the air
quality as a string. We want the actual values of the components.
Args:
rgb_string: A string of the form "rgb(0-255, 0-255, 0-255)".
Returns:
list of the 3 strings representing red, green, and blue.
"""
return rgb_string[4:-1].split(',') | f94650ed977b5a8d8bb85a37487faf7b665f2e76 | 702,600 |
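A hedged usage sketch; note the split preserves any spaces after the commas, so callers may still want to strip or int() the parts:
>>> RGBStringToList("rgb(255, 128, 0)")
['255', ' 128', ' 0']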
import six
def _metric_value(value_str, metric_type):
"""
Return a Python-typed metric value from a metric value string.
"""
if metric_type in (int, float):
try:
return metric_type(value_str)
except ValueError:
raise ValueError("Invalid {} metric value: {!r}".
                             format(metric_type.__name__, value_str))
elif metric_type is six.text_type:
# In Python 3, decode('unicode_escape) requires bytes, so we need
# to encode to bytes. This also works in Python 2.
return value_str.strip('"').encode('utf-8').decode('unicode_escape')
else:
assert metric_type is bool
lower_str = value_str.lower()
if lower_str == 'true':
return True
if lower_str == 'false':
return False
raise ValueError("Invalid boolean metric value: {!r}".format(value_str)) | 39a3b0e5bfe2180e1897dd87872f8e08925e8847 | 702,601 |
import threading
def in_main_thread():
"""
True when the current thread is the main thread.
"""
    return threading.current_thread() is threading.main_thread() | 82da352928b2af3794a2ee608b2763b98b8a731e | 702,602 |
def fixUnits(object, **kwargs):
"""Convert output pint units into a proper latex string."""
string = str(object)
type = kwargs.get('type', None)
unitColor = 'darkBlue'
replacements = kwargs.get('replacements', None)
if replacements:
for old, new in replacements.items():
string = string.replace(old, str(new))
string = string.replace('kip_per_square_inch', f'{{_clr_{{_bf_{{_sp_ksi}}}}}}')
string = string.replace('kft', f'{{_clr_{{_bf_{{_sp_kft}}}}}}')
string = string.replace('force_pound', f'{{_clr_{{_bf_{{_sp_lb}}}}}}')
string = string.replace('foot', f'{{_clr_{{_bf_{{_sp_ft}}}}}}')
string = string.replace('plf', f'{{_clr_{{_bf_{{_sp_plf}}}}}}')
string = string.replace('yard', f'{{_clr_{{_bf_{{_sp_yd}}}}}}')
string = string.replace('kilonewton / meter', f'{{_clr_{{_bf_{{_sp_kN/m}}}}}}')
string = string.replace('kilogram * standard_gravity / meter', f'{{_clr_{{_bf_{{_sp_kg/m}}}}}}')
string = string.replace('kilogram * standard_gravity', f'{{_clr_{{_bf_{{_sp_kg}}}}}}')
string = string.replace('meter', f'{{_clr_{{_bf_{{_sp_m}}}}}}')
string = string.replace('dimensionless', '')
string = string.replace('klf', f'{{_clr_{{_bf_{{_sp_klf}}}}}}')
string = string.replace('kip', f'{{_clr_{{_bf_{{_sp_kip}}}}}}')
string = string.replace('inch ** 2', f'{{_clr_{{_bf_{{_sp_inch^{{2}}}}}}}}')
string = string.replace('inch ** 3', f'{{_clr_{{_bf_{{_sp_inch^{{3}}}}}}}}')
string = string.replace('inch ** 4', f'{{_clr_{{_bf_{{_sp_inch^{{4}}}}}}}}')
string = string.replace('inch ** 6', f'{{_clr_{{_bf_{{_sp_inch^{{6}}}}}}}}')
string = string.replace('inch', f'{{_clr_{{_bf_{{_sp_in}}}}}}')
string = string.replace('pcf', f'{{_clr_{{_bf_{{_sp_pcf}}}}}}')
string = string.replace('_clr_', f'\\color{{{unitColor}}}')
string = string.replace('_arrow_', '\\textrightarrow')
if type == 'text':
string = string.replace('_bf_', '\\textbf')
string = string.replace('_sp_', '')
string = string.replace('^', '\\textsuperscript')
string = string.replace('_', '\\textsubscript')
string = string.replace('Sec. ', f'\\S')
else:
string = string.replace('_bf_', '\\mathbf')
        string = string.replace('_sp_', ' \\; ')
return string | a1922f296a7d55b989fe5455ed98eb166edeeee7 | 702,603 |
def getMDistance(plug):
"""
Gets the MDistance value from the supplied plug.
:type plug: om.MPlug
:rtype: om.MDistance
"""
return plug.asMDistance() | 43cd8dfd2c698ad1cc88771c2c69f3b5e502f202 | 702,604 |
def get_time_size(rates):
"""
Get number of time intervals
    Parameters
    ----------
    rates : dict
        Nested mapping of well/group name -> mnemonic -> list of values per time interval.
    Returns
-------
out : int
Number of time intervals
See Also
--------
DataStruct
"""
wgname_keys = list(rates.keys())
mnemo_keys = list(rates[wgname_keys[0]].keys())
return len(rates[wgname_keys[0]][mnemo_keys[0]]) | c6c496992ef0fe557df302c3c88d9290895cd4a1 | 702,606 |
import argparse
def arg_parser():
"""
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, dest='input_file',
help="Input file with tweets summary information")
parser.add_argument("-e", "--input-entities", type=str, dest='input_ent',
help="Input file with entitites annotated")
parser.add_argument("-o", "--output-weka", type=str, dest='output_weka',
help="Output Weka file path")
parser.add_argument("-n", "--output-nn", type=str, dest='output_nn',
help="Output Neural Network file path")
parser.add_argument("-l", "--logfile", type=str,
help="Log file path")
parser.add_argument("-L", "--limit", type=int, dest='limit',
help="Limit number of tweet to process")
args = parser.parse_args()
return args | b0079ee9990229890f510e2679344256a4d32dc2 | 702,607 |
def splitFragP(uriref, punct=0):
"""split a URI reference before the fragment
Punctuation is kept.
e.g.
>>> splitFragP("abc#def")
('abc', '#def')
>>> splitFragP("abcdef")
('abcdef', '')
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i:]
else:
return uriref, "" | cc179fd8f064f3e87f18a9968f6f98ff0d584eb6 | 702,608 |
def sanitize_name(name):
""" Clean-up the given username."""
name = name.strip()
# clean up group
name = name.replace('- IE', ' -IE')
name = name.replace('- MA', ' -MA')
for l in [1,2,3,4,5,6,7,8,9]:
for g in "AB":
name = name.replace(f'IE{l}-{g}', f'IE-{l}{g}')
name = name.replace(f'IE{l}{g}', f'IE-{l}{g}')
for l in [1,2,3,4]:
for g in [2*l-1, 2*l]:
name = name.replace(f'MA-{l}{g}', f'MA{l}-{g}')
name = name.replace(f'MA{l}{g}', f'MA{l}-{g}')
# clean up name
try:
parts = name.split(' ')
firstname = parts[0].title()
group = parts[-1]
familynames = parts[1:-1]
familyname = " ".join(f.upper() for f in familynames)
name = f"{firstname} {familyname} {group}"
name = name.replace('-IE', '- IE')
name = name.replace('-MA', '- MA')
except:
pass
while " " in name:
name = name.replace(' ', ' ')
return name | c0618737b0717e6e90c09ab8d2f4f969abb7d891 | 702,609 |
import os
def extensionName(path):
"""
Function responsible for returning the extension of a file.
"""
extension = os.path.splitext(path)[1][1:]
return extension | e76f47164acc5d1c189b111b018690b7d2f039b4 | 702,610 |
import yaml
import re
def create_kubeconfig_for_ssh_tunnel(kubeconfig_file, kubeconfig_target_file):
"""
Creates a kubeconfig in which the Server URL is modified to use a locally set up SSH tunnel. (using 127.0.0.1 as an address)
Returns a tuple consisting of:
- the original IP/Servername of the K8s API
- the original Port of the K8s API
"""
with open (kubeconfig_file, "r") as f:
kubeconfig = yaml.load(f.read(), Loader=yaml.FullLoader)
original_server_address = kubeconfig["clusters"][0]["cluster"]["server"]
address_pattern = re.compile('https://([^:]*):([0-9]+)')
match = address_pattern.match(original_server_address)
if not match:
print('Error: No API address found in kubeconfig')
exit(1)
original_api_hostname = match.group(1)
original_api_port = match.group(2)
kubeconfig["clusters"][0]["cluster"]["server"] = f"https://127.0.0.1:{original_api_port}"
    with open(kubeconfig_target_file, "w") as f:
        f.write(yaml.dump(kubeconfig, default_flow_style=False))
return (original_api_hostname, original_api_port) | 39c85681486abda0008a040ad13a37032fc182b5 | 702,611 |
def sse_pack(d):
"""
Format a map with Server-Sent-Event-meaningful keys into a string for transport.
Happily borrowed from: http://taoofmac.com/space/blog/2014/11/16/1940
For reading on web usage: http://www.html5rocks.com/en/tutorials/eventsource/basics
For reading on the format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
"""
buffer_ = ''
for k in ['retry', 'id', 'event', 'data']:
if k in d.keys():
buffer_ += '%s: %s\n' % (k, d[k])
return buffer_ + '\n' | 5e3f791fc5b2451ff5538c3463674e1e89b80e12 | 702,612 |
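A hedged usage sketch (field values are illustrative; keys are emitted in the fixed retry/id/event/data order, with a blank line terminating the event):
>>> sse_pack({'event': 'tick', 'data': '42', 'id': '7'})
'id: 7\nevent: tick\ndata: 42\n\n'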
import argparse
def parse_arguments():
"""
Create command-line interface
"""
desc = "Compute dG, stdDG for different lengths and number of trajectories"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-l", "--lagtime", type=int, default=100, help="Lagtimes to use, default 100")
parser.add_argument("-c", "--cluster", type=int, default=100, help="Number of clusters to use, default 100")
parser.add_argument("--lengths", type=int, nargs=2, required=True, help="Lengths to analyse, specifying lower and upper bound")
parser.add_argument("--trajs", type=int, nargs=2, required=True, help="Number of trajectories to analyse, specifying lower and upper bound")
parser.add_argument("--length_step", type=int, default=20, help="Resolution in the lengths to explore, default is 20")
parser.add_argument("--trajs_step", type=int, default=20, help="Resolution in the trajs to explore, default is 20")
parser.add_argument("-n", type=int, default=10, help="Number of iterations for the cross validation")
parser.add_argument("--skip_steps", type=int, default=0, help="Number of initial steps to skip")
parser.add_argument("--out_path", type=str, default="", help="Path to store the output")
parser.add_argument("--cluster_each_iteration", action="store_true", help="Whether to cluster at each iteration, slower but more accurate results")
args = parser.parse_args()
return args.lagtime, args.out_path, args.cluster, args.length_step, args.trajs_step, args.n, args.skip_steps, args.lengths, args.trajs, args.cluster_each_iteration | e464504557497ebcfabdefe99fec5069aa9c03a1 | 702,613 |
def _touint64(num):
"""
This is required to convert signed json integers to unsigned.
"""
return num & 0xffffffffffffffff | 9edf75a0bd62ae2c6fb126a9ce1e1b283dfbd52c | 702,614 |
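A hedged worked example: the mask wraps negative values into two's-complement form, so -1 becomes 2**64 - 1:
>>> _touint64(-1)
18446744073709551615
>>> _touint64(42)
42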
import torch
def shuffle(images, targets, global_targets):
"""
A trick for CAN training
"""
sample_num = images.shape[1]
for i in range(4):
indices = torch.randperm(sample_num).to(images.device)
images = images.index_select(1, indices)
targets = targets.index_select(1, indices)
global_targets = global_targets.index_select(1, indices)
return images, targets, global_targets | 0079304c05293fb68e3af45be7a2e3cbd9564184 | 702,615 |
def get_lines_from_file(loc):
"""Reads the file and returns a list with every line.
Parameters:
loc (str): location of the file.
Returns:
list: list containing each of the lines of the file.
"""
    with open(loc) as f:
        result = [line.replace("\n", "") for line in f]
    return result | c05101b94e459346adae553e31d25d46a8475514 | 702,616 |
def GetBuildShortBaseName(target_platform):
"""Returns the build base directory.
Args:
target_platform: Target platform.
Returns:
Build base directory.
Raises:
RuntimeError: if target_platform is not supported.
"""
platform_dict = {
'Windows': 'out_win',
'Mac': 'out_mac',
'Linux': 'out_linux',
'Android': 'out_android',
'NaCl': 'out_nacl'
}
if target_platform not in platform_dict:
        raise RuntimeError('Unknown target_platform: ' + (target_platform or 'None'))
return platform_dict[target_platform] | 0bbbad4de3180c2ea51f5149cc3c2417a22b63e9 | 702,618 |
def make_wildcard(title, *exts):
"""Create wildcard string from a single wildcard tuple."""
return "{0} ({1})|{1}".format(title, ";".join(exts)) | 0634c450f43cc779431f61c2a060cea7e02f6332 | 702,619 |
from pathlib import Path
def dir_files(path, pattern="*"):
"""
Returns all files in a directory
"""
if not isinstance(path, Path):
raise TypeError("path must be an instance of pathlib.Path")
return [f for f in path.glob(pattern) if f.is_file()] | 5dbeeec6fe72b70381afb52dcbbea55613a37d49 | 702,620 |
def myDownsample(y, N) :
"""
yds = myDownsample(y,N)
yds is y sampled at every Nth index, starting with yds[0]=y[0] with y[range(0,len(y),N)]. Implementing Matlab's downsample.
Ted Golfinopoulos, 7 June 2012
"""
    return y[::N] | 0d39f37f4e3a5528f087921e6a4993ea6bf2981c | 702,621 |
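A hedged usage sketch (the slice works for plain lists as well as numpy arrays):
>>> myDownsample([0, 1, 2, 3, 4, 5, 6], 3)
[0, 3, 6]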
def _norm_args(norm):
"""
Returns the proper normalization parameter values.
Possible `norm` values are "backward" (alias of None), "ortho",
"forward".
This function is used by both the builders and the interfaces.
"""
if norm == "ortho":
ortho = True
normalise_idft = False
elif norm is None or norm == "backward":
ortho = False
normalise_idft = True
elif norm == "forward":
ortho = False
normalise_idft = False
else:
raise ValueError(f'Invalid norm value {norm}; should be "ortho", '
'"backward" or "forward".')
return dict(normalise_idft=normalise_idft, ortho=ortho) | e781c894c9d333fdbdf326120b1417b44dfc5181 | 702,622 |
import torch
def all_pair_iou(boxes_a, boxes_b):
"""
Compute the IoU of all pairs.
:param boxes_a: (n, 4) minmax form boxes
:param boxes_b: (m, 4) minmax form boxes
:return: (n, m) iou of all pairs of two set
"""
N = boxes_a.size(0)
M = boxes_b.size(0)
max_xy = torch.min(boxes_a[:, 2:].unsqueeze(1).expand(N, M, 2), boxes_b[:, 2:].unsqueeze(0).expand(N, M, 2))
min_xy = torch.max(boxes_a[:, :2].unsqueeze(1).expand(N, M, 2), boxes_b[:, :2].unsqueeze(0).expand(N, M, 2))
inter_wh = torch.clamp((max_xy - min_xy + 1), min=0)
I = inter_wh[:, :, 0] * inter_wh[:, :, 1]
A = ((boxes_a[:, 2] - boxes_a[:, 0] + 1) * (boxes_a[:, 3] - boxes_a[:, 1] + 1)).unsqueeze(1).expand_as(I)
B = ((boxes_b[:, 2] - boxes_b[:, 0] + 1) * (boxes_b[:, 3] - boxes_b[:, 1] + 1)).unsqueeze(0).expand_as(I)
U = A + B - I
return I / U | 1ca948e4a16016efa694d97c4829fcdfbc29e20d | 702,623 |
def get_empty_theme():
"""Create object that contains empty theme."""
return {
'theme': {},
'background': bytearray()
} | fc129d109ef2677b41d9a0f608fd580b63c45c8e | 702,624 |
import string
def letter_extractor(raws):
"""letter_
Frequencies of 26 English letters in a given text, case insensitive.
Known differences with Writeprints Static feature "letter frequency": None.
Args:
raws: List of documents.
Returns:
Frequencies of English letters in the document.
"""
    letter_ = [[raw.lower().count(letter) for letter in string.ascii_lowercase] for raw in raws]
label = ["letter_" + letter for letter in string.ascii_lowercase]
return letter_, label | 54dde1c58b7f5c59af313f11294483196ba917dc | 702,626 |
def crop_frames(frames, speaker):
"""
frames: (b h w c)
"""
if speaker == "chem" or speaker == "hs":
return frames
elif speaker == "chess":
return frames[:, 270:460, 770:1130]
elif speaker == "dl" or speaker == "eh":
return frames[:, int(frames.shape[1] * 3 / 4) :, int(frames.shape[2] * 3 / 4) :]
else:
raise ValueError("Unknown speaker!") | 1d92d7f6ea62f26a8bfead47594681602f2051c4 | 702,627 |
def builddict(fin):
"""
Build a dictionary mapping from username to country for all classes.
Takes as input an open csv.reader on the edX supplied file that lists
classname, country, and username and returns a dictionary that maps from
username to country
"""
retdict = {}
for course, country, username in fin:
if username not in retdict:
retdict[username] = country
return retdict | ddf9272e0da6616abd0495b7b159807a36a83dcc | 702,628 |
def get_reverse_depends(name, capability_instances):
"""Gets the reverse dependencies of a given Capability
:param name: Name of the Capability which the instances might depend on
:type name: str
:param capability_instances: list of instances to search for having a
dependency on the given Capability
:type capability_instances: :py:obj:`list` of :py:class:`CapabilityInstance`
:returns: A list of :py:class:`CapabilityInstance`'s which depend on the
given Capability name
:rtype: :py:obj:`list` of :py:class:`CapabilityInstance`
"""
rdepends = []
for instance in capability_instances:
if name in instance.depends_on:
rdepends.append(instance)
return rdepends | fda11bb01d6352b18e87365f1060f48a5c07f266 | 702,630 |
import collections
def node_degree_counter(g, node, cache=True):
"""Returns a Counter object with edge_kind tuples as keys and the number
of edges with the specified edge_kind incident to the node as counts.
"""
node_data = g.node[node]
if cache and 'degree_counter' in node_data:
return node_data['degree_counter']
degree_counter = collections.Counter()
    for _, neighbor, key in g.edges(node, keys=True):
node_kind = node_data['kind']
neighbor_kind = g.node[neighbor]['kind']
edge_kind = node_kind, neighbor_kind, key
degree_counter[edge_kind] += 1
if cache:
node_data['degree_counter'] = degree_counter
return degree_counter | 08c08f240e3170f4159e72bc7e69d99b69c37408 | 702,631 |
async def get_account_id(db, name):
"""Get account id from account name."""
return await db.query_one("SELECT find_account_id( (:name)::VARCHAR, True )", name=name) | 3dd6b46abd8726eb34eb4f8e1850dc56c3632e5c | 702,632 |
def params_to_payload(params, config):
"""Converts a set of parameters into a payload for a GET or POST
request.
"""
base_payload = {config['param-api-key']: config['api-key']}
return dict(base_payload, **params) | aec633ab62cf18c0acf685115d4e291cd6198cb3 | 702,633 |
from typing import Any
def is_a_string(v: Any) -> bool:
"""Returns if v is an instance of str.
"""
return isinstance(v, str) | f729f5784434ef255ea9b2f0ca7cdfbf726e7539 | 702,634 |
def historical():
"""" Retrieve stored data from datastore. """
return {
'page': 'historical',
} | 91933b3372e2972c37aaae2d8c83696e9398c19c | 702,635 |
import os
import re
def name_from_cmakelists(cmakelists):
""" Get a project name from a CMakeLists.txt file
"""
if not os.path.exists(cmakelists):
return None
res = None
# capture first word after project(), excluding quotes if any
regexp = re.compile(r'^\s*project\s*\("?(\w+).*"?\)', re.IGNORECASE)
lines = list()
with open(cmakelists, "r") as fp:
lines = fp.readlines()
for line in lines:
match = re.match(regexp, line)
if match:
res = match.groups()[0]
res = res.strip()
return res
return res | e18b214cbc4453f96989931ec9d4501cdbd1b718 | 702,636 |
import time
import json
import requests
def catch_distribution():
"""抓取行政区域确诊分布数据"""
data = dict()
url = "https://view.inews.qq.com/g2/getOnsInfo?name=wuwei_ww_area_counts&callback=&_=%d" %int(time.time()*1000)
for item in json.loads(requests.get(url=url).json()["data"]):
if item["area"] not in data:
data.update({item["area"]:0})
data[item["area"]] += int(item["confirm"])
return data | 4ce28940e9a3852a7622ceec489c186429fc9745 | 702,637 |
import sys
def extract_entity_text(text: str, offset: int, length: int) -> str:
"""
Get entity value.
:param text: Full message text
:param offset: Entity offset
:param length: Entity length
:return: Returns required part of the text
"""
if sys.maxunicode == 0xFFFF:
return text[offset : offset + length]
entity_text = text.encode("utf-16-le")
entity_text = entity_text[offset * 2 : (offset + length) * 2]
return entity_text.decode("utf-16-le") | 773426ffbf3cc186447594c9ddd50cd461c82316 | 702,638 |
def reconstructTypeFunctionType(typeFunction, args, kwargs):
"""Reconstruct a type from the values returned by 'isTypeFunctionType'"""
#note that our 'key' objects are dict-in-tuple-form, because dicts are
#not hashable. So to keyword-call with them, we have to convert back to a dict...
return typeFunction(*args, **dict(kwargs)) | e57658bb7e4b368a8caf86a72db05157b689500e | 702,639 |
def str_to_dict(
text: str,
/,
*keys: str,
sep: str = ",",
) -> dict[str, str]:
"""
Parameters
----------
text: str
The text which should be split into multiple values.
keys: str
The keys for the values.
sep: str
The separator for the values.
Returns
-------
dict[str, str]
"""
values = text.split(sep)
return {key: value for key, value in zip(keys, values)} | 0b34ea1b47d217929fd9df760231f4786150e661 | 702,640 |
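A hedged usage sketch (keys and values are illustrative; extra values beyond the supplied keys are silently dropped by zip):
>>> str_to_dict("alice,30,Paris", "name", "age", "city")
{'name': 'alice', 'age': '30', 'city': 'Paris'}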
def keyset():
"""
Creates a set of numeric keys centered around 0
Provides a comparison function based on numeric closeness of the
keys
"""
class KeySet:
extent = 10
def __init__(self):
self.key = "0"
self.all = [self.key]
for i in range(KeySet.extent):
self.all.append(str(i + 1))
@staticmethod
def compare(k1, k2):
return abs(int(k1) - int(k2)) / KeySet.extent
return KeySet() | ebdc08f13d9b82136042dae9b02206e4c6bb30d9 | 702,641 |