content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
def transpose(table):
    """
    Returns a copy of table with rows and columns swapped
    Example:
        1 2       1 3 5
        3 4  =>   2 4 6
        5 6
    Parameter table: the table to transpose
    Precondition: table is a rectangular 2d List of numbers
    """
    result = []  # Result (new table) accumulator
    # Loop over columns of the original table
    for col in range(len(table[0])):
        # Add each column as a row to result
        result.append([row[col] for row in table])
    return result | fe84714d3e09deb22058fd75ac3333c2206f77c3 | 704,561 |
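A quick check of the completed function, using the docstring's own example:

assert transpose([[1, 2], [3, 4], [5, 6]]) == [[1, 3, 5], [2, 4, 6]]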
def max_rl(din):
"""
A MAX function should "go high" only when all of
its inputs have arrived. Thus, AND gates are
used for its implementation.
Input: a list of 1-bit WireVectors
Output: a 1-bit WireVector
"""
if len(din) == 1:
dout = din[0]
else:
dout = din[0] & max_rl(din[1:])
return dout | b65710967a8a785e1ca0679252ac69c140b4c560 | 704,562 |
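For reference, the same AND-reduction can be written without recursion; a minimal sketch using functools.reduce (a hypothetical alternative, not part of the original record), which works for any operands supporting &, including 1-bit WireVectors:

import functools
import operator

def max_rl_iterative(din):
    # AND together all inputs; the output "goes high" only when every input is high
    return functools.reduce(operator.and_, din)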
import requests
def process_request(url, auth):
"""Perform an http request.
:param url: full url to query
:type url: ``str``
:param auth: username, password credentials
:type auth: ``tuple`` || ``None``
:returns: ``dict``
"""
    response = requests.get(url, auth=auth)
    if response.status_code >= 300:
        raise SystemExit(response.content)
    return response.json() | 051c60e03458e3c38d93dfd65d15f355ec284c12 | 704,563 |
import platform
def get_os():
"""
Get operating system.
:return: operating system
:rtype: str or unicode
"""
return platform.platform() | 104c8547c751388a2ea4be675be1fa44758d61d0 | 704,564 |
import random
def genpass(pwds_amount=1, passwd_length=8):
    """ Returns a list of 'pwds_amount' random passwords, each of length 'passwd_length' """
    return [''.join([chr(random.randint(32, 126)) for _ in range(passwd_length)]) for _ in range(pwds_amount)] | d5d4e38cc334f44e837c72f265a391bf72f5bd5f | 704,565 |
def vect3_scale(v, f):
"""
Scales a vector by factor f.
v (3-tuple): 3d vector
f (float): scale factor
return (3-tuple): 3d vector
"""
return (v[0]*f, v[1]*f, v[2]*f) | 94902cad0a7743f8e3ed1582bf6402229b8a028d | 704,566 |
def create_trigger_body(trigger):
"""Given a trigger, remove all keys that are specific to that trigger
and return keys + values that can be used to clone another trigger
https://googleapis.github.io/google-api-python-client/docs/dyn/tagmanager_v2.accounts.containers.workspaces.triggers.html#create
    :param trigger: the trigger resource (as returned by the Tag Manager API) to clone
    :type trigger: dict
"""
body = {}
non_mutable_keys = [
"accountId",
"containerId",
"fingerprint",
"parentFolderId",
"path",
"tagManagerUrl",
"triggerId",
"workspaceId",
]
for k, v in trigger.items():
if k not in non_mutable_keys:
body[k] = v
return body | 3b324407e77c1f17a5f76f82181db4976966e21b | 704,567 |
import math
def RadialToTortoise(r, M):
"""
Convert the radial coordinate to the tortoise coordinate
r = radial coordinate
M = ADMMass used to convert coordinate
return = tortoise coordinate value
"""
return r + 2. * M * math.log( r / (2. * M) - 1.) | 1bbfad661d360c99683b3c8fbe7a9c0cabf19686 | 704,570 |
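A worked value as a sanity check (illustrative numbers): for r = 10 and M = 1 the result is 10 + 2*ln(4).

assert abs(RadialToTortoise(10.0, 1.0) - 12.7725887222) < 1e-9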
import requests
import json
def get_pulls_list(project, github_api=3):
"""get pull request list
github_api : version of github api to use
"""
if github_api == 3:
url = f"https://api.github.com/repos/{project}/pulls"
else:
url = f"http://github.com/api/v2/json/pulls/{project}"
response = requests.get(url)
response.raise_for_status()
if github_api == 2:
return json.loads(response.text)["pulls"]
return json.loads(response.text) | 891c99d53faa5fb89960e5bb52c85e42f6003c42 | 704,571 |
def read_chunk(path, start_offset, end_offset, delete_me_entire_func_maybe):
"""
Return only if 100% successful.
"""
try:
with open(path, 'rb') as f:
f.seek(start_offset)
return f.read(end_offset - start_offset)
except FileNotFoundError as e:
raise e | e45c948bcb7f75fdf0eecac8289e4323b2d88dfe | 704,573 |
import subprocess
import click
def _create_kube_config_gcloud_entry(cluster_name, cluster_zone, project):
"""Uses GCloud CLI to create an entry for Kubectl.
This is needed as we install the charts using kubectl, and it needs the correct config
Args:
cluster_name (str): Name of cluster
cluster_zone (str): Zone of cluster
project (str): Current used project
Returns:
(str): Kube context for the cluster
"""
p = subprocess.Popen(
[
"gcloud",
"container",
"clusters",
"get-credentials",
cluster_name,
"--zone",
cluster_zone,
"--project",
project,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, error = p.communicate()
if p.returncode != 0:
raise click.UsageError(
"Failed to add kube config entry:\n {}".format(error.decode())
)
context_name = "gke_{}_{}_{}".format(project, cluster_zone, cluster_name)
return context_name | b161e397445393bde989b6ae9dec1353fec38329 | 704,574 |
def MediumOverLong(lengths):
"""
A measure of how needle or how plate-like a molecules is.
0 means perfect needle shape
1 means perfect plate-like shape
ShortOverLong = Medium / Longest
"""
return lengths[1]/lengths[2] | 48a053b55b39a50d7b0f618f843d370a55220765 | 704,575 |
from typing import Optional
import os
import re
def change_suffix(fname: str, new_suffix: str, old_suffix: Optional[str] = None) -> str:
"""Change suffix of filename.
Changes suffix of a filename. If no old suffix is provided, the part that is replaced is guessed.
Args:
fname: Filename to process.
new_suffix: Replace old suffix with this.
old_suffix: (Optional) Old suffix of filename - must be part of filename. Default = None.
Returns:
Filename with replaced suffix.
Examples:
>>> change_suffix("test.txt.gz", "")
"test.txt"
>>> change_suffix("test.sorted.txt.gz", ".txt", ".sorted.txt.gz")
"test.txt"
"""
if not old_suffix:
old_suffix = os.path.splitext(fname)[1]
    return str(re.sub(re.escape(old_suffix) + "$", new_suffix, fname)) | ae9f4c05d88d8d293e59d8e8a58b8961a36c568c | 704,576 |
def segment_objects(white_cloud):
"""
Cluster extraction and create cluster mask
"""
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster sizes (in points)
ec.set_ClusterTolerance(0.025)
ec.set_MinClusterSize(50)
ec.set_MaxClusterSize(20000)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
return cluster_indices | 590c3d75a1739128d97e998a601e92e335507915 | 704,577 |
import string
def tamper(payload, **kwargs):
"""
Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> \u0053\u0045\u004C\u0045\u0043\u0054)
Notes:
    * Useful to bypass weak filtering and/or WAFs in JSON contexts
>>> tamper('SELECT FIELD FROM TABLE')
'\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "\\u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += '\\u%.4X' % ord(payload[i])
i += 1
return retVal | ef293a5be9698dea8f01186a38794ff9c3482c94 | 704,578 |
from datetime import datetime
def timestamp_to_iso(timestamp):
"""
Converts an ISO 8601 timestamp (in the format `YYYY-mm-dd HH:MM:SS`) to :class:`datetime`
Example:
>>> timestamp_to_iso(timestamp='2020-02-02 02:02:02')
datetime(year=2020, month=2, day=2, hour=2, minute=2, second=2)
:param timestamp: timestamp to convert
:return: datetime representation of the timestamp
"""
    return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S') | 7de7ea8b1fd5bd4d854c43b9818bf6f8f58da279 | 704,579 |
def rename_category_for_flattening(category, category_parent=""):
"""
    Tidy the name of the passed category by lower-casing it, replacing spaces with '_', and removing extraneous characters such as '/', '(', ')', ',' and '-'.
:param category: string to be renamed (namely, a category of crime)
:param category_parent: optional string to insert at the beginning of the string (in addition to other edits)
:return: new string name for category passed
"""
if category_parent == "":
return category.lower().replace(" ", "_").replace("/", "").replace("(", "").replace(")", "").replace(",", "").replace(";", "_").replace("-", "")
return category_parent + "_" + category.lower().replace(" ", "_").replace("/", "").replace("(", "").replace(")", "").replace(",", "").replace(";", "").replace("-", "") | 360e87da0a8a778f32c47adc58f33a2b92fea801 | 704,580 |
import math
def billing_bucket(t):
"""
Returns billing bucket for AWS Lambda.
:param t: An elapsed time in ms.
:return: Nearest 100ms, rounding up, as int.
"""
return int(math.ceil(t / 100.0)) * 100 | 87b9963c1a2ef5ad7ce1b2fac67e563dcd763f73 | 704,581 |
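Worked examples (illustrative durations): 320 ms bills as 400 ms, and an exact multiple stays put.

assert billing_bucket(320) == 400
assert billing_bucket(400) == 400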
import hashlib
def filename_to_int_hash(text):
"""
    Returns the sha1 hash of the text passed in, converted to an integer.
"""
hash_name_hashed = hashlib.sha1(text.encode("utf-8")).hexdigest()
return int(hash_name_hashed, 16) | b5cb53b921146d4ae124c20b0b267acc80f6de43 | 704,582 |
def adjust_lr_on_plateau(optimizer):
"""Decrease learning rate by factor 10 if validation loss reaches a plateau"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr']/10
return optimizer | 615631fd4853e7f0c0eae59a3336eb4c4794d3a3 | 704,583 |
def chain(node1, node2, include_ids=False, only_ids=False):
"""
Find a chain of dependency tags from `node1` to `node2` (if possible)
:param node1: The node 1
:type node1: udon2.Node
:param node2: The node 2
:type node2: udon2.Node
"""
node, chain = node2, []
while not node.is_identical(node1, ""):
chain.append(node.id if only_ids else f"{node.deprel}#{int(node.id)}" if include_ids else node.deprel)
node = node.parent
chain.reverse()
return chain | bcfe1497ea731ad902bc5760542c8ce6f3286b60 | 704,584 |
import os
def file_str(f):
"""
    :param f: full path to a folder or file
    :return: the simplified display name
a/b ==> <b>
a/b.txt ==> b.txt
"""
name = os.path.basename(f)
if os.path.isdir(f):
s = '<' + name + '>'
else:
s = name
return s | 60ea019dc5bf2145b85d15e4c58ae9c08a588c38 | 704,585 |
def image_output_size(input_shape, size, stride, padding):
"""Calculate the resulting output shape for an image layer with the specified options."""
if len(size) > 2 and input_shape[3] != size[2]:
print("Matrix size incompatible!")
height = size[0]
width = size[1]
out_depth = size[3] if len(size) > 2 else int(input_shape[3])
input_height = input_shape[1]
input_width = input_shape[2]
if padding == "VALID":
input_height -= height - 1
input_width -= width - 1
return (
int(input_shape[0]),
(input_height + stride[0] - 1) // stride[0],
(input_width + stride[1] - 1) // stride[1],
out_depth
) | 77665f8304570bd5ba805241131a96d5d6908587 | 704,586 |
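A worked example (hypothetical shapes): a 28x28 single-channel image through a 5x5 convolution with 32 output channels, stride 1 and VALID padding gives a 24x24x32 output.

assert image_output_size((1, 28, 28, 1), (5, 5, 1, 32), (1, 1), "VALID") == (1, 24, 24, 32)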
import re
from typing import Tuple
def _info_from_match(match: re.Match, start: int) -> Tuple[str, int]:
"""Returns the matching text and starting location if none yet available"""
if start == -1:
start = match.start()
return match.group(), start | 3599c6345db5ce2e16502a6e41dda4684da2f617 | 704,587 |
def otherICULegacyLinks():
"""The file `icuTzDir`/tools/tzcode/icuzones contains all ICU legacy time
zones with the exception of time zones which are removed by IANA after an
ICU release.
For example ICU 67 uses tzdata2018i, but tzdata2020b removed the link from
"US/Pacific-New" to "America/Los_Angeles". ICU standalone tzdata updates
don't include modified icuzones files, so we must manually record any IANA
modifications here.
After an ICU update, we can remove any no longer needed entries from this
function by checking if the relevant entries are now included in icuzones.
"""
return {
# Current ICU is up-to-date with IANA, so this dict is empty.
} | bfacf0d8b5a31c5edbd69f93c4d55d8857599e1a | 704,588 |
import re
def replace_php_define(text, define, value):
"""
    Replaces a named constant (define) in PHP code.
Args:
text (str) : The PHP code to process.
define (str) : Name of the named constant to modify.
value (int,str) : Value to set the 'define' to.
Returns:
The modified PHP code.
"""
    if isinstance(value, str): replacement = r"\g<1>'{0}'\g<2>".format(value)
    elif isinstance(value, int): replacement = r'\g<1>{0}\g<2>'.format(value)
    else: raise RuntimeError('Datatype is not supported.')
    regex = r"^(\s*define\s*\(\s*'{0}'\s*,\s*).*(\s*\)\s*;.*)".format(re.escape(define))
    text, substitutions = re.subn(regex, replacement, text, 1, re.MULTILINE | re.UNICODE)
if substitutions == 0: raise RuntimeError('Named constant \'{0}\' is not part of the specified php code.'.format(define))
return text | 02e3194d6fb83958d525651cdca6e3cec1cf3bb7 | 704,590 |
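A small usage sketch (hypothetical PHP snippet and constant name):

php = "define('DB_PORT', 3306);"
assert replace_php_define(php, 'DB_PORT', 5432) == "define('DB_PORT', 5432);"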
import os
import inspect
def get_spec_file_path(step_class):
"""
    Given a Step (sub)class, divine and return the full path to the
corresponding spec file. Use the fact that by convention, the spec file is
in the same directory as the `step_class` source file. It has the name of
the Step (sub)class and extension .spec.
"""
step_source_file = os.path.abspath(inspect.getfile(step_class))
# Since `step_class` could be defined in a file called whatever, we need
# the source file basedir and the class name.
    dirname = os.path.dirname(step_source_file)
    return os.path.join(dirname, step_class.__name__ + '.spec') | 55c0df8873322c8d31e728ea02d721df1deb2abe | 704,591 |
def _only_one_selected(*args):
"""Test if only one item is True."""
return sum(args) == 1 | 9966cc7c2cde16c689f29ba2add80b2cddce56e7 | 704,592 |
import random
import string
def generate_random_id(start: str = ""):
"""
Generates a random alphabetic id.
"""
result = "".join(random.SystemRandom().choices(string.ascii_lowercase, k=16))
if start:
result = "-".join([start, result])
return result | f818ecf7ba4296a3ad010ef20bc5e286036bb56d | 704,593 |
def get_client_names(worksheet) -> list:
"""Get list of client names from Excel worksheet."""
num_rows = worksheet.max_row
names = []
for i in range(2, num_rows+1):
cell_obj = worksheet.cell(row=i, column=1)
if cell_obj.value not in names:
names.append(cell_obj.value)
return names | 6da6e52ed10e84ae79119c511e063114bb61b334 | 704,594 |
def _num_to_words(num):
"""
Turkish converter
Params:
num(int/long): number to be converted
Returns:
wordString
"""
units = ['', u'bir', u'iki', u'üç', u'dört', u'beş', u'altı', u'yedi', u'sekiz', u'dokuz']
teens = ['', u'onbir', u'oniki', u'onüç', u'ondört', u'onbeş', u'onaltı', u'onyedi', u'onsekiz', u'ondokuz']
tens = ['', u'on', u'yirmi', u'otuz', u'kırk', u'elli', u'altmış', u'yetmiş', u'seksen', u'doksan']
thousands = ['', u'bin', u'milyon', u'milyar', u'trilyon', u'katrilyon', u'kentilyon']
words = []
if num==0: words.append(u'sıfır')
else:
# Convert num to string
numStr = '%d'%num
numStrLen = len(numStr)
        # Get the number of 3-digit groups
groups = (numStrLen+2)//3
if groups>(len(thousands)):
return ''
        # Pad with zeros for the missing digits
numStr = numStr.zfill(groups*3)
for i in range(0,groups*3,3):
h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])
g = groups-(i//3+1)
# Add hundreds
if h>=1:
# In order not to say 'bir yüz'
if h!=1:
words.append(units[h])
words.append(u'yüz')
# Add tens
if t>1:
words.append(tens[t])
if u>=1: words.append(units[u])
# Add teens
elif t==1:
if u>=1: words.append(teens[u])
else: words.append(tens[t])
# If second digit is zero
else:
# In order not to say 'bir bin'
if g!=1 or u!=1:
if u>=1: words.append(units[u])
# Add thousands
if (g>=1) and ((h+t+u)>0): words.append(thousands[g])
return ' '.join(words) | 14adb62d17f2089127ca9b90f1d884063c028adf | 704,595 |
def upper(value: str): # Only one argument.
"""Converts a string into all uppercase"""
return value.upper() | 8ec4c4ed284bc8d823e356db7749a4c98a00b194 | 704,596 |
def xor(a,b):
""" XOR two strings of same length"""
assert len(a) == len(b)
x = []
for i in range(len(a)):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x) | cbe3d32883dc5516821711181c7f5d52194d89de | 704,597 |
import hashlib
import yaml
def get_hash(x, length=16):
"""Return hash of x."""
return hashlib.sha224(yaml.dump(dict(key=x)).encode()).hexdigest()[:length] | e13c278ef649e2d8c213580d5ccc27ae64d72027 | 704,598 |
def filtreDonner(liste) :
"""
    Function that filters the data.
    This function filters out useless data: overly frequent words, etc.
    param : liste[string] -> list of strings to filter
    return : liste[string] -> filtered list of strings
"""
return liste | b7e5f04a6645895a16c44f3f477ecc9d9a8ecef1 | 704,599 |
import os.path
def get_include():
"""
Return the directory that contains the dpctl *.h header files.
Extension modules that need to be compiled against dpctl should use
this function to locate the appropriate include directory.
"""
return os.path.join(os.path.dirname(__file__), "include") | 0d63b857071ce118dd8206187fddbf59d2d86583 | 704,600 |
def hello_world(text: str) -> str:
"""Print and return input."""
print(text)
return text | 7bfcb8e9cfccdf5fad8c702f97f6b7c4e56c7682 | 704,601 |
def video_player(obj):
"""
Receives object with 'video' FileField and returns HTML5 player.
"""
return {'object': obj} | 197c16e2ff16777634cfad327c08df571481ed09 | 704,602 |
def wait_for(scope, prompt):
"""
Waits until the response of the remote host contains the given pattern.
:type prompt: regex
:param prompt: The prompt pattern.
"""
conn = scope.get('__connection__')
conn.expect(prompt)
scope.define(__response__=conn.response)
return True | 10c95350b4c2aa4ad8fe9bce040efc461f461ca0 | 704,603 |
import re
def tokens_to_str(message, section='body'):
""" Takes one section of a message as specified by key param and
returns it in string format to be joined with other messages
for summarization, printing, id creation (future).
"""
body = message[section]
new_mess = ''
if isinstance(body[0], list):
for sentence in body:
for word in sentence:
new_mess += (word + ' ')
# put chars in list for easy processing
interim_mess = []
for char in new_mess:
interim_mess.append(char)
# push some chars to the left
for i, char in enumerate(interim_mess):
if i>0:
match = re.match('[!.,?)]', char)
if match and interim_mess[i-1] == ' ':
interim_mess.pop(i-1)
# push some chars to the right
for i, char in enumerate(interim_mess):
if i>0:
match = re.match('[$(]', interim_mess[i-1])
if match and char == ' ':
interim_mess.pop(i)
elif section == 'tags':
interim_mess = ' #'.join(body)
interim_mess = '#' + interim_mess
else:
# put chars in list for easy processing
interim_mess = ' '.join(body)
return ''.join(interim_mess) | 4b8f57060dfe110a2a0e2c767a73966cc1d5abdb | 704,604 |
import functools
def polygon_wrapper(func):
"""
Wrapper function to perform the setup and teardown of polygon
attributes before and after creating the polygon.
Keyword arguments:
func (function) -- the function to draw the polygon.
"""
@functools.wraps(func)
def draw_polygon(self, *args, **kwargs):
"""
Setup the Context, draw the polygon with attributes applied, and
teardown the environment.
"""
# Save the Context so we can restore it when this is done
self.context.save()
# Initialize the polygon's attributes
self._init_attributes(**kwargs)
# Call the function
result = func(self, *args, **kwargs)
# Fill the polygon, if it's being filled
if self.fill:
self.context.fill_preserve()
# Set the outline fill_color and outline the polygon
self.calling_surface._set_color(self.line_color)
self.context.stroke()
# Restore the Context now that the polygon is drawn
self.context.restore()
return result
return draw_polygon | 76056e41c36a2c15dcb8a2e05cc4ec4c1beb68dc | 704,605 |
def compose_base_find_query(user_id: str, administrator: bool, groups: list):
"""
Compose a query for filtering reference search results based on user read rights.
:param user_id: the id of the user requesting the search
:param administrator: the administrator flag of the user requesting the search
    :param groups: the ids of the groups the requesting user belongs to
:return: a valid MongoDB query
"""
if administrator:
return dict()
is_user_member = {
"users.id": user_id
}
is_group_member = {
"groups.id": {
"$in": groups
}
}
is_owner = {
"user.id": user_id
}
return {
"$or": [
is_group_member,
is_user_member,
is_owner
]
} | 2f398930603093ddc59e0c6ba4956e7d46a7758d | 704,606 |
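For a non-administrator the composed query ORs the three read-access conditions (illustrative ids):

query = compose_base_find_query("bob", administrator=False, groups=["tech"])
assert query == {
    "$or": [
        {"groups.id": {"$in": ["tech"]}},
        {"users.id": "bob"},
        {"user.id": "bob"},
    ]
}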
def intersection (l, r) :
"""Compute intersection of lists `l` and `r`.
>>> intersection (range (4), range (2,5))
[2, 3]
"""
r_set = set (r)
return [x for x in l if x in r_set] | 36d7003587204814b6e09ec093f2a6715e87a500 | 704,607 |
def filter_df_on_ncases(df, case_id_glue="case:concept:name", max_no_cases=1000):
"""
Filter a dataframe keeping only the specified maximum number of cases
Parameters
-----------
df
Dataframe
case_id_glue
Case ID column in the CSV
max_no_cases
Maximum number of cases to keep
Returns
------------
df
Filtered dataframe
"""
cases_values_dict = dict(df[case_id_glue].value_counts())
cases_to_keep = []
for case in cases_values_dict:
cases_to_keep.append(case)
cases_to_keep = cases_to_keep[0:min(len(cases_to_keep),max_no_cases)]
df = df[df[case_id_glue].isin(cases_to_keep)]
return df | 5f8532ebe465d7b80934b35ef8d3925217f4e355 | 704,608 |
def decode(s):
"""doc me"""
for encoding in "utf-8-sig", "utf-16":
try:
return s.decode(encoding)
except UnicodeDecodeError:
continue
return s.decode("latin-1") | 40ce76e5067e591eb1e433f18c4d574a7235ab4e | 704,610 |
def remove_whitespace(s):
"""Remove excess whitespace including newlines from string
"""
words = s.split() # Split on whitespace
return ' '.join(words) | 7d9e7b15ba101f00412565b42c260e8bc29ac49a | 704,611 |
def get_first_group (match):
"""
Retrieves the first group from the match object.
"""
return match.group(1) | d4103989a7fbd55e40600d391b51dfb93053ed8f | 704,612 |
def pattern_to_regex(pattern):
"""
Convert the CODEOWNERS path pattern into a regular expression string.
"""
orig_pattern = pattern # for printing errors later
# Replicates the logic from normalize_pattern function in Gitlab ee/lib/gitlab/code_owners/file.rb:
if not pattern.startswith('/'):
pattern = '/**/' + pattern
if pattern.endswith('/'):
pattern = pattern + '**/*'
# Convert the glob pattern into a regular expression:
# first into intermediate tokens
pattern = (pattern.replace('**/', ':REGLOB:')
.replace('**', ':INVALID:')
.replace('*', ':GLOB:')
.replace('.', ':DOT:')
.replace('?', ':ANY:'))
if pattern.find(':INVALID:') >= 0:
raise ValueError("Likely invalid pattern '{}': '**' should be followed by '/'".format(orig_pattern))
# then into the final regex pattern:
re_pattern = (pattern.replace(':REGLOB:', '(?:.*/)?')
.replace(':GLOB:', '[^/]*')
.replace(':DOT:', '[.]')
.replace(':ANY:', '.') + '$')
if re_pattern.startswith('/'):
re_pattern = '^' + re_pattern
return re_pattern | 8b82ad2efa9e47028a7419dcef72fb9c6b3741ba | 704,614 |
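A traced example (hypothetical pattern): '*.py' is first normalized to '/**/*.py', then compiled to a regex matching any .py file in the repository.

import re

assert pattern_to_regex('*.py') == '^/(?:.*/)?[^/]*[.]py$'
assert re.search(pattern_to_regex('*.py'), '/src/main.py')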
def unit2uniform(x, vmin, vmax):
"""
    mapping from the uniform distribution on the unit hypercube
    to the uniform distribution on the parameter space [vmin, vmax]
"""
return vmin + (vmax - vmin) * x | 2765db219dfda5debd5f8957c0ad9c0b44335f89 | 704,615 |
def _uint_to_le(val, length):
"""Returns a byte array that represents an unsigned integer in little-endian format.
Args:
val: Unsigned integer to convert.
length: Number of bytes.
Returns:
A byte array of ``length`` bytes that represents ``val`` in little-endian format.
"""
return val.to_bytes(length=length, byteorder='little') | 54e765e7b3772c6e2e6dc4c7e6de48d034b9d4b5 | 704,616 |
def gcd(num1, num2):
"""Return Greatest Common Divisor"""
# Euclidean Algorithm for GCD
a = max([num1, num2])
b = min([num1, num2])
while b != 0:
mod = a % b
a = b
b = mod
return a | 36c788d44a4aafaaf000963a7c5e1b80fa6f64f5 | 704,617 |
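A classic Euclid trace (illustrative): gcd(1071, 462) steps through remainders 147, 21, 0 and returns 21; argument order does not matter.

assert gcd(1071, 462) == 21
assert gcd(462, 1071) == 21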
def get_type_path(type, type_hierarchy):
"""Gets the type's path in the hierarchy (excluding the root type, like
owl:Thing).
The path for each type is computed only once then cached in type_hierarchy,
to save computation.
"""
if 'path' not in type_hierarchy[type]:
type_path = []
current_type = type
while current_type in type_hierarchy:
type_path.append(current_type)
current_type = type_hierarchy[current_type]['parent']
type_hierarchy[type]['path'] = type_path
return type_hierarchy[type]['path'] | 29344b63197f4ea6650d059767100401c693990a | 704,618 |
def get_required_webots_version():
"""Return the Webots version compatible with this version of the package."""
return 'R2020b revision 1' | 89e6e458c2409670d70a833996c76f05e77bd7b1 | 704,619 |
def expand_parameters_from_remanence_array(magnet_parameters, params, prefix):
"""
Return a new parameters dict with the magnet parameters in the form
'<prefix>_<magnet>_<segment>', with the values from 'magnet_parameters'
and other parameters from 'params'.
The length of the array 'magnet_parameters' must be equal to the sum of
the number of segments in both cylinders.
The first n_II elements refer to the inner magnet,
and the remaining elements to the outer magnet.
"""
params_expanded = params.copy()
n_II = params["n_II"]
for i in range(0, n_II):
params_expanded["%s_II_%d" % (prefix, i + 1,)] = magnet_parameters[i]
n_IV = params["n_IV"]
for j in range(0, n_IV):
k = j + n_II # the first n_II elements refer to magnet II
params_expanded["%s_IV_%d" % (prefix, j + 1,)] = magnet_parameters[k]
return params_expanded | e087f5b1e8ea264f074f921a5283d7806178664b | 704,620 |
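A small worked call (hypothetical prefix and parameter values): two inner and one outer segment expand three remanence values into per-segment keys.

params = {"n_II": 2, "n_IV": 1, "h_gap": 0.05}
expanded = expand_parameters_from_remanence_array([1.0, 1.1, 1.2], params, "B_rem")
assert expanded["B_rem_II_1"] == 1.0
assert expanded["B_rem_II_2"] == 1.1
assert expanded["B_rem_IV_1"] == 1.2
assert expanded["h_gap"] == 0.05  # untouched parameters are preserved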
def transformNode(doc, newTag, node=None, **attrDict):
"""Transform a DOM node into new node and copy selected attributes.
Creates a new DOM node with tag name 'newTag' for document 'doc'
and copies selected attributes from an existing 'node' as provided
in 'attrDict'. The source 'node' can be None. Attribute values will
be converted to strings.
E.g.
n = transformNode(doc, "node1", x="0", y="1")
-> DOM node for <node1 x="0" y="1"/>
n = transformNode(doc, "node1", x=0, y=1+1)
-> DOM node for <node1 x="0" y="2"/>
n = transformNode(doc, "node1", node0, x="x0", y="x0", zoo=bar())
-> DOM node for <node1 x="[node0.x0]" y="[node0.y0]" zoo="[bar()]"/>
"""
newNode = doc.createElement(newTag)
for newAttr, attr in attrDict.items():
sattr = str(attr)
if not node:
newNode.setAttribute(newAttr, sattr)
else:
attrVal = node.getAttribute(sattr)
newNode.setAttribute(newAttr, attrVal or sattr)
return newNode | 2329858a02c643077f67d5c705fb3df72c2a96ee | 704,621 |
def tag_group(tag_group, tag):
"""Select a tag group and a tag."""
payload = {"group": tag_group, "tag": tag}
return payload | f22ccd817145282729876b0234c8309c24450140 | 704,622 |
def combine_sequences(vsequences, jsequences):
"""
Do a pairwise combination of the v and j sequences to get putative germline sequences for the species.
"""
combined_sequences = {}
for v in vsequences:
vspecies, vallele = v
for j in jsequences:
            _, jallele = j
combined_sequences[("%s_%s_%s"%(vspecies, vallele,jallele)).replace(" ", "_")] = vsequences[v] + jsequences[j]
return combined_sequences | dac2aea73bd078bcf96dc8e7b44c5dcdeade2759 | 704,623 |
def read_seq_file(filename):
"""Reads data from sequence alignment test file.
Args:
filename (str): The file containing the edge list.
Returns:
str: The first sequence of characters.
str: The second sequence of characters.
int: The cost per gap in a sequence.
int: The cost per mismatch in a sequence.
"""
with open(filename, 'r') as f:
next(f) # Skip first line
cost_gap, cost_mismatch = next(f).strip().split()
cost_gap, cost_mismatch = int(cost_gap), int(cost_mismatch)
seq_x = next(f).strip()
seq_y = next(f).strip()
return seq_x, seq_y, cost_gap, cost_mismatch | 9160bb0b2643deae669818cea1bc1ebeb51506b8 | 704,624 |
def build_complement(dna):
"""
:param dna: str, the DNA strand that user gives(all letters are upper case)
:return: str, the complement of dna
"""
new_dna = ''
for base in dna:
if base == 'A':
new_dna += 'T'
elif base == 'T':
new_dna += 'A'
elif base == 'G':
new_dna += 'C'
elif base == 'C':
new_dna += 'G'
return new_dna | dffdf6345ec25ea80e89996aef7c85a41f38d6f4 | 704,625 |
def _seasonal_prediction_with_confidence(arima_res, start, end, exog, alpha,
**kwargs):
"""Compute the prediction for a SARIMAX and get a conf interval
Unfortunately, SARIMAX does not really provide a nice way to get the
confidence intervals out of the box, so we have to perform the
``get_prediction`` code here and unpack the confidence intervals manually.
Notes
-----
For internal use only.
"""
results = arima_res.get_prediction(
start=start,
end=end,
exog=exog,
**kwargs)
f = results.predicted_mean
conf_int = results.conf_int(alpha=alpha)
return f, conf_int | 9520bf1a60eeb39c25e9a369b0b337905df9afb8 | 704,626 |
def top_height(sz):
"""Returns the height of the top part of size `sz' AS-Waksman network."""
return sz // 2 | 1e4a43a8935cc5c3ccf104e93f87919205baf4a4 | 704,627 |
from typing import Callable
import functools
import logging
import time
def eval_time(function: Callable):
    """Decorator that logs the duration of the decorated function."""
    @functools.wraps(function)  # preserve the wrapped function's metadata
    def timed(*args, **kwargs):
log = logging.getLogger(__name__)
time_start = time.time()
result = function(*args, **kwargs)
time_elapsed = round(time.time() - time_start, 2)
log.info(f"Processing time of {function.__name__}: {time_elapsed}s")
return result
return timed | 3f40394c5638bf0fc6371d4247c8980da1f6363f | 704,628 |
def valid_lsi(addr):
"""Is the string a valid Local Scope Identifier?
>>> valid_lsi('1.0.0.1')
True
>>> valid_lsi('127.0.0.1')
False
>>> valid_lsi('1.0.1')
False
>>> valid_lsi('1.0.0.365')
False
>>> valid_lsi('1.foobar')
False
"""
parts = addr.split('.')
if not len(parts) == 4:
return False
if not int(parts[0]) == 1:
return False
in_range = all([0 <= int(x) < 256 for x in parts])
if not in_range:
return False
return True | 8a90547f239ea6d2a5aa971115c2015edc42932b | 704,629 |
def grade(morse_code, inputs):
"""Grades how well the `inputs` represents the expected `morse_code`.
Returns a tuple with three elements. The first is a Boolean telling if we
consider the input good enough (this is the pass/fail evaluation). The next
two elements are strings to be show, respectively, in the top and bottom
lines of the display to give as feedback.
"""
# Did we get the right number of dits and dahs?
expected_inputs = len(morse_code) * 2 - 1
if len(inputs) > expected_inputs:
return (False, " Not good! ", " Extra dit-dahs ")
if len(inputs) < expected_inputs:
return (False, " Not good! ", "Too few dit-dahs")
# Check the sequence of dits and dahs. Don't be too critical about timing
# for now: simply require that every dit is shorter than every dah.
dit_lengths = [ ]
longest_dit = 0.0
dah_lengths = [ ]
shortest_dah = float("inf")
i = 0
for c in morse_code:
press_length = inputs[i*2]
if c == ".":
if press_length > longest_dit:
longest_dit = press_length
dit_lengths.append(inputs[i*2])
else:
if press_length < shortest_dah:
shortest_dah = press_length
dah_lengths.append(inputs[i*2])
if len(dit_lengths) > 0 and len(dah_lengths) > 0 and shortest_dah <= longest_dit:
return (False, "Not Good! Wrong", "dit-dah sequence")
# For the purposes of timing, spaces count as dits
if i < len(morse_code)-1:
dit_lengths.append(inputs[i*2 + 1])
i += 1
# Now check the dits and dahs lengths more carefully
time_unit = (sum(dit_lengths) + sum(dah_lengths)) / (len(dit_lengths) + 3*len(dah_lengths))
i = 0
worst_prec = 1.0
while i < len(inputs):
prec = 0.0
# Check the character
if morse_code[i//2] == ".":
prec = inputs[i] / time_unit
else:
prec = inputs[i] / (3 * time_unit)
if prec > 1.0:
prec = 1.0 / prec
if prec < worst_prec:
worst_prec = prec
# Check the space
if i+1 >= len(inputs):
break
prec = inputs[i+1] / time_unit
if prec > 1.0:
prec = 1.0 / prec
if prec < worst_prec:
worst_prec = prec
i += 2
if worst_prec < 0.35:
return (False, "Not good! Bad", "dit-dahs timing.")
elif worst_prec < 0.55:
return (True, "Good! Can better", "dit-dahs timing.")
elif worst_prec < 0.75:
return (True, "Good!", "Almost perfect!")
else:
return (True, "Great!", "Nailed it!") | 43038fa81ff9a5d39d337b38b5afed1c3ca57e4d | 704,630 |
def reverse_complement(dna):
"""
Reverse-complement a DNA sequence
:param dna: string, DNA sequence
:type dna: str
:return: reverse-complement of a DNA sequence
"""
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
return ''.join([complement[base] for base in dna[::-1]]) | efcb38e06fc494adabeb304934ebef9bd932a11f | 704,631 |
from copy import deepcopy
def addReference(inData, reference):
"""
"""
data = deepcopy(inData)
existing_refs = [x for x in data['relatedIdentifiers'] if x['relationType']=='References']
ref_list = [ x['relatedIdentifier'] for x in existing_refs]
if ( reference not in ref_list):
print(reference, 'is NOT in existing references, adding it.')
else:
        print(reference, 'is in existing references, doing nothing.')
return None # temporary.
r = {"relatedIdentifier": reference,
"relatedIdentifierType": 'DOI',
"relationType": 'References'}
data['relatedIdentifiers'].append(r)
return data | 85dd0c18966b632a2173c27e913bfe94a4d5ec29 | 704,633 |
def len_path_in_limit(p, n=128):
"""if path len in limit, return True"""
return len(p) < n | 988858918109902e662144a6650a33e593ba90b7 | 704,634 |
import torch
def threshold(tensor, density):
"""
Computes a magnitude-based threshold for given tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param density: Desired ratio of nonzeros to total elements
:type density: `float`
:return: Magnitude threshold
:rtype: `float`
"""
tf = tensor.abs().view(-1)
numel = int(density * tf.numel())
if numel == 0:
raise RuntimeError('Provided density value causes model to be zero.')
topk, _ = torch.topk(tf.abs(), numel, sorted=True)
return topk.data[-1] | d0c5a2726a2df195b0588af8af95dac187f50e1b | 704,635 |
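A quick check with a toy tensor (illustrative values): with density 0.3 over ten elements, the threshold is the third-largest magnitude.

t = torch.tensor([0.1, -0.9, 0.5, 0.05, -0.3, 0.7, 0.2, -0.6, 0.4, 0.8])
assert abs(float(threshold(t, 0.3)) - 0.7) < 1e-6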
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args | a4281afcbc572f02e719f97f92ec30bdf4ddb138 | 704,636 |
import random
import copy
def modify_drone(solution, simulation):
"""Modifies the drone of a random operation.
...
Parameters:
solution(List[Transportation]): The list of the transportations of the solution
simulation(Simulation): The simulation
Returns:
List[Transportation]: The modified solution
"""
solution = solution.copy()
if simulation.environment.drones_count == 1:
return solution
random_operation = random.randrange(0, len(solution))
new_drone = simulation.random_drone()
# Continues to generate a random drone while the drone generated
# is the same as the drone previously assigned
while solution[random_operation].drone == new_drone:
new_drone = simulation.random_drone()
# Copies the transportation in order to change its drone
# only in the mutated solution
transportation = copy.deepcopy(solution[random_operation])
transportation.drone = new_drone
# Assigns the transportation with the new drone to its position
solution[random_operation] = transportation
return solution | 69debcb5a42e52248b6b8e18c62642f8290126f6 | 704,637 |
def _succ(p, l):
"""
retrieve the successor of p in list l
"""
pos = l.index(p)
if pos + 1 >= len(l):
return l[0]
else:
return l[pos + 1] | 0eea63bd24da4079b9718af437c6d7e38ef25444 | 704,638 |
def _readline(ser):
"""Read a line from device on 'ser'.
ser open serial port
Returns all characters up to, but not including, a newline character.
"""
line = bytearray() # collect data in a byte array
while True:
c = ser.read(1)
if c:
if c == b'\n':
break
line += c
return str(line, encoding='utf-8') | 469c5b6afa786d8bf94dec72a918b6df3b3ba4d7 | 704,639 |
def reorder_cols_df(df, cols):
"""Reorder the columns of a DataFrame to start with the provided list of columns"""
cols2 = [c for c in cols if c in df.columns.tolist()]
cols_without = df.columns.tolist()
for col in cols2:
cols_without.remove(col)
return df[cols2 + cols_without] | 917b0084ba34f8e1b1fc697c4838ff8404a2fc90 | 704,641 |
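A small pandas example (illustrative columns); names not present in the DataFrame are silently skipped:

import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
assert reorder_cols_df(df, ["c", "missing", "a"]).columns.tolist() == ["c", "a", "b"]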
def add_923_heat_rate(df):
"""
Small function to calculate the heat rate of records with fuel consumption and net
generation.
Parameters
----------
df : dataframe
Must contain the columns net_generation_mwh and
fuel_consumed_for_electricity_mmbtu
Returns
-------
dataframe
Same dataframe with new column of heat_rate_mmbtu_mwh
"""
# Calculate the heat rate for each prime mover/fuel combination
df["heat_rate_mmbtu_mwh"] = (
df["fuel_consumed_for_electricity_mmbtu"] / df["net_generation_mwh"]
)
return df | 907ac6ba469a65dfe25a84f7498e66b1e0535d19 | 704,642 |
def v4_multimax(iterable):
"""Return a list of all maximum values.
    Bonus 1 - one short solution.
"""
try:
max_item = max(iterable)
except ValueError:
return []
return [
item
for item in iterable
if item == max_item
] | fddeae328993fa77a0b73ab55c4e53a88b42b39c | 704,643 |
from datetime import datetime
def datefix(datestr):
""" transform string into a python datetime object
handle mm/dd/yy or mm/dd/yyyy or dashes instead of slashes """
fix = datestr.replace('-','/')
if len(fix) > 4:
try:
return datetime.strptime(fix, "%m/%d/%y")
except ValueError:
return datetime.strptime(fix, "%m/%d/%Y")
return datetime.utcnow() | 2cb728dfcec24b350d63a79fc3964d3325780b6a | 704,644 |
import re
def cleanHtml(sentence):
"""
    remove all HTML tags from the sentence
    :param sentence {str} sentence
    :return:
    {str}: sentence without HTML tags
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, ' ', str(sentence))
return cleantext | 1a3edcd7227468f8f3102525538a728a9bc93fc0 | 704,645 |
import re
def is_guid(value):
"""
    Checks that the value contains only the characters [A-Za-z0-9_-]; returns the value if so, else None.
"""
if re.match("^[A-Za-z0-9_-]*$", value):
return value
return None | ca9c84ebfe271d93bd7c8d3043f8dd1849fb3239 | 704,646 |
def merge_config(a, b):
"""Merges config b in a."""
for key, b_value in b.items():
if not isinstance(b_value, dict):
a[key] = b_value
else:
a_value = a.get(key)
if a_value is not None and isinstance(a_value, dict):
merge_config(a_value, b_value)
else:
a[key] = b_value
return a | 2e194d9b19c2270968cd205062b4d3ec992cfced | 704,647 |
def fit_index(dataset, list_variables):
""" Mapping between index and category, for categorical variables
For each (categorical) variable, create 2 dictionaries:
- index_to_categorical: from the index to the category
- categorical_to_index: from the category to the index
Parameters
----------
dataset: pandas.core.frame.DataFrame
DataFrame with (partly) categorical variables
list_variables: list(str)
List of variable names to index
Returns
-------
index: dict
        For each categorical column, we have the 2 mappings: idx2cat & cat2idx
"""
index = dict()
for icol in list_variables:
if icol not in dataset.columns:
raise RuntimeError(f'{icol} not found in dataframe')
idx2cat = {ii: jj for ii, jj in enumerate(dataset.loc[:, icol].unique())}
cat2idx = {jj: ii for ii, jj in idx2cat.items()}
index[icol] = {
'index_to_categorical': idx2cat,
'categorical_to_index': cat2idx
}
return index | 7b8c73a5d23de2e537c1f28078d2e032095d6b1c | 704,648 |
def convert_bin_to_text(bin_str: str) -> str:
"""Convert a string of binary to text.
Parameters:
-----------
bin_str:
string: A string of binary, terminating with 00000000.
Returns:
--------
text:
string: A plaintext representation of the binary string.
"""
    # Get the number of characters, less one for the terminating 00000000 byte.
    num_chars = int(len(bin_str) / 8) - 1
    text = ""
for i in range(num_chars):
ascii_val = int(bin_str[i*8:(i+1)*8:], 2)
text += chr(ascii_val)
return text | 8890ff192ae4b6e01401dd7f018bf8906c3c37ce | 704,650 |
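A round-trip check (illustrative): the ASCII codes for "Hi" are 01001000 and 01101001, followed by the 00000000 terminator.

assert convert_bin_to_text("01001000" + "01101001" + "00000000") == "Hi"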
def scale_on_x_list(x_list, scaler):
"""Scale list of ndarray.
"""
return [scaler.transform(e) for e in x_list] | 2fbe36cb23e99ca6eaf277fb5509e2e997ec4a52 | 704,651 |
import hashlib
def md5(ori_str):
""" MD5加密算法
:param ori_str: 原始字符串
:return: 加密后的字符串
"""
md5_obj = hashlib.md5()
md5_obj.update(ori_str.encode("utf8"))
return md5_obj.hexdigest() | 75efc3226c2f0355ce4b988acd6dcd1a95ea8294 | 704,652 |
import getpass
def getuser() -> str:
"""
Get the username of the current user.
Will leverage the ``getpass`` package.
Returns:
str: The username of the current user
"""
return getpass.getuser() | 3f6053e9aba37f7eafcd7735d7509af290fd3940 | 704,653 |
def cal_pipe_equivalent_length(tot_bui_height_m, panel_prop, total_area_module):
"""
To calculate the equivalent length of pipings in buildings
:param tot_bui_height_m: total heights of buildings
:type tot_bui_height_m: float
:param panel_prop: properties of the solar panels
:type panel_prop: dict
:param total_area_module: total installed module area
:type total_area_module: float
:return: equivalent lengths of pipings in buildings
:rtype: dict
"""
# local variables
lv = panel_prop['module_length_m'] # module length
total_area_aperture = total_area_module * panel_prop['aperture_area_ratio']
number_modules = round(total_area_module / panel_prop['module_area_m2']) # this is an estimation
# main calculation
l_ext_mperm2 = (2 * lv * number_modules / total_area_aperture) # pipe length within the collectors
l_int_mperm2 = 2 * tot_bui_height_m / total_area_aperture # pipe length from building substation to roof top collectors
Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture
pipe_equivalent_lengths = {'Leq_mperm2': Leq_mperm2, 'l_ext_mperm2': l_ext_mperm2, 'l_int_mperm2': l_int_mperm2}
return pipe_equivalent_lengths | 60c95cc1c5a38876095a77f4e68ab3b0df6280a3 | 704,654 |
def format_parameters(section):
"""Format the "Parameters" section."""
def format_item(item):
item = map(lambda x: x.strip(), item)
return ' - **{0}**: *{1}*\n {2}'.format(*item)
return '**Parameters**\n\n{0}'.format('\n\n'.join(
map(format_item, section))) | 8f1393b843b6ea46d69d5644f932f7f0e62160ab | 704,655 |
def _grouprule_aggs_filter(having, columns):
"""
Given (having) conditions, return what to filter on as a string, to be used
after groupbys as grouped.query(string returned by this function).
:param having:
:type having: list
:param columns: Columns on which the group by is made.
:type columns: list
:return: String to be used on a df.query to filter based on the "having" conditions.
:rtype: str
"""
# add first condition
cond = having[0]
    operator_map = {"gt": ">", "lt": "<", "eq": "=="}
first_operator = cond["operator"]
if cond["aggregator"] == "count" and cond["column"] == "*":
result = "_groupby_agg_%s_%s %s %s" % (
columns[0], "size", operator_map[first_operator], cond["value"])
else:
result = "_groupby_agg_%s_%s %s %s" % (
cond["column"], cond["aggregator"], operator_map[first_operator], cond["value"])
# add the rest
for cond in having[1:]:
operator = cond["operator"]
if cond["aggregator"] == "count" and cond["column"] == "*":
result = result + " and _groupby_agg_%s_%s %s %s" % (
columns[0], "size", operator_map[operator], cond["value"])
else:
result = result + " and _groupby_agg_%s_%s %s %s" % (
cond["column"], cond["aggregator"], operator_map[operator], cond["value"])
return result | 86243383bc3bd6f66751effe275ffaa0c34edf5e | 704,656 |
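An example of the produced filter string (hypothetical conditions): HAVING count(*) > 5 and avg(price) < 100, grouped by "city".

having = [
    {"aggregator": "count", "column": "*", "operator": "gt", "value": 5},
    {"aggregator": "avg", "column": "price", "operator": "lt", "value": 100},
]
assert _grouprule_aggs_filter(having, ["city"]) == (
    "_groupby_agg_city_size > 5 and _groupby_agg_price_avg < 100"
)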
def _find_literal(s, start, level, parts, exprs):
"""Roughly Python/ast.c:fstring_find_literal"""
i = start
parse_expr = True
while i < len(s):
ch = s[i]
if ch in ("{", "}"):
if level == 0:
if i + 1 < len(s) and s[i + 1] == ch:
i += 2
parse_expr = False
break
elif ch == "}":
raise SyntaxError("f-string: single '}' is not allowed")
break
i += 1
parts.append(s[start:i])
return i, parse_expr and i < len(s) | 39e7d97f8aa4bfcd79af00359395605c5910985c | 704,657 |
import argparse
def parse_arguments():
"""Arguments parsing."""
parser = argparse.ArgumentParser("my_agent", description="Launch my agent.")
parser.add_argument("--name", default="my_agent", help="Name of the agent")
parser.add_argument(
"--oef-addr", default="127.0.0.1", help="TCP/IP address of the OEF Agent"
)
parser.add_argument(
"--oef-port", default=10000, help="TCP/IP port of the OEF Agent"
)
parser.add_argument(
"--agent-timeout",
type=float,
default=1.0,
help="The time in (fractions of) seconds to time out an agent between act and react.",
)
parser.add_argument(
"--private-key-pem",
default=None,
help="Path to a file containing a private key in PEM format.",
)
parser.add_argument(
"--expected-version-id",
type=str,
help="The epected version id of the TAC.",
default="tac_v1",
)
return parser.parse_args() | d1b747e8ed9d57d63cb02c58eb585ffa649dc42e | 704,658 |
import os
import subprocess
def validate_move(main_prefix: str, original_path: str, new_file: str) -> bool:
"""Checks that a given file exists at the location in
the main bucket and no longer exists in the upload bucket.
Returns True if this is the case and False otherwise.
"""
main_path = os.path.join('gs://', main_prefix, 'batch0', new_file)
# upload_path = os.path.join('gs://', upload_prefix, original_file)
upload_path = original_path
exists_main = subprocess.run(['gsutil', '-q', 'stat', main_path], check=False)
exists_upload = subprocess.run(['gsutil', '-q', 'stat', upload_path], check=False)
# Exists at destination and not at source
return exists_upload.returncode != 0 and exists_main.returncode == 0 | 12b9003604e6b5f79ebc3e1639a7162ca9b24ac6 | 704,659 |
def normalize_trinucleotide(trinucleotide):
"""Return the normalized representation of the input trinucleotide sequence
Notes
-----
Each trinucleotide sequence has two possible representations (the sequence
and its reverse complement). For example, 5'-ACG-3' and 5'-CGT-3' are two
representations of the same trinucleotide sequence. To prevent ambiguity,
choose the representation where the central nucleotide of the trinucleotide
context is a C or a T is chosen.
"""
# Consistency checks
assert len(trinucleotide) == 3
for letter in trinucleotide:
assert letter in ['A', 'C', 'G', 'T']
complement_map = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}
reverse_complement = ""
for letter in trinucleotide[::-1]:
reverse_complement += complement_map[letter]
# Consistency checks
assert len(reverse_complement) == 3
for letter in reverse_complement:
assert letter in ['A', 'C', 'G', 'T']
# Choose the seq where the middle nucleotide is a 'C' or a 'T'
if trinucleotide[1] in ['C', 'T']:
return trinucleotide
elif reverse_complement[1] in ['C', 'T']:
return reverse_complement
else:
raise Exception("Unexpected error.") | fe04ba6fad28285eac9becbbd6e5324ec7734850 | 704,660 |
import time
def get_current_timestamp(): # pylint: disable=unused-variable
"""
Retrieves the current local time in a custom timestamp format
"""
return time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) | 839ef3e2bc434355d5b077ef4e2a1cb138fab2d1 | 704,661 |
import math
def format_float(number, decimal_places):
"""
Accurately round a floating-point number to the specified decimal
places (useful for formatting results).
"""
divisor = math.pow(10, decimal_places)
value = number * divisor + .5
value = str(int(value) / divisor)
frac = value.split('.')[1]
trail_len = decimal_places - len(frac)
return value + ''.join(['0'] * trail_len) | e7aaa92025284489075ce053319c27310bb96a00 | 704,662 |
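Worked examples (illustrative): trailing zeros are padded out to the requested precision.

assert format_float(3.14159, 2) == '3.14'
assert format_float(2.5, 3) == '2.500'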
def decode_transaction_filter(metadata_bytes):
"""Decodes transaction filter from metadata bytes
Args:
metadata_bytes (str): Encoded list of transaction filters
Returns: decoded transaction_filter list
"""
transaction_filter = []
if not metadata_bytes:
return None
for i in metadata_bytes:
transaction_filter.append(int(i))
return transaction_filter | c76638f6592fb098e2878471746152aa9df9a694 | 704,663 |
def merge(left, right, path=None):
"""Merge dicts"""
if path is None:
path = []
for key in right:
if key in left:
if isinstance(left[key], dict) and isinstance(right[key], dict):
merge(left[key], right[key], path + [str(key)])
elif left[key] == right[key]:
pass # same leaf value
elif isinstance(left[key], list) and isinstance(right[key], list):
for item in right[key]:
if item not in left[key]:
left[key].append(item)
else:
raise Exception('Conflict at %s' %
'.'.join(path + [str(key)]))
else:
left[key] = right[key]
return left | cb313f153225af41626885ae0ee066215dce3b0e | 704,664 |
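A nested-merge example (illustrative dicts): shared dict branches merge recursively and list values are unioned in place.

a = {"opts": {"debug": True}, "tags": ["x"]}
b = {"opts": {"level": 2}, "tags": ["x", "y"]}
merge(a, b)
assert a == {"opts": {"debug": True, "level": 2}, "tags": ["x", "y"]}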
import hashlib
def get_url_gravatar(email):
"""
    Get a Gravatar URL for the given email address.
"""
m = hashlib.md5()
m.update(email.encode('utf-8'))
url = "http://www.gravatar.com/avatar/{0}.jpg?s=300".format(m.hexdigest())
return url | bf48d903445869ee91c685dd1b84e11034dc528c | 704,665 |
def get_index_image():
"""Formats html.
Returns:
Modified index.html content
"""
return """<!DOCTYPE HTML><html lang="en-us">
<head>
</head>
<body style='margin:0'>
<img src='image.[[image_ext]]'>
</body>
</html>""" | 41ea7fbc31e49879216e46083b102294edb5c76f | 704,666 |
def merge_schema(original: dict, other: dict) -> dict:
"""Merge two schema dictionaries into single dict
Args:
original (dict): Source schema dictionary
other (dict): Schema dictionary to append to the source
Returns:
dict: Dictionary value of new merged schema
"""
source = original.copy()
for key, value in other.items():
if key not in source:
source[key] = value
else:
if isinstance(value, list):
source[key].extend(value)
elif isinstance(value, dict):
source[key] = merge_schema(source[key], value)
else:
source[key] = value
return source | 6425b64e6ab166ac14afc2e47392745903b8fd12 | 704,667 |
import hashlib
def hash160(s: bytes) -> bytes:
"""
sha256 followed by ripemd160
:param s: data
:return: hashed data
"""
return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest() | 7b18fcdf51db707a17d5408c7b364818a6c5ee0c | 704,668 |
import re
def valid_email(email):
"""Check for a valid email address.
Args:
email (str): Email.
Returns:
bool: Return True if in valid email format and False if not.
"""
    return bool(re.match(r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$", email)) | 01c343008229fb2fdf2af3a9a74f3059930696eb | 704,669 |