content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def listtimes(list, c):
    """Repeat each element of *list* ``c`` times in place, preserving order.

    Example: listtimes([1, 2], 3) -> [1, 1, 1, 2, 2, 2]

    Note: the parameter name shadows the builtin ``list``; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    ret = []
    # Iterate elements directly instead of indexing; drop the stray
    # C-style semicolons from the original.
    for item in list:
        ret.extend([item] * c)
    return ret
def eval_f(f, xs):
    """Apply the function *f* to every value in *xs*.

    For xs=[x0, x1, ..., xn] returns [f(x0), f(x1), ..., f(xn)].
    """
    return list(map(f, xs))
import io
def unpack_text_io_wrapper(fp, encoding):
    """
    Return a ``(stream, encoding)`` pair for *fp*.

    If *fp* is an #io.TextIOWrapper, the underlying binary buffer is
    returned and, when *encoding* is None, the wrapper's own encoding is
    used. A #RuntimeError is raised when a writable wrapper's encoding
    conflicts with an explicitly requested *encoding*.
    """
    if not isinstance(fp, io.TextIOWrapper):
        return fp, encoding
    if encoding is not None and fp.writable() and fp.encoding != encoding:
        msg = 'TextIOWrapper.encoding({0!r}) != {1!r}'
        raise RuntimeError(msg.format(fp.encoding, encoding))
    resolved = fp.encoding if encoding is None else encoding
    return fp.buffer, resolved
def scale_bounding_box(bounding_box, scale):
    """Scale bounding-box corner coords (dict with keys x1, y1, x2, y2)
    by the per-axis factors given by *scale* (dict with keys x, y)."""
    axis_for_corner = {"x1": "x", "y1": "y", "x2": "x", "y2": "y"}
    return {
        corner: int(round(bounding_box[corner] * scale[axis]))
        for corner, axis in axis_for_corner.items()
    }
import re
def _strip_build_number(api_version):
"""Removes the build number component from a full api version string."""
match = re.match(r"^([A-Z]+-)?([0-9]+)(\.[0-9]+){2}$", api_version)
if match:
return api_version[:match.start(3)]
# if there aren't exactly 3 version number components, just leave it unchanged
return api_version | 20d8023281f05dfcb8c9fdd021b77796c72e1001 | 1,886 |
import re
def sort_special_vertex_groups(vgroups,
                               special_vertex_group_pattern='STYMO:',
                               global_special_vertex_group_suffix='Character'):
    """
    Partition *vgroups* so that names matching the global pattern
    (prefix + suffix) come first, each partition sorted alphabetically.
    This lets top-level vertex groups always precede detail groups that
    overwrite top-level assignments.
    """
    global_vg_name_pattern = (special_vertex_group_pattern +
                              global_special_vertex_group_suffix)

    def is_global(name):
        return re.match(global_vg_name_pattern, name) is not None

    globals_first = sorted(g for g in vgroups if is_global(g))
    the_rest = sorted(g for g in vgroups if not is_global(g))
    return globals_first + the_rest
import hashlib
def obtain_file_hash(path, hash_algo="md5"):
    """Hash the file at *path* and return the hex digest.

    hash_algo: "sha256" selects SHA-256; any other value falls back to
    MD5 (matching the original behaviour).
    """
    hasher = hashlib.sha256() if hash_algo == "sha256" else hashlib.md5()
    chunk_size = 65535
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def keras_decay(step, decay=0.0001):
    """Keras-style inverse-time learning-rate decay factor."""
    denominator = 1. + decay * step
    return 1. / denominator
import collections
def _build_pep8_output(result):
"""
Build the PEP8 output based on flake8 results.
Results from both tools conform to the following format:
<filename>:<line number>:<column number>: <issue code> <issue desc>
with some issues providing more details in the description within
parentheses.
:param result: output from flake8
:returns: list of flake8 output lines by error
"""
# Aggregate individual errors by error
_dict = collections.defaultdict(list)
for line in str(result).split("\n"):
if line:
# Preserve only the code and brief description for each issue to
# facilitate aggregating the results. For example,
#
# E501 line too long (178 > 79 characters) -> E501 line too long
# E303 too many blank lines (4) -> E303 too many blank lines
parts = line.replace("(", ":").split(":")
line_num, col_num, base_issue = parts[1:4]
# Strip the whitespace around the base <issue code> <description>.
#
# Also restore the missing colon, stripped above, if the issue
# was 'missing whitespace' surrounding a colon.
issue = base_issue.strip()
key = "{}:'".format(issue) if issue.endswith("after '") else issue
_dict[key].append("{} ({})".format(line_num, col_num))
# Build the output as one issue per entry
return ["{}: {}".format(k, ", ".join(_dict[k])) for k in
sorted(_dict.keys())] | a4abda2f9d3a2d9b3524c60429b047cbfe0285d9 | 1,898 |
def form_value(request, entity, attribute):
    """
    Return *attribute* from the request params, falling back to *entity*.

    :param request: Pyramid request.
    :param entity: Instance to read the attribute from when it is absent
        from the request params.
    :param str attribute: Name of the attribute to look up.
    """
    # Membership test (rather than a bare .get) so that an explicitly
    # empty request value still wins over the entity attribute.
    if attribute in request.params:
        return request.params.get(attribute, '')
    # Deliberately no getattr default: attribute typos should raise.
    return getattr(entity, attribute) if entity else ''
def check_in_image(paste_image_location, paste_image_size, canvas_image_size):
    """Checks whether the location for the pasted image is within the canvas.

    Args:
      paste_image_location: namedtuple of utils.XY, center coordinates of
        the image we want to paste.
      paste_image_size: namedtuple of utils.XY, size of the pasted image.
      canvas_image_size: scalar size of the (square) canvas.

    Returns:
      True if the pasted image would lie within the canvas, False otherwise.
    """
    margin_x = int(paste_image_size.x / 2) + 1
    margin_y = int(paste_image_size.y / 2) + 1
    fits_horizontally = (paste_image_location.x - margin_x >= 1 and
                         paste_image_location.x + margin_x <= canvas_image_size)
    fits_vertically = (paste_image_location.y - margin_y >= 1 and
                       paste_image_location.y + margin_y <= canvas_image_size)
    return fits_horizontally and fits_vertically
def event_message(iden, event):
    """Build the message envelope for *event* with the given id."""
    message = {"id": iden, "type": "event"}
    message["event"] = event
    return message
def scale(data, new_min, new_max):
    """Scales a normalised data series into [new_min, new_max].

    :param data: The normalised data series to be scaled
    :type data: List of numeric values
    :param new_min: The minimum value of the scaled data series
    :type new_min: numeric
    :param new_max: The new maximum of the scaled data series
    :type new_max: numeric
    :return: A scaled data series
    :rtype: list
    """
    span = new_max - new_min
    return [value * span + new_min for value in data]
def get_same_padding(kernel_size: int, stride: int, dilation: int) -> int:
    """Calculates the padding size to obtain "same" padding.

    Same padding makes the output shape input_shape / stride; stride=1
    keeps the input shape, stride=2 halves it.

    Args:
        kernel_size : convolution kernel size (only tested with odd values).
        stride : convolution stride
        dilation : convolution dilation
    Raises:
        ValueError: Only stride or dilation may be greater than 1
    Returns:
        padding value to obtain same padding.
    """
    if stride > 1 and dilation > 1:
        raise ValueError("Only stride OR dilation may be greater than 1")
    # Effective kernel extent grows with dilation.
    effective_kernel = dilation * (kernel_size - 1) + 1 if dilation > 1 else kernel_size
    return effective_kernel // 2
def get_rgba_from_color(rgba):
    """Return a tuple of the R, G, B, A components of a packed colour.

    Arguments:
    rgba -- colour as a 32-bit integer (0xRRGGBBAA)
    """
    return tuple((rgba >> shift) & 0xFF for shift in (24, 16, 8, 0))
def legislature_to_number(leg):
"""
Takes a full session and splits it down to the values for
FormatDocument.asp.
session = '49th-1st-regular'
legislature_to_number(session) --> '49Leg/1s'
"""
l = leg.lower().split('-')
return '%sLeg/%s%s' % (l[0][0:2], l[1][0], l[2][0]) | cffeeea2bad17d9dadcfd75d70417824c7fe3396 | 1,918 |
import math
def n_permutations(n, r=None):
    """Number of permutations (unique by position).

    :param n: population length
    :param r: sample length (defaults to n)
    :return: int
    """
    r = n if r is None else r
    if n < 0 or r < 0:
        raise ValueError("n and r must be positive")
    if n == 0 or r > n:
        return 0
    return math.factorial(n) // math.factorial(n - r)
def get_wl_band(radar_frequency):
    """Returns integer corresponding to radar frequency.

    Args:
        radar_frequency (float): Radar frequency (GHz).
    Returns:
        int: 0=35GHz radar, 1=94Ghz radar.
    """
    is_35ghz_band = 30 < radar_frequency < 40
    return 0 if is_35ghz_band else 1
import hashlib
def md5hash(string):
    """
    Return the MD5 hex digest of the given string.

    Accepts bytes directly; str input is UTF-8 encoded first (hashlib
    requires a bytes-like object, so the original raised TypeError on str).
    """
    if isinstance(string, str):
        string = string.encode("utf-8")
    return hashlib.md5(string).hexdigest()
import math
def _fcn_mg_joint_pos(t, q_init, q_end, t_strike_end):
"""Helper function for `create_mg_joint_pos_policy()` to fit the `TimePolicy` scheme"""
return ((q_end - q_init) * min(t / t_strike_end, 1) + q_init) / 180 * math.pi | 892a494ea5ee2033d2f29efe7400bce8aab30c1c | 1,932 |
import re
def rex_coverage(patterns, example_freqs, dedup=False):
    """
    Count, for each regular expression in *patterns*, how many examples in
    the *example_freqs* dict it matches.

    With ``dedup=True`` frequencies are ignored and only the number of
    matching keys is counted.
    """
    results = []
    for pattern in patterns:
        anchored = '%s%s%s' % ('' if pattern.startswith('^') else '^',
                               pattern,
                               '' if pattern.endswith('$') else '$')
        rex = re.compile(anchored, re.U)
        if dedup:
            count = sum(1 for key in example_freqs if rex.match(key))
        else:
            count = sum(freq for key, freq in example_freqs.items()
                        if rex.match(key))
        results.append(count)
    return results
import importlib
def import_module_attribute(function_path):
    """Import and return a module attribute given a full dotted path."""
    module_path, _, attribute_name = function_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
def make_webhdfs_url(host, user, hdfs_path, op, port=50070):
    """ Forms the URL for httpfs requests.

    INPUT
    -----
    host : str
        The host to connect to for httpfs access to HDFS. (Can be 'localhost'.)
    user : str
        The user to use for httpfs connections.
    hdfs_path : str
        The full path of the file or directory being checked.
    op : str
        The httpfs operation string. E.g., 'GETFILESTATUS'.
    port : int
        The port to use for httpfs connections.

    OUTPUT
    ------
    str : The string to use for an HTTP request to httpfs.
    """
    return ('http://{host}:{port}/webhdfs/v1{path}'
            '?user.name={user}&op={op}').format(
                host=host, port=port, path=hdfs_path, user=user, op=op)
def _scale_value_to_rpm(value, total):
"""Scale value to reads per million"""
return value * 1 / (total / 1e6) | c3a49c8df8cbb22bd055a2f8076065463041bb72 | 1,940 |
def get_results_from_firebase(firebase):
    """
    Download all results from firebase.

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    results : dict
        Mapping of task_id -> {user_id: {"data": {...}}, ...}
    """
    database = firebase.database()
    return database.child("results").get().val()
def GetSourceFile(file, sourcepath):
    """Return *file* relative to the first root in *sourcepath* that
    prefixes it, or None when no root matches."""
    for root in sourcepath:
        if not file.startswith(root):
            continue
        # Skip the separator too, unless the root already ends with '/'.
        prefix_length = len(root) if root.endswith('/') else len(root) + 1
        return file[prefix_length:]
    return None
def header_lines(filename):
    """Read the first five lines of a file and return them as a list of
    decoded, right-stripped strings."""
    lines = []
    with open(filename, mode='rb') as handle:
        for _ in range(5):
            lines.append(handle.readline().decode().rstrip())
    return lines
import threading
def run_with_timeout(proc, timeout, input=None):
    """
    Run Popen process with given timeout. Kills the process if it does
    not finish in time.
    You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise
    the output will be None.
    The returncode is 999 if the process was killed.
    :returns: (returncode, stdout string, stderr string)
    """
    output = []
    def target():
        # communicate() blocks until the process closes its pipes; run it
        # in a worker thread so the main thread can enforce the timeout.
        output.extend(proc.communicate(input))
    thread = threading.Thread(target=target)
    # Daemon thread: don't block interpreter exit if communicate() hangs.
    thread.daemon = True
    thread.start()
    killed = False
    thread.join(timeout)
    if thread.is_alive():
        # Timed out: terminate the child, then wait for communicate() in
        # the worker thread to drain the pipes and return.
        proc.terminate()
        killed = True
        thread.join()
    returncode = proc.returncode
    if killed:
        # Sentinel return code signalling the process was killed.
        returncode = 999
    return returncode, output[0], output[1]
import inspect
def requires_request_arg(method):
    """
    Helper function to handle deprecation of old ActionMenuItem API where
    get_url, is_show, get_context and render_html all accepted both
    'request' and 'parent_context' as arguments.
    """
    try:
        # A pre-2.15 method takes both request and context kwargs, so
        # binding a single positional argument fails.
        inspect.signature(method).bind({})
    except TypeError:
        return True
    return False
def parse_api_error(response):
    """
    Parse the error-message from the API Response.

    Assumes, that a check if there is an error present was done beforehand.

    :param response: Dict of the request response ([imdata][0][....])
    :type response: ``dict``
    :returns: Parsed Error-Text
    :rtype: ``str``
    """
    first_entry = response["imdata"][0]
    if "error" not in first_entry:
        return "Unparseable: " + str(response)
    attributes = first_entry["error"]["attributes"]
    return "API-Errorcode " + str(attributes["code"]) + ": " + str(attributes["text"])
def sanitise_utf8(s):
    """Ensure an 8-bit string is utf-8.

    s -- 8-bit string (or None)

    Returns the sanitised string; if the input was already valid utf-8 the
    same object is returned. Bad characters are replaced with ascii
    question marks (rather than the unicode replacement character, since a
    non-unicode setup is likely involved somewhere).
    """
    if s is None:
        return None
    try:
        s.decode("utf-8")
        return s
    except UnicodeDecodeError:
        replaced = s.decode("utf-8", 'replace')
        return replaced.replace(u"\ufffd", u"?").encode("utf-8")
def _in_delta(value, target_value, delta) -> bool:
"""
Check if value is equal to target value within delta
"""
return abs(value - target_value) < delta | 92ab62a381fc1cfc6bbb82635f196ec4498babf4 | 1,963 |
def unorm_to_byte(x):
    """Map float x in [0, 1] to an integer in [0, 255]."""
    scaled = int(256 * x)
    return 255 if scaled > 255 else scaled
def _truncate_and_pad_token_ids(token_ids, max_length):
"""Truncates or pads the token id list to max length."""
token_ids = token_ids[:max_length]
padding_size = max_length - len(token_ids)
if padding_size > 0:
token_ids += [0] * padding_size
return token_ids | a8f29fdbc99c3dcac42b9275037d3a3c39c22e12 | 1,970 |
def castep_geom_count(dot_castep):
    """Count the number of geometry-optimisation cycles in a .castep file."""
    with open(dot_castep) as fhandle:
        return sum(1 for line in fhandle if 'starting iteration' in line)
def build_audit_stub(obj):
    """Returns a stub of audit model to which assessment is related to."""
    if obj.audit_id is None:
        return None
    return {
        'type': 'Audit',
        'id': obj.audit_id,
        'context_id': obj.context_id,
        'href': '/api/audits/%d' % obj.audit_id,
        'issue_tracker': obj.audit.issue_tracker,
    }
def render_cells(cells, width=80, col_spacing=2):
    """Given a list of short (~10 char) strings, display these aligned in
    columns.

    Parameters
    ----------
    cells : [(strlen, str), ...]
        Cells to print, as (visible length, display string) tuples.
    width : int
        The width of the terminal.
    col_spacing : int
        Size of the gap to leave between columns.
    """
    # Special case (max() below would fail on an empty sequence).
    if not cells:
        return ""
    # Columns are as wide as the widest cell plus inter-column padding.
    col_width = max(strlen for strlen, _ in cells) + col_spacing
    rendered_lines = []
    current_line = ""
    used = 0
    for strlen, text in cells:
        # Wrap once the next cell would overflow the terminal width.
        # (Matches the original exactly, including its quirk of emitting a
        # leading empty line when the very first cell is wider than width.)
        if used + strlen > width:
            rendered_lines.append(current_line)
            current_line = ""
            used = 0
        current_line += text + " " * (col_width - strlen)
        used += col_width
    rendered_lines.append(current_line)
    return "\n".join(line.rstrip() for line in rendered_lines)
import six
def range_join(numbers, to_str=False, sep=",", range_sep=":"):
    """
    Takes a sequence of positive integer numbers given either as integer or string types, and
    returns a sequence of 1- and 2-tuples denoting either single numbers or inclusive start/stop
    values of possible ranges. When *to_str* is *True*, a string is returned in a format
    consistent with :py:func:`range_expand`, with ranges built by *range_sep* and merged with
    *sep*.

    Example:

    .. code-block:: python

        range_join([1, 2, 3, 5])
        # -> [(1, 3), (5,)]

        range_join([1, 2, 3, 5, 7, 8, 9], to_str=True)
        # -> "1:3,5,7:9"
    """
    if not numbers:
        return "" if to_str else []
    # Validate and coerce, then make unique and sort.
    cleaned = []
    for value in numbers:
        if isinstance(value, six.string_types):
            try:
                value = int(value)
            except ValueError:
                raise ValueError("invalid number format '{}'".format(value))
        if not isinstance(value, six.integer_types):
            raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(value))
        cleaned.append(value)
    ordered = sorted(set(cleaned))
    # Walk the sorted numbers, extending the current run while consecutive.
    ranges = []
    start = stop = ordered[0]
    for value in ordered[1:]:
        if value == stop + 1:
            stop = value
        else:
            ranges.append((start,) if start == stop else (start, stop))
            start = stop = value
    ranges.append((start,) if start == stop else (start, stop))
    if not to_str:
        return ranges
    return sep.join(
        (str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r))
        for r in ranges
    )
import inspect
def _get_kwargs(func, locals_dict, default=None):
"""
Convert a function's args to a kwargs dict containing entries that are not identically default.
Parameters
----------
func : function
The function whose args we want to convert to kwargs.
locals_dict : dict
The locals dict for the function.
default : object
Don't include arguments whose values are this object.
Returns
-------
dict
The non-default keyword args dict.
"""
return {n: locals_dict[n] for n in inspect.signature(func).parameters
if locals_dict[n] is not default} | ae0a06cb4e17b5512a03e89d7ca2119c58ea762b | 1,982 |
def legendre(a, p):
    """Legendre symbol (a|p) via Euler's criterion."""
    euler = pow(a, (p - 1) // 2, p)
    return euler if euler != p - 1 else -1
def disemvowel(sentence):
    """Disemvowel:
    Given a sentence, return the sentence with all lowercase vowels removed.

    >>> disemvowel('the quick brown fox jumps over the lazy dog')
    'th qck brwn fx jmps vr th lzy dg'
    """
    # str.translate strips every vowel in a single pass instead of one
    # .replace() scan per vowel; the unreachable `pass` after the original
    # return has been removed. NOTE: uppercase vowels are deliberately
    # kept, matching the original behaviour.
    return sentence.translate(str.maketrans('', '', 'aeiou'))
def abs(rv):
    """
    Return the absolute value of the random variable *rv*.

    Note: intentionally shadows the builtin ``abs`` in this module;
    delegates to the object's own ``abs()`` method.
    """
    result = rv.abs()
    return result
def escape(instruction):
    """
    Escape used dot graph characters in given instruction so they will be
    displayed correctly.
    """
    # Backslash-escape the characters that delimit record labels in dot.
    instruction = instruction.replace('<', r'\<')
    instruction = instruction.replace('>', r'\>')
    instruction = instruction.replace('|', r'\|')
    instruction = instruction.replace('{', r'\{')
    instruction = instruction.replace('}', r'\}')
    # NOTE(review): this looks like a no-op (space -> space); the
    # replacement character may be a non-breaking space, or '&nbsp;' was
    # intended — confirm against the dot/HTML renderer before changing.
    instruction = instruction.replace(' ', ' ')
    return instruction
import itertools
def flatten(colours):
    """Flatten the cubular (3-level nested) array into one long list."""
    return [item for plane in colours for row in plane for item in row]
def parse_fastq(fh):
    """ Parse reads from a FASTQ filehandle. For each read, we
    return a name, nucleotide-string, quality-string triple. """
    reads = []
    while True:
        header = fh.readline()
        if not header:
            break  # end of file
        name = header[1:].rstrip()
        sequence = fh.readline().rstrip()
        fh.readline()  # '+' separator line, ignored
        quality = fh.readline().rstrip()
        reads.append((name, sequence, quality))
    return reads
def coalesce(*values):
    """Return the first argument that is not None, or None if all are."""
    for value in values:
        if value is not None:
            return value
    return None
def allowed_file(filename, extensions):
    """
    Check whether *filename* has an extension listed in *extensions*.

    :param filename: string
    :param extensions: list
    :return bool:
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in extensions
def payoff_blotto_sign(x, y):
    """
    Returns:
        (0, 0, 1) -- x wins, y loss;
        (0, 1, 0) -- draw;
        (1, 0, 0) -- x loss, y wins.
    """
    wins = sum(1 for a, b in zip(x, y) if a > b)
    losses = sum(1 for a, b in zip(x, y) if a < b)
    if wins > losses:
        return (0, 0, 1)
    if wins < losses:
        return (1, 0, 0)
    return (0, 1, 0)
def to_base_str(n, base):
    """Convert a non-negative number n into its string form in base `base`
    (digits 0-9 then A-Z), iteratively rather than recursively."""
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if n < base:
        return digits[n]
    chunks = []
    while n > 0:
        chunks.append(digits[n % base])
        n //= base
    return "".join(reversed(chunks))
def number_of_photons(i, n=6):
    """Return True when the photon count of sample *i* exceeds *n*
    (default 6)."""
    return sum(tuple(i)) > n
from typing import Any
from typing import get_type_hints
from typing import get_origin
from typing import Union
from typing import get_args
def get_repr_type(type_: Any) -> Any:
    """Parse a type and return a representative type.

    Example:
        All of the following expressions will be ``True``::

            get_repr_type(A) == A
            get_repr_type(Annotated[A, ...]) == A
            get_repr_type(Union[A, B, ...]) == A
            get_repr_type(Optional[A]) == A
    """
    class Temporary:
        __annotations__ = dict(type=type_)

    resolved = get_type_hints(Temporary)["type"]
    if get_origin(resolved) is not Union:
        return resolved
    return get_args(resolved)[0]
import csv
def parse_latency_stats(fp):
    """
    Parse latency statistics.

    :param fp: the file path that stores the statistics
    :returns: average latency in milliseconds to connect a pair of
        initiator and responder clients
    """
    latencies_ms = []
    with open(fp) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=' ',
                                fieldnames=['title', 'time'])
        for record in reader:
            latencies_ms.append(float(record['time']) * 1000)
    return sum(latencies_ms) / len(latencies_ms)
def _get_raster_extent(src):
"""
extract projected extent from a raster dataset
(min_x, max_x, min_y, max_y)
Parameters
----------
src : gdal raster
Returns
-------
(min_x, max_x, min_y, max_y)
"""
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
return ulx, lrx, lry, uly | 49ed0b3c583cbfa5b9ecbc96d94aec42aeba3a32 | 2,014 |
def score_to_rating_string(score):
    """
    Convert a numeric score to its rating label.
    """
    if score < 1:
        return "Terrible"
    if score < 2:
        return "Bad"
    if score < 3:
        return "OK"
    if score < 4:
        return "Good"
    return "Excellent"
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"):
"""Moving Average Convergence Divergence
Parameters:
-----------
df : DataFrame
Input dataframe.
ewa_short : int
Exponentially weighted average time-window for a short time-span.
A common choice for the short time-window is 12 intervals.
ewa_long : int
Exponentially weighted average time-window for a longer time-span.
A common choice for the long time-window is 26 intervals.
ewa_signal : int
Time-window for the EWA of the difference between long and short
averages.
price_col : str
Column name in `df` used for defining the current indicator (e.g. "open",
"close", etc.)
Returns:
--------
macd_ts : Series
Moving average convergence-divergence indicator for the time series.
"""
ewa_short = int(ewa_short)
ewa_long = int(ewa_long)
ewa_signal = int(ewa_signal)
ewa12 = df[price_col].ewm(span=ewa_short).mean()
ewa26 = df[price_col].ewm(span=ewa_long).mean()
macd_ts = ewa12 - ewa26
signal_line = macd_ts.ewm(span=ewa_signal).mean()
return macd_ts - signal_line, 'stationary' | 3140f67371394244b66b9048d273e0d5fee5e471 | 2,020 |
def __zedwalther(kin):
"""
Calculate the z-parameter for the Walther equation (ASTM D341).
Parameters
----------
kin: scalar
The kinematic viscosity of the lubricant.
Returns
-------
zed: scalar
The z-parameter.
"""
zed = kin + 0.7 + 10 ** (-1.47 - 1.84 * kin - 0.51 * kin ** 2)
return zed | d01a716da03230436c5f511cc65f9e7c96732d99 | 2,021 |
def parse_anchor_body(anchor_body):
    """
    Given the body of an anchor, determine which topic ID it is anchored
    to and what text the anchor displays in the source help file.

    Always returns a 2-tuple (topic_id, display_text); when the body has
    no explicit ID the two may be derived from the same string.
    """
    id_val, sep, remainder = anchor_body.partition(':')
    if sep:
        anchor_body = remainder
        # Empty ID before the colon falls back to the display text.
        id_val = id_val or remainder
    else:
        id_val = anchor_body
    return (id_val.casefold().rstrip(), anchor_body.strip())
def range_overlap(range1, range2):
    """
    Determine whether range1 lies within range2 (or is exactly the same).

    :param range range1: a range
    :param range range2: another range
    :rtype: bool
    :return: True when range1 is a subset of range2, False otherwise
    """
    starts_inside = range1.start >= range2.start
    stops_inside = range1.stop <= range2.stop
    return starts_inside and stops_inside
def set_namespace_root(namespace):
    """
    Return the GO ID(s) for the root of the selected namespace.

    Parameters
    ----------
    namespace : str
        Desired namespace: biological_process, cellular_component or
        molecular_function. Any other value selects all three roots.

    Returns
    -------
    list
        GO IDs of the root terms of the selected namespace.
    """
    roots = {
        'biological_process': ['GO:0008150'],
        'cellular_component': ['GO:0005575'],
        'molecular_function': ['GO:0003674'],
    }
    return roots.get(namespace, ['GO:0008150', 'GO:0005575', 'GO:0003674'])
def seconds_to_hours(s):
    """Convert seconds to hours.

    :param s: Number of seconds
    :type s: Float
    :return: Number of hours
    :rtype: Float
    """
    seconds_per_hour = 3600
    return float(s) / seconds_per_hour
def contigs_n_bases(contigs):
    """Returns the total n_bases summed over all contigs."""
    total = 0
    for contig in contigs:
        total += contig.n_bases
    return total
from datetime import datetime
def datetime_to_fractional_year(input: datetime) -> float:
    """Converts a Python datetime object to a fractional year.

    Fix: the original referenced ``date``, which was never imported
    (NameError at runtime); the already-imported ``datetime`` constructor
    is used instead to compute the year boundaries.
    """
    start = datetime(input.year, 1, 1).toordinal()
    year_length = datetime(input.year + 1, 1, 1).toordinal() - start
    return input.year + (input.toordinal() - start) / year_length
from typing import Union
import pathlib
def get_path(obj: Union[str, pathlib.Path]) -> pathlib.Path:
    """Convert *obj* into a fully expanded and resolved absolute Path.

    Args:
        obj: str or Path to expand (``~``) and resolve.
    """
    path = pathlib.Path(obj)
    return path.expanduser().resolve()
import json
import yaml
def load_dict(file_name):
    """
    Read a JSON or YAML file into a dictionary: JSON when the file name
    ends with ``.json`` (case-insensitive), YAML otherwise.
    """
    is_json = file_name.lower().endswith(".json")
    with open(file_name) as handle:
        return json.load(handle) if is_json else yaml.full_load(handle)
def units(legal_codes):
    """
    Return the sorted list of unique units for the given dictionaries
    representing legal_codes.
    """
    unique_units = {lc["unit"] for lc in legal_codes}
    return sorted(unique_units)
import re
def parse_discount(element):
    """Given an HTML element's text, parse and return the discount digits.

    All non-digit characters are stripped; non-string input (e.g. None)
    yields "0".

    Fixes: raw string for the regex (bare "\\D" is an invalid escape and
    a DeprecationWarning); re.sub raises TypeError (not AttributeError)
    on non-string input, so both are caught to make the documented
    fallback actually work.
    """
    try:
        discount = re.sub(r"\D", "", element)
    except (AttributeError, TypeError):
        discount = "0"
    return discount
import re
def _cleanse_line(line, main_character):
"""
Cleanse the extracted lines to remove formatting.
"""
# Strip the line, just in case.
line = line.strip()
# Clean up formatting characters.
line = line.replace('\\' , '') # Remove escape characters.
line = line.replace('[mc]', main_character) # Standardize MC name.
line = re.sub(r'{/?i}' , '*', line) # Convert italics to Markdown.
line = re.sub(r'{cps=\d+}', '' , line) # Remove scroll speed formatting.
return line | 87177c557ab89b77c63cc1df10874e52606258a7 | 2,057 |
import string
import random
def generate_random_string(length):
    """Generate a random string of the given length containing uppercase
    and lowercase letters, digits and ASCII punctuation."""
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return ''.join(random.choice(alphabet) for _ in range(length))
def find(x):
    """
    Find the representative (root) of node *x*, compressing the path so
    every visited node points directly at the root.
    """
    if x.instance is None:
        return x
    root = find(x.instance)
    # Path compression: re-point this node straight at the root.
    x.instance = root
    return root
def is_callable(x):
    """Return True when *x* can be called."""
    result = callable(x)
    return result
import torch
def pad_col(input, val=0, where='end'):
    """Append a column filled with `val` at the start or end of 2-D `input`."""
    if len(input.shape) != 2:
        raise ValueError(f"Only works for `phi` tensor that is 2-D.")
    pad = torch.zeros_like(input[:, :1])
    if val != 0:
        pad = pad + val
    if where == 'start':
        return torch.cat([pad, input], dim=1)
    if where == 'end':
        return torch.cat([input, pad], dim=1)
    raise ValueError(f"Need `where` to be 'start' or 'end', got {where}")
def _DX(X):
"""Computes the X finite derivarite along y and x.
Arguments
---------
X: (m, n, l) numpy array
The data to derivate.
Returns
-------
tuple
Tuple of length 2 (Dy(X), Dx(X)).
Note
----
DX[0] which is derivate along y has shape (m-1, n, l).
DX[1] which is derivate along x has shape (m, n-1, l).
"""
return (X[1:, :, :] - X[:-1, :, :], # D along y
X[:, 1:, :] - X[:, 0:-1, :]) # D along x | 4aff05c2c25089c9f93b762a18dad42b0142db09 | 2,069 |
def flat_list(*alist):
    """
    Flatten tuples, lists and single values into one flat list,
    dropping any None entries (at either level).

    e.g.
    >>> flat_list(1,2,3)
    [1, 2, 3]
    >>> flat_list(1)
    [1]
    >>> flat_list([1,2,3])
    [1, 2, 3]
    >>> flat_list([None])
    []
    """
    flattened = []
    for item in alist:
        if item is None:
            continue
        if isinstance(item, (tuple, list)):
            flattened.extend(sub for sub in item if sub is not None)
        else:
            flattened.append(item)
    return flattened
def show_counts(input_dict):
    """Format dictionary count information into a string.

    Args:
        input_dict (dictionary): input keys and their counts
    Return:
        string: one tab-indented line per key, ordered by descending
        count, followed by a trailing blank line
    """
    ordered = sorted(input_dict.items(), key=lambda kv: kv[1], reverse=True)
    lines = ['\t{}:\t{} ({})\n'.format(rank, key, count)
             for rank, (key, count) in enumerate(ordered)]
    return ''.join(lines) + '\n'
from typing import List
def already_exists(statement: str, lines: List[str]) -> bool:
    """
    Return True if `statement` occurs as a substring of any
    whitespace-stripped line in `lines`.
    """
    for line in lines:
        if statement in line.strip():
            return True
    return False
import re
def _egg_link_name(raw_name: str) -> str:
"""
Convert a Name metadata value to a .egg-link name, by applying
the same substitution as pkg_resources's safe_name function.
Note: we cannot use canonicalize_name because it has a different logic.
"""
return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link" | 923ff815b600b95ccb5750a8c1772ee9156e53b2 | 2,083 |
def format_map(mapping, st):
    """
    Fill the template string `st` with values looked up in `mapping`.
    """
    formatted = st.format_map(mapping)
    return formatted
import re
def split_words_and_quoted_text(text):
    """Split `text` on whitespace, except that double-quoted spans are
    kept together as a single element (with the quotes removed).

    For example
        'Should give "3 elements only"'
    becomes
        ['Should', 'give', '3 elements only']
    """
    # Each match is either a word (with trailing spaces) or a quoted span.
    tokens = re.findall(r'\w+\s*|\".+?\"', text)
    return [token.strip().replace('"', '') for token in tokens]
def scheduler(epoch):
    """Generate the learning rate value for a given epoch.

    inputs:
        epoch = number of current epoch
    outputs:
        learning_rate = float learning rate value
        (1e-3 below epoch 100, 1e-4 below 125, 1e-5 afterwards)
    """
    for boundary, learning_rate in ((100, 1e-3), (125, 1e-4)):
        if epoch < boundary:
            return learning_rate
    return 1e-5
def posts_completed(scraped_posts, limit):
    """Return True when the number of posts scraped from a profile
    has reached its limit, False otherwise.

    Args:
        scraped_posts: sequence of posts collected so far.
        limit: target number of posts.
    """
    # The comparison already yields the bool; no if/else needed.
    return len(scraped_posts) == limit
def get_zero_columns(matrix):
    """Return a list of booleans, one per column of the 2-D `matrix`,
    True when every entry of that column equals 0.0.

    Args:
        matrix: 2-D array-like exposing `.shape` and `[i, j]` indexing
            (e.g. a numpy array).
    Returns:
        list of bool, length == number of columns.
    """
    rows, columns = matrix.shape
    result = []
    for j in range(columns):
        # all() short-circuits on the first nonzero entry, unlike the
        # previous version which always scanned the whole column.
        result.append(all(matrix[i, j] == 0.0 for i in range(rows)))
    return result
def traditional_constants_icr_equation_empty_fixed(fixed_params, X_col):
    """ Traditional ICR equation with constants from ACE consensus:
    ICR = 450 / TDD, where TDD is the first entry of X_col.
    `fixed_params` is unused; it is kept so all equation variants share
    the same call signature.
    """
    ace_constant = 450
    total_daily_dose = X_col[0]
    return ace_constant / total_daily_dose
def sort_completions_key(completion):
    """
    Sort completions according to their type.

    Args:
        completion (jedi.api.classes.Completion): completion
    Returns:
        int: sorting order (instances first, then functions, then the rest)
    """
    rank_by_type = {"function": 2, "instance": 1}
    return rank_by_type.get(completion.type, 3)
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git') | b1c3cec3d8cf3c7d3ffa5c405522b1a08754223b | 2,096 |
import math
def mylog10(x):
    """Return the base-10 logarithm of x (x must be positive)."""
    result = math.log10(x)
    return result
import json
def handler(event, context):
    """ Lambda Handler.

    Logs the incoming event and context, then returns a response body
    containing the JSON-encoded greeting.
    """
    for obj in (event, context):
        print(obj)
    return {"body": json.dumps('Hello World!')}
def clf2D_slope_intercept(coef=None, intercept=None, clf=None):
    """
    Slope and intercept of the separating hyperplane of a linear
    classifier fit on a two dimensional dataset.

    Parameters
    ----------
    coef:
        The classification normal vector (length 2).
    intercept:
        The classifier intercept.
    clf: subclass of sklearn.linear_model.base.LinearClassifierMixin
        A sklearn classifier with attributes coef_ and intercept_.
        When given, it takes precedence over `coef`/`intercept`.

    Output
    ------
    slope, intercept
    """
    if clf is None:
        assert coef is not None and intercept is not None
    else:
        coef = clf.coef_.reshape(-1)
        intercept = float(clf.intercept_)
    # Solve w0*x + w1*y + b = 0 for y: y = -(w0/w1) x - b/w1.
    w0, w1 = coef[0], coef[1]
    slope = -w0 / w1
    intercept = -intercept / w1
    return slope, intercept
def filter_bam_file(bamfile, chromosome, outfile):
    """
    Build a gwf-style job specification that uses samtools to read
    <bamfile>, keep only the reads mapped to <chromosome>, and save the
    filtered reads into <outfile>.

    Returns the (inputs, outputs, options, spec) tuple expected by the
    workflow engine; nothing is executed here.
    """
    inputs = [bamfile]
    outputs = [outfile]
    options = {
        'cores': 1,
        'memory': '4g',
        'account': 'NChain',
        'walltime': '01:00:00'
    }
    # Parent directory of the output file ('' when outfile has no '/').
    directory = "/".join(outfile.split("/")[:-1])
    spec = f'''
    source /com/extra/samtools/1.6.0/load.sh
    mkdir -p {directory}
    samtools view -b {bamfile} {chromosome} > {outfile}
    '''
    return inputs, outputs, options, spec
def iou(a, b):
    """Intersection over union (IOU) of two axis-aligned boxes.

    Each box is a tuple ((x1, y1), (x2, y2)).  Returns 0 when the boxes
    do not overlap.
    """
    (ax1, ay1), (ax2, ay2) = a
    (bx1, by1), (bx2, by2) = b
    inter_w = min(ax2, bx2) - max(ax1, bx1)
    inter_h = min(ay2, by2) - max(ay1, by1)
    if inter_w < 0 or inter_h < 0:
        return 0
    overlap = inter_w * inter_h
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    return overlap / (area_a + area_b - overlap)
import json
def get_dict(str_of_dict: str, order_key='', sort_dict=False) -> list:
    """Parse a DB-style string of comma-separated JSON objects into a list.

    :param str_of_dict: string from the DB,
        e.g. {"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, ...}
    :param order_key: key to sort the dicts by (used only when sort_dict=True)
    :param sort_dict: whether to sort the resulting list (boolean)
    :return: list of dicts; empty list for empty input.  When the sort key
        is missing from the entries, the unsorted list is returned.
    """
    if not str_of_dict:
        return list()
    parsed = json.loads('[' + str_of_dict + ']')
    if sort_dict and order_key:
        try:
            parsed = sorted(parsed, key=lambda entry: entry[order_key])
        except KeyError:
            # Sort key absent from the entries: fall back to parse order.
            pass
    return parsed
import hashlib
import struct
def get_richpe_hash(pe):
    """Compute the RichPE hash of a parsed PE file.

    The RichPE hash is the md5 of selected metadata from the Rich header
    and the PE header.  Source: https://github.com/RichHeaderResearch/RichPE

    Args:
        pe: a parsed PE object (e.g. pefile.PE) exposing RICH_HEADER,
            FILE_HEADER and OPTIONAL_HEADER.
    Returns:
        The hex digest string, or None when the file has no Rich header
        or the header's value list is malformed (odd length).
    """
    if pe.RICH_HEADER is None:
        return None

    # Elements of the Rich header values at even indices are @Comp.IDs,
    # elements at odd indices are counts.  Copy the list: the original
    # code popped entries off pe.RICH_HEADER.values directly, silently
    # emptying the parsed header for any later user of `pe`.
    rich_fields = list(pe.RICH_HEADER.values)
    if len(rich_fields) % 2 != 0:
        return None

    md5 = hashlib.md5()

    # Update hash using @Comp.IDs and masked counts from the Rich header.
    while len(rich_fields):
        compid = rich_fields.pop(0)
        count = rich_fields.pop(0)
        # Mask the low bits of the count so near-equal counts hash alike.
        mask = 2 ** (count.bit_length() // 2 + 1) - 1
        count |= mask
        md5.update(struct.pack("<L", compid))
        md5.update(struct.pack("<L", count))

    # Update hash using metadata from the PE headers.
    md5.update(struct.pack("<L", pe.FILE_HEADER.Machine))
    md5.update(struct.pack("<L", pe.FILE_HEADER.Characteristics))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.Subsystem))
    md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MajorLinkerVersion))
    md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MinorLinkerVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorOperatingSystemVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorOperatingSystemVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorImageVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorImageVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorSubsystemVersion))
    md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorSubsystemVersion))

    return md5.hexdigest()
def get_input(label, default=None):
    """Prompt the user for input.

    :param label: The label of the prompt.
    :param label: str
    :param default: The default value, returned when the user enters
        nothing (and shown in the prompt when truthy).
    :rtype: str | None
    """
    if default:
        prompt = "%s [%s]: " % (label, default)
    else:
        prompt = "%s: " % label
    print("")
    value = input(prompt)
    # Empty response falls back to the default.
    return value or default
def updateDF(df, fields, id_patient):
    """
    Update the row of `df` whose "id_patient" column equals `id_patient`.

    `fields` maps column names to sequences; the first element of each
    sequence is written into the matching column of that row.
    Returns the (mutated) DataFrame.
    """
    row_mask = df["id_patient"] == id_patient
    for column, values in fields.items():
        df.loc[row_mask, column] = values[0]
    return df
def gcd(num1: int, num2: int) -> int:
    """Greatest common divisor of two integers via Euclid's Algorithm."""
    a, b = num1, num2
    while b:
        a, b = b, a % b
    return a
def solid_polygon_info_(base_sides, printed=False):
    """Edge/vertex/face/triangle counts of a prism-like solid whose two
    bases each have `base_sides` sides.

    Example: a rectangular solid (base_sides=4) has 12 edges, 8 vertices,
    6 faces and 12 triangles.  When `printed` is True the counts are
    printed and None is returned; otherwise a dict is returned.
    """
    vertices = 2 * base_sides
    edges = 3 * base_sides
    faces = base_sides + 2
    triangles = 2 * (base_sides - 2) + vertices
    if printed:
        print(f"Edges: {edges}\nVertices: {vertices}\nFaces: {faces}\nTriangles: {triangles}")
        return None
    return {"edges": edges,
            "vertices": vertices,
            "faces": faces,
            "triangles": triangles}
def get_all_label_values(dataset_info):
    """Retrieve possible values for modeled labels from a `Seq2LabelDatasetInfo`.

    Args:
        dataset_info: a `Seq2LabelDatasetInfo` message.

    Returns:
        A dictionary mapping each label name to a tuple of its permissible values.
    """
    result = {}
    for label_info in dataset_info.labels:
        result[label_info.name] = tuple(label_info.values)
    return result
def removePrefixes(word, prefixes):
    """
    Remove the given prefix(es) from the start of the given word.

    Args:
        word (string): Word to remove prefixes from.
        prefixes (collections.Iterable or string): Prefixes to remove from
            given word.  When an iterable is given, prefixes are stripped
            sequentially in the order provided.

    Returns:
        (string): Word with prefixes removed.
    """
    def _strip(value, prefix):
        # Only remove the prefix when it actually occurs at the start;
        # the previous split()-based approach also matched mid-word
        # (e.g. "banana".split("an")[-1] == "a") and crashed on "".
        if prefix and value.startswith(prefix):
            return value[len(prefix):]
        return value

    if isinstance(prefixes, str):
        return _strip(word, prefixes)
    for prefix in prefixes:
        word = _strip(word, prefix)
    return word
def isSol(res):
    """
    Check if the string is of the type a^i b^j c^k (i, j, k >= 1):
    a run of 'a's, then a run of 'b's, then a run of 'c's.
    """
    # Must be non-empty, start with 'a' and end with 'c'.
    if not res or res[0] != 'a' or res[-1] != 'c':
        return False
    # l = index just past the leading run of 'a's
    # (safe: res[-1] == 'c' guarantees the loop stops before the end).
    l = 0
    while res[l] == "a":
        l += 1
    # r = index just before the trailing run of 'c's.
    r = len(res) - 1
    while res[r] == "c":
        r -= 1
    # The middle segment must be non-empty (j >= 1).
    if r - l + 1 <= 0:
        return False
    # BUG FIX: the middle segment is res[l:r+1]; the previous slice
    # res[l:r-l+1] was often empty, so strings like "aabXcc" passed.
    for x in res[l:r + 1]:
        if x != 'b':
            return False
    return True
def accuracy(output, target, top_k=(1,)):
    """Top-k classification accuracy between network output and target.

    :param output: output of classification network (logits/scores),
        shape (batch, num_classes)
    :type output: pytorch tensor
    :param target: ground truth class indices, shape (batch,)
    :type target: pytorch tensor
    :param top_k: the k values to report accuracy at
    :type top_k: tuple of integer
    :return: accuracy (in percent) for each k in `top_k`
    :rtype: list
    """
    max_k = max(top_k)
    batch_size = target.size(0)
    # Predicted class indices for the max_k highest scores, transposed to
    # shape (max_k, batch) so row i holds the i-th best guess per sample.
    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    results = []
    for k in top_k:
        # A sample counts as correct at k if the target appears anywhere
        # in its first k predictions.
        hits = correct[:k].reshape(-1).float().sum(0)
        results.append(hits.mul_(100.0 / batch_size))
    return results
import torch
def huber_loss(x, delta=1.):
    """ Standard Huber loss of parameter delta, applied elementwise.
    https://en.wikipedia.org/wiki/Huber_loss

    returns 0.5 * x^2                      where |x| <= delta
            delta * (|x| - 0.5 * delta)    otherwise

    Generalized to tensors of any shape: the original
    `if torch.abs(x) <= delta:` raised "Boolean value of Tensor with
    more than one element is ambiguous" for non-scalar input.
    Scalar (0-d) tensors behave exactly as before.
    """
    abs_x = torch.abs(x)
    quadratic = 0.5 * (x ** 2)
    linear = delta * (abs_x - 0.5 * delta)
    return torch.where(abs_x <= delta, quadratic, linear)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.