content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def is_bounded(coord, shape):
    """Return True when a coord (x, y) lies within bounds.

    :param coord: (x, y) pair to test
    :param shape: (width, height) of the grid
    :return: True iff 0 <= x < width and 0 <= y < height
    """
    x, y = coord
    width, height = shape
    # Chained comparisons cover both the negative and the too-large cases.
    return 0 <= x < width and 0 <= y < height
from typing import Optional
def bool_to_int(bool_value: Optional[bool]) -> Optional[int]:
    """Cast bool to int value, passing None through unchanged.

    :param bool_value: some bool value, or None
    :return: int representation, or None when the input is None
    """
    return None if bool_value is None else int(bool_value)
from typing import Any
def arghash(args: Any, kwargs: Any) -> int:
    """Simple argument hash with kwargs sorted.

    Positional args and sorted kwarg (key, value) pairs are collected into
    one tuple and hashed, so every element must itself be hashable.

    :param args: positional arguments (iterable)
    :param kwargs: keyword arguments (mapping)
    :return: hash of the combined arguments
    """
    # The previous `x if hasattr(x, "__repr__") else x` was a no-op (both
    # branches yield x, and every object has __repr__); build the tuple
    # directly instead.
    return hash((*args, *sorted(kwargs.items())))
def _get_pcluster_version_from_stack(stack):
"""
Get the version of the stack if tagged.
:param stack: stack object
:return: version or empty string
"""
return next((tag.get("Value") for tag in stack.get("Tags") if tag.get("Key") == "Version"), "") | 86106e52ea6ef8780c8aa8f514e0708dd53fb8e3 | 702,336 |
def chunks(seq, size):
    """Breaks a sequence of bytes into chunks of provided size

    :param seq: sequence of bytes
    :param size: chunk size
    :return: generator that yields tuples of sequence chunk and boolean that indicates if chunk is
             the last one
    """
    length = len(seq)
    # `pos + size >= length` is True only for the final chunk, matching the
    # documented "is last chunk" flag (the previous `<` comparison was
    # inverted and flagged every chunk *except* the last).
    return ((seq[pos:pos + size], (pos + size >= length))
            for pos in range(0, length, size))
def evaluate(bounds, func):
    r"""
    Evaluates Simpson's rule on an interval and a function pointer.

    .. math::
        \int_{a}^{b} f(x)\,dx \approx \frac{b - a}{6}
        \left[f(a) + 4 f\!\left(\frac{a + b}{2}\right) + f(b)\right]

    Parameters
    ----------
    bounds: array_like
        An array with a dimension of two that contains the starting and
        ending points for the integrand.
    func: function
        A function being evaluated in this integral

    Returns
    -------
    integral: float
        The integral of function (func) between the bounds

    Raises
    ------
    ValueError
        If ``bounds`` does not contain exactly two elements.
    """
    if len(bounds) != 2:
        raise ValueError("Bounds should be a length of two, found %d." % len(bounds))
    a = float(bounds[0])
    b = float(bounds[1])
    # Simpson's rule weights the midpoint sample four times as heavily as
    # the endpoints.  (Docstring is now a raw string: the old version's
    # `\int` was an invalid escape sequence.)
    integral = (b - a) * (func(a) + 4.0 * func((a + b) / 2) + func(b)) / 6.0
    return integral
import torch
def instance_masks_to_semseg_mask(instance_masks, category_labels):
    """
    Converts a tensor containing instance masks to a semantic segmentation mask.
    :param instance_masks: tensor(N, T, H, W) (N = number of instances)
    :param category_labels: tensor(N) containing semantic category label for each instance.
    :return: semantic mask as tensor(T, H, W) with pixel values containing class labels
    """
    assert len(category_labels) == instance_masks.shape[0], \
        "Number of instances do not match: {}, {}".format(len(category_labels), len(instance_masks))
    # Paint each instance's category label onto its mask, one instance at a
    # time.
    painted = instance_masks.long()
    for idx in range(painted.shape[0]):
        painted[idx] = torch.where(instance_masks[idx], category_labels[idx], painted[idx])
    # For pixels with differing labels, assign the category with the higher
    # ID number (arbitrary criterion).
    return painted.max(dim=0)[0]
import time
def fix_end_now(json):
    """Set end time to the time now if no end time is given."""
    # dict.get returns None both when the key is absent and when it maps to
    # None, covering the two "no end time" cases in one check.
    if json.get('end') is None:
        json['end'] = int(time.time())
    return json
def topics_to_calldata(topics, bytes_per_topic=3):
    """Converts a list of topics to calldata.

    Args:
        topics (bytes[]): List of topics.
        bytes_per_topic (int): Byte length of each topic.

    Returns:
        bytes: Topics combined into a single string.
    """
    # Encode each topic as a fixed-width little-endian field, then
    # concatenate.
    encoded = [topic.to_bytes(bytes_per_topic, byteorder='little') for topic in topics]
    return b''.join(encoded)
def remove_basic_block_assembly(pydot_cfg):
    """
    Remove assembly text from the CFG's basic blocks
    """
    # The first two entries returned by get_nodes() are the graph and edge
    # pseudo-nodes, so skip them and blank out the remaining labels.
    for basic_block in pydot_cfg.get_nodes()[2:]:
        basic_block.set_label("")
    return pydot_cfg
def indent(yaml: str):
    """Add a two-space indent to every non-blank line of the yaml text."""
    indented = [
        ("  " + line) if line.strip() else line
        for line in yaml.split("\n")
    ]
    return "\n".join(indented)
def get_rbac_role_assigned(self) -> dict:
    """Get list of accessible menus based on the current session
    permissions

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - rbacRole
          - GET
          - /rbac/role/menuAssigned

    .. note::
        Returns HTTP 204 Empty Response if nothing is assigned

    :return: Returns dictionary of current session accessible menus
    :rtype: dict
    """
    endpoint = "/rbac/role/menuAssigned"
    return self._get(endpoint)
def merge_import_policies(value, order=""):
    """
    Merges and returns policy list for import.

    If duplicates are found, only the most specific one will be kept.

    :param value: object exposing a ``merged_import_policies`` method
    :param order: pass ``"reverse"`` to merge in reverse order
    :raises AttributeError: if ``value`` cannot merge import policies
    """
    if not hasattr(value, "merged_import_policies"):
        # The f-prefix was missing, so the message used to contain the
        # literal text "{value}" instead of the object's repr.
        raise AttributeError(f"{value} has not merged import policies")
    return value.merged_import_policies(order == "reverse")
def _sort_student(name: str) -> str:
"""
Return the given student name in a sortable format.
Students are sorted by last name (i.e., last space-split chunk).
"""
return name.lower().split()[-1] | 92747346e0e6ded9715761b8907ccb8ab33da742 | 702,367 |
import re
def is_gcd_file(filename: str) -> bool:
    """Checks whether `filename` is a GCD file (name contains 'gcd' or 'geo')."""
    return re.search('(gcd|geo)', filename.lower()) is not None
import zipfile
def _extract_info(archive, info):
"""
Extracts the contents of an archive info object
;param archive:
An archive from _open_archive()
:param info:
An info object from _list_archive_members()
:return:
None, or a byte string of the file contents
"""
if isinstance(archive, zipfile.ZipFile):
fn = info.filename
is_dir = fn.endswith('/') or fn.endswith('\\')
out = archive.read(info)
if is_dir and out == b'':
return None
return out
info_file = archive.extractfile(info)
if info_file:
return info_file.read()
return None | 3014a9e85077f33522aaf466c9bef3bd020d252b | 702,373 |
def _space_all_but_first(s: str, n_spaces: int) -> str:
"""Pad all lines except the first with n_spaces spaces"""
lines = s.splitlines()
for i in range(1, len(lines)):
lines[i] = " " * n_spaces + lines[i]
return "\n".join(lines) | 36da5eb15a9ab5fa473831b5440ffa88495f6cac | 702,374 |
def fix_brackets(placeholder: str) -> str:
    """Fix the imbalanced brackets in placeholder.

    When ptype is not null, regex matching might grab a placeholder with }
    missing. This function fixes the missing bracket.

    Args:
        placeholder: string placeholder of RuntimeParameter

    Returns:
        Placeholder with re-balanced brackets.

    Raises:
        RuntimeError: if left brackets are less than right brackets.
    """
    lcount = placeholder.count('{')
    rcount = placeholder.count('}')
    if lcount < rcount:
        # The surplus here is of *right* brackets; the old message blamed
        # "left brackets", which was misleading.
        raise RuntimeError(
            'Unexpected redundant right brackets found in {}'.format(placeholder))
    # Append as many right brackets as needed to balance (else was
    # redundant after the raise).
    return placeholder + '}' * (lcount - rcount)
def ParseRangeHeader(range_header):
    """Parse HTTP Range header.

    Args:
        range_header: A str representing the value of a range header as retrived
            from Range or X-AppEngine-BlobRange.

    Returns:
        Tuple (start, end):
            start: Start index of blob to retrieve.  May be negative index.
            end: None or end index.  End index is exclusive.
        (None, None) if there is a parse error.
    """
    if not range_header:
        return None, None
    try:
        range_type, ranges = range_header.split('=', 1)
        if range_type != 'bytes':
            return None, None
        ranges = ranges.lstrip()
        if ',' in ranges:
            # Multiple ranges are not supported.
            return None, None
        end = None
        if ranges.startswith('-'):
            # Suffix range such as "-500": negative start, open end.
            start = int(ranges)
            if start == 0:
                return None, None
        else:
            split_range = ranges.split('-', 1)
            start = int(split_range[0])
            if len(split_range) == 2 and split_range[1].strip():
                end = int(split_range[1]) + 1
        # Guard against None before comparing: open-ended ranges such as
        # "bytes=5-" and "bytes=-5" previously hit `start > end` with
        # end=None and raised an uncaught TypeError on Python 3.
        if end is not None and start > end:
            return None, None
        return start, end
    except ValueError:
        return None, None
from typing import Any
from typing import List
def init_args(
    cls: Any,
) -> List[str]:
    """ Return the __init__ args (minus 'self') for @cls

    Args:
        cls: class, instance or callable

    Returns:
        The argument names minus 'self', as a list of strings
    """
    # This looks insanely goofy, but seems to literally be the
    # only thing that actually works. Your obvious ways to
    # accomplish this task do not apply here.
    try:
        # Assume it's a factory function, static method, or other callable
        code = cls.__code__
    except AttributeError:
        # assume it's a class
        code = cls.__init__.__code__
    # co_varnames lists local variables *after* the declared parameters, so
    # slice to co_argcount; the old version leaked locals into the result.
    args = list(code.co_varnames[:code.co_argcount])
    # Note: There is a special place in hell for people who don't
    # call the first method argument 'self'.  Guard against zero-arg
    # callables before peeking at args[0].
    if args and args[0] == 'self':
        args = args[1:]
    return args
import secrets
def generate_token_urlsafe(unique=False, nbytes=32):
    """Generate an URL-safe random token.

    NOTE(review): the ``unique`` flag is accepted for interface
    compatibility but is currently ignored.
    """
    token = secrets.token_urlsafe(nbytes=nbytes)
    return token
def score_sig_1D_base(true_signatures, pred_signatures):
    """
    percent of mutations with the right signature

    Parameters
    ----------
    true_signatures: iterable
        array-like of length N (number of mutations), with the
        true signature for each mutation
    pred_signatures: iterable
        array-like of length N (number of mutations), with the
        predicted signature for each mutation

    Returns
    -------
    float
        fraction of positions where true and predicted signatures agree
    """
    N = len(true_signatures)
    # Compare element-wise so plain Python sequences work too: the old
    # `sum(a == b)` relied on numpy broadcasting and was silently wrong
    # for lists (`list == list` is a single bool).
    matches = sum(t == p for t, p in zip(true_signatures, pred_signatures))
    return matches / N
import decimal
def _dict_decimal_to_float(my_dict):
"""If any values in dictionary are of dtype 'Decimal', cast
to float so can be JSONified.
"""
for key, val in my_dict.items():
if isinstance(val, decimal.Decimal):
my_dict[key] = float(val)
return my_dict | 7d1741322782c49b9b8a6a7369344797439ff00b | 702,385 |
def _get_timestep_lookup_function(control_loop_period, memory_loc, rtf_key):
"""
Creates a customized timestep lookup function for a SynchronizedTimedLoop
:param control_loop_period: duration in [s] of each timed loop
:param memory_loc: location in shared memory to get() data from
:param rtf_key: key for dictionary to plug into memory location
:return: a customized function that takes no arguments and returns a
desired_dt based on the scaling from the real_time_factor
"""
def timestep_lookup_function():
data = memory_loc.get()
real_time_factor = data[rtf_key][0]
desired_dt = control_loop_period / real_time_factor
return desired_dt
return timestep_lookup_function | 500e61c0cbb38012bbce18dfc6f648c78a4b3f36 | 702,390 |
def get_cursor(connection):
    """
    Gets a cursor from a given connection.

    :type MySQLdb.connections.Connection
    :param connection: A mysql connection

    :rtype MySQLdb.cursors.SSDictCursor
    :return A mysql cursor
    """
    cursor = connection.cursor()
    return cursor
def _popup_footer(view, details):
"""
Generate a footer for the package popup that indicates how the package is
installed.
"""
return """
{shipped} <span class="status">Ships with Sublime</span>
{installed} <span class="status">In Installed Packages Folder</span>
{unpacked} <span class="status">In Packages Folder</span>
""".format(
shipped="\u2611" if details["is_shipped"] else "\u2610",
installed="\u2611" if details["is_installed"] else "\u2610",
unpacked="\u2611" if details["is_unpacked"] else "\u2610") | 8f159f75990e87c8d431bb2dd1d01e648079ac96 | 702,393 |
def trycast(new_type, value, default=None):
    """
    Attempt to cast `value` as `new_type`, returning `default` if conversion fails.
    """
    try:
        return new_type(value)
    except Exception:
        # Swallow conversion errors only.  The old `try/finally` with a
        # `return` inside `finally` suppressed *every* exception raised by
        # the cast, including KeyboardInterrupt and SystemExit.
        return default
def construct_description(prefix: str, suffix: str, items: list):
    """Construct a complete description with prefix and suffix.

    Args:
        prefix (str): prefix
        suffix (str): suffix
        items (list): items joined with commas in the middle

    Returns:
        str: complete description string
    """
    return prefix + ','.join(items) + suffix
def read_cookies_file(filename):
    """read cookie txt file

    :param filename: (str) cookies file path
    :return: (str) raw cookies file contents
    """
    with open(filename, 'r') as cookie_file:
        return cookie_file.read()
from typing import Union
def kind_div(x, y) -> Union[int, float]:
    """Tries integer division of x/y before resorting to float. If integer
    division gives no remainder, returns this result otherwise it returns the
    float result. From https://stackoverflow.com/a/36637240."""
    # Integer division is tried first because it is lossless.
    quotient, remainder = divmod(x, y)
    return quotient if remainder == 0 else x / y
def rat_fun(x, poles):
    """
    Computes the value of a rational function with poles in poles and roots in
    -poles; see Definition 8.29 from the doctoral thesis "Model Order Reduction
    for Fractional Diffusion Problems" for a precise definition.

    Parameters
    ----------
    x : float
        The argument of the rational function.
    poles : list
        A list of poles.

    Returns
    -------
    val : float
        The value of the rational function at x.
    """
    # One factor (x + p)/(x - p) per pole, multiplied together.
    factors = [(x + pole) / (x - pole) for pole in poles]
    val = 1
    for factor in factors:
        val *= factor
    return val
import csv
def _get_reader(file):
"""Get CSV reader and skip header rows."""
reader = csv.reader(file)
# Skip first 3 rows because they're all headers.
for _ in range(3):
next(reader)
return reader | 588328d9ccb5af32abad0c0d8fe8c4489d306c12 | 702,406 |
def traverseFilter(node, filterCallback):
    """Traverse every node and return a list of the nodes that matched the given expression

    For example:
        expr='a+3+map("test",f())'
        ast=SeExprPy.AST(expr)
        allCalls=SeExprPy.traverseFilter(ast.root(),lambda node,children: node.type==SeExprPy.ASTType.Call)
        allMapNodes=SeExprPy.traverseFilter(ast.root(),lambda node,children: node.type==SeExprPy.ASTType.Call and node.value=="map")
        allVarRefs=SeExprPy.traverseFilter(ast.root(),lambda node,children: node.type==SeExprPy.ASTType.Var)
    """
    children = node.children()
    # Pre-order traversal: test the node itself, then recurse into its
    # children.
    matched = [node] if filterCallback(node, children) else []
    for child in children:
        matched.extend(traverseFilter(child, filterCallback))
    return matched
def get_cell_numbers(contained):
    """Retrieve non-overlapping cell numbers from the output of `get_overlapping`.

    None may appear at the ends of the output, indicating that the corresponding
    target cells are not overlapping with any source cells. These should be ignored
    when regridding.

    Cell numbers of 0 indicate that the corresponding target cells need to be regridded
    in combination with the previous non-zero cell number target cell.

    Returns:
        cell_numbers (list): The number of cells corresponding to the source
            dimension, as described above.
        overlap (bool): If True, this indicates that for at least one location, there
            is an overlap of depth 1 between adjacent operations.
    """
    cell_numbers = []
    overlap = False
    # Walk the contained lists pairwise (previous, current) so each entry
    # can be compared with its predecessor; the first entry pairs with None.
    for prev_elements, elements in zip([None] + contained[:-1], contained):
        cell_number = None
        # If the first element of the current list equals the last element
        # of the previous non-empty list, the two operations share one cell.
        # Start the count at -1 so that shared cell is not counted twice.
        if (
            prev_elements is not None
            and elements
            and prev_elements
            and elements[0] == prev_elements[-1]
        ):
            overlap = True
            cell_number = -1
        if elements:
            if cell_number is None:
                cell_number = 0
            # Inclusive span of the contained element range.
            cell_number += elements[-1] - elements[0] + 1
        # cell_number stays None for empty element lists (no overlap).
        cell_numbers.append(cell_number)
    return cell_numbers, overlap
def get_mos(da, da_peak_times):
    """
    Takes an xarray DataArray containing veg_index values and calculates the vegetation
    values (time not available) at middle of season (mos) for each timeseries per-pixel.
    The middle of season is the mean vege value and time (day of year) in the timeseries
    at 80% to left and right of the peak of season (pos) per-pixel.

    Parameters
    ----------
    da: xarray DataArray
        A two-dimensional or multi-dimensional array containing an DataArray of veg_index
        and time values.
    da_peak_times: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each pixel must be
        the time (day of year) value calculated at peak of season (pos) prior.

    Returns
    -------
    da_mos_values : xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each pixel is the
        veg_index value detected at the peak of season (pos).
    """
    # notify user
    print('Beginning calculation of middle of season (mos) values (times not possible).')

    # get left and right slopes values
    print('> Calculating middle of season (mos) values.')
    # Split the series at the per-pixel peak day-of-year: values on/before
    # the peak form the left slope, values on/after form the right slope.
    slope_l = da.where(da['time.dayofyear'] <= da_peak_times)
    slope_r = da.where(da['time.dayofyear'] >= da_peak_times)
    # get upper 80% values in positive slope on left and right
    slope_l_upper = slope_l.where(slope_l >= (slope_l.max('time') * 0.8))
    slope_r_upper = slope_r.where(slope_r >= (slope_r.max('time') * 0.8))
    # get means of slope left and right (NaNs from where() are ignored)
    slope_l_means = slope_l_upper.mean('time')
    slope_r_means = slope_r_upper.mean('time')
    # combine left and right veg_index means
    da_mos_values = (slope_l_means + slope_r_means) / 2
    # convert type
    da_mos_values = da_mos_values.astype('float32')
    # rename vars
    da_mos_values = da_mos_values.rename('mos_values')
    # notify user
    print('> Success!\n')
    #return da_mos_values
    return da_mos_values
from importlib.util import find_spec
def _pytest_has_xdist() -> bool:
"""
Check if the pytest-xdist plugin is installed, providing parallel tests
"""
# Check xdist exists without importing, otherwise pytests emits warnings
return find_spec("xdist") is not None | 77cc6d04d21f76b35b183fc4e9ebc2bf6824b744 | 702,426 |
def makedist(dist_type, *pars, **kwards):
    """
    Creates a distribution class from scipy continuous distributions
    See https://docs.scipy.org/doc/scipy/reference/stats.html.

    Parameters
    ----------
    dist_type: String -> Type of the distribution (see the scipy documentation)
    *pars and **kwards: Statistical parameters and its values

    Return
    ------
    dist: Distribution class

    Raises
    ------
    AttributeError
        If `dist_type` is not a distribution in scipy.stats.
    """
    import scipy.stats as sst
    # Look the distribution up by attribute instead of eval()-ing a string,
    # which was both unsafe and dependent on an `sst` name that was never
    # imported in this module.
    dist_factory = getattr(sst, dist_type)
    dist = dist_factory(*pars, **kwards)
    return (dist, dist_type)
def solve(_n, tree):
    """
    Given a list of list of tokens:
    . (empty), or # (tree), compute the
    number of trees one would encounter
    if one traverses the 2D grid along
    a slope of (3, 1).

    :param _n: The number of rows in the
        2D grid.
    :param tree: The 2D grid as a list of list.
    :return: The number of trees encountered on
        a traversal along the slope (3, 1).
    """
    width = len(tree[0])
    # After `row` steps the column is 3*row, wrapped around the grid width.
    return sum(
        1
        for row in range(1, _n)
        if tree[row][(3 * row) % width] == "#"
    )
def bin_bucket_sort(arr):
    """
    Binary bucket sort / 2-Radix sort (LSD binary radix sort).

    Time: O(N * number_of_bits)
    Space: O(N)

    input: 1D-list array of non-negative integers
    output: 1D-list sorted array

    Note: the previous version stopped as soon as either bucket came up
    empty, which returned unsorted output for inputs such as [4, 1]:
    both values have bit 1 clear, so that pass emptied the ones-bucket
    before the high bits were ever processed.
    """
    aux = list(arr)
    if not aux:
        return aux
    largest = max(aux)
    bit = 1
    # One stable pass per bit, least significant first, until every bit of
    # the largest value has been processed.
    while bit <= largest:
        zeros = [item for item in aux if not item & bit]
        ones = [item for item in aux if item & bit]
        aux = zeros + ones
        bit <<= 1
    return aux
import csv
def import_data(file):
    """
    Imports instruction data from a .csv file, at the specified filepath

    :param file: file
        File object, to be read by the CSV reader
    :return: list
        A list of dictionaries, per row in the file, with the keys specified in the headers list
        below
    """
    headers = [
        'entity', 'buy_sell', 'agreed_fx', 'currency', 'instr_date', 'settle_date', 'units', 'ppu',
    ]
    # Map every row onto the fixed header schema and materialise the result.
    return list(csv.DictReader(file, headers))
from typing import Tuple
from typing import List
from typing import Any
def _extract_bracket_params(meta_type: str) -> Tuple[str, List[Any]]:
"""
Gets parameters from the string representation of the type
Args:
meta_type (str): The string name of the metadata type
Returns:
Tuple[str, List[Any]]: A tuple, first arg is a string of the type
name only and then the second value is a list of values (if any)
inside the brackets of the meta_type. e.g. "int64" returns ("int64", [])
and "decimal128(1,2)" returns ("decimal128", [1, 2])
"""
is_decimal_type = meta_type.startswith("decimal128")
is_binary_type = meta_type.startswith("binary")
if "(" in meta_type:
attr_name, value_str = meta_type.split("(", 1)
value_str = value_str.split(")")[0]
values = value_str.split(",")
if not any([bool(v) for v in values]):
values = []
# cast input to int for specific types
if (is_decimal_type or is_binary_type) and values:
values = [int(v.strip()) for v in values]
else:
attr_name = meta_type
values = []
return attr_name, values | 38fc3872c18bb788a54d50b09a36cf3f5925550e | 702,444 |
import hashlib
def calc_local_file_md5_sum(path):
    """
    Calculate and return the MD5 checksum of a local file

    Arguments:
        path(str): The path to the file

    Returns:
        str: The MD5 checksum
    """
    digest = hashlib.md5()
    with open(path, "rb") as file_to_hash:
        # Stream in chunks so large files do not need to fit in memory;
        # the resulting digest is identical to hashing the whole file.
        for chunk in iter(lambda: file_to_hash.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest()
def emit_compare(field_name, value, session, model):
    """Emit a comparison operation comparing the value of ``field_name`` on ``model`` to ``value``."""
    # Named to avoid shadowing the builtin `property`.
    model_attr = getattr(model, field_name)
    return model_attr == value
def unsigned32(i):
    """cast signed 32 bit integer to an unsigned integer"""
    # Reducing modulo 2**32 is equivalent to masking with 0xFFFFFFFF for
    # Python's arbitrary-precision ints.
    return i % 0x100000000
def has_and_not_none(obj, name):
    """
    Returns True iff obj has attribute name and obj.name is not None
    """
    # A missing attribute and an attribute explicitly set to None both
    # collapse to the same None default, which is exactly the contract.
    return getattr(obj, name, None) is not None
def _qname_matches(tag, namespace, qname):
    """Logic determines if a QName matches the desired local tag and namespace.

    This is used in XmlElement.get_elements and XmlElement.get_attributes to
    find matches in the element's members (among all expected-and-unexpected
    elements-and-attributes).

    Args:
      tag: string, the expected local tag name (or None to match any tag).
      namespace: string, the expected namespace; '' means "no namespace",
          None matches any namespace.
      qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
          no namespace.

    Returns:
      boolean True if the member's tag and namespace fit the expected tag and
      namespace.
    """
    # If there is no expected namespace or tag, then everything will match.
    if qname is None:
        member_tag = None
        member_namespace = None
    else:
        # Clark notation: '{namespace}localname'; otherwise a bare tag.
        if qname.startswith('{'):
            member_namespace = qname[1:qname.index('}')]
            member_tag = qname[qname.index('}') + 1:]
        else:
            member_namespace = None
            member_tag = qname
    return ((tag is None and namespace is None)
            # If there is a tag, but no namespace, see if the local tag
            # matches.
            or (namespace is None and member_tag == tag)
            # There was no tag, but there was a namespace so see if the namespaces
            # match.
            or (tag is None and member_namespace == namespace)
            # There was no tag, and the desired elements have no namespace, so check
            # to see that the member's namespace is None.
            or (tag is None and namespace == ''
                and member_namespace is None)
            # The tag and the namespace both match.
            or (tag == member_tag
                and namespace == member_namespace)
            # The tag matches, and the expected namespace is the empty namespace,
            # check to make sure the member's namespace is None.
            or (tag == member_tag and namespace == ''
                and member_namespace is None))
def get_chombo_box_extent(box, space_dim):
    """
    Parse box extents from Chombo HDF5 files into low and high limits

    Parameters
    ----------
    box : List
        Chombo HDF5 format box limits,
        e.g. [x_lo, y_lo, x_hi, y_hi] = [0,0,1,1]
    space_dim : int
        Number of spatial dimensions

    Returns
    -------
    lo, hi : List
        Low and high limits, [x_lo, y_lo, ...], [x_hi, y_hi, ...]
        e.g [0,0], [1,1]
    """
    # The first space_dim entries form the low corner, the next space_dim
    # entries the high corner.
    lo = list(box[:space_dim])
    hi = list(box[space_dim:2 * space_dim])
    return lo, hi
def get_raw_pdb_filename_from_interim_filename(interim_filename, raw_pdb_dir):
    """Get raw pdb filename from interim filename.

    Keeps the interim file's immediate parent directory and the first two
    dot-separated pieces of its base name, rooted under `raw_pdb_dir`.
    """
    path_parts = interim_filename.split('/')
    name_parts = path_parts[-1].split(".")
    return '/'.join([raw_pdb_dir, path_parts[-2], name_parts[0] + '.' + name_parts[1]])
def get_non_lib(functions):
    """
    Get all non-library functions

    @param functions: List of db_DataTypes.dbFunction objects
    @return: a subset list of db_DataTypes.dbFunction objects that are not library functions.
    """
    non_lib = []
    for func in functions:
        if not func.is_lib_func:
            non_lib.append(func)
    return non_lib
def _check_duplicates(data, name):
"""Checks if `data` has duplicates.
Parameters
----------
data : pd.core.series.Series
name : str
Name of the column (extracted from geopandas.GeoDataFrame) to check duplicates.
Returns
-------
bool : True if no duplicates in data.
"""
if data.duplicated().any():
duplicates = data[data.duplicated(keep=False)]
raise ValueError(f"{name} cannot contain duplicate values, found {duplicates}")
return True | 429ce8d092b3a39fc44eeca91d593db22fe7364d | 702,460 |
def clean_chamber_input(chamber):
    """ Turns ambiguous chamber information into tuple (int, str) with chamber id and chamber name """
    # Normalise string aliases to the numeric chamber id; unrecognised
    # strings pass through unchanged.
    if type(chamber) == str:
        aliases = {'1': 1, '2': 2, 'GA': 1, 'SC': 2}
        chamber = aliases.get(chamber, chamber)
    chamber_name = 'GA' if chamber == 1 else ('SC' if chamber == 2 else '')
    return chamber, chamber_name
from pathlib import Path
import logging
import yaml
def params_from_yaml(args):
    """Extract the parameters for preparation from a yaml file and return a dict

    :param args: namespace (e.g. argparse) with a `config` attribute holding
        the path to the yaml file
    :return: dict of parameters, with "verbose" defaulting to True
    :raises AssertionError: if the config file does not exist
    """
    # Check the path exists
    try:
        config_file_path = Path(args.config)
        assert config_file_path.exists()
    except Exception:
        logging.error(f"Could not find config file at {args.config}")
        raise
    # Load the data from the config file
    try:
        with open(config_file_path, "r") as f:
            # An empty yaml file parses to None; fall back to an empty dict
            # so the "verbose" defaulting below does not crash.
            params = yaml.safe_load(f) or {}
    except Exception:
        logging.error(
            f"Could not extract parameters from yaml file at {config_file_path}"
        )
        raise
    params.setdefault("verbose", True)
    return params
from typing import List
from typing import Union
def list_or_first(x: List[str]) -> Union[List[str], str]:
    """
    Returns a list if the number of elements is
    greater than 1 else returns the first element of
    that list

    An empty list is returned unchanged (the previous `len(x) > 1` test
    routed [] to `x[0]` and raised IndexError).
    """
    return x[0] if len(x) == 1 else x
def nexthop_is_local(next_hop):
    """
    Check if next-hop points to the local interface.

    Will be True for Connected and Local route strings on Cisco devices.

    :param next_hop: next-hop string from a routing table entry
    :return: bool -- True when the next-hop starts with an interface-type
        prefix, False otherwise (the old loop fell through and returned
        None on no match; it also shadowed the builtin `type`).
    """
    interface_types = (
        'Eth', 'Fast', 'Gig', 'Ten', 'Port',
        'Serial', 'Vlan', 'Tunn', 'Loop', 'Null'
    )
    # str.startswith accepts a tuple of prefixes.
    return next_hop.startswith(interface_types)
def logistic_expval(mu, tau):
    """
    Expected value of logistic distribution.

    The mean of a logistic distribution equals its location parameter, so
    the scale-related `tau` does not enter the result.
    """
    return mu
def GetKDPPacketHeaderInt(request=0, is_reply=False, seq=0, length=0, key=0):
    """ create a 64 bit number that could be saved as pkt_hdr_t

    params:
        request:int - 7 bit kdp_req_t request type
        is_reply:bool - False => request, True => reply
        seq: int - 8 sequence number within session
        length: int - 16 bit length of entire pkt including hdr
        key: int - session key

    returns:
        int - 64 bit number to be saved in memory
    """
    # Field layout, low to high bits:
    #   request(7) | reply flag(1) | seq(8) | length(16) | key(32)
    header = request
    if is_reply:
        header |= 1 << 7
    header |= seq << 8
    header |= length << 16
    header |= key << 32
    return header
def tilted_L1(u, quantile=0.5):
    """
    Tilted (pinball/quantile) L1 loss:

        tilted_L1(u; quant) = quant * [u]_+ + (1 - quant) * [u]_-

    written equivalently as 0.5*|u| + (quant - 0.5)*u.
    """
    magnitude_term = 0.5 * abs(u)
    tilt_term = (quantile - 0.5) * u
    return magnitude_term + tilt_term
def bytes_to_human(size, digits=2, binary=True):
    """Convert a byte value to the largest (> 1.0) human readable size.

    Args:
        size (int): byte size value to be converted.
        digits (int, optional): number of digits used to round the converted
            value. Defaults to 2.
        binary (bool, optional): convert to binary (True) or decimal (False)
            units. Defaults to True.

    Returns:
        str: value translated to a human readable size.
    """
    base = 1024 if binary else 1000
    suffixes = ["B", "KB", "MB", "GB", "TB", "PB", "EB"]
    # Non-numeric input collapses to 0 and renders as "0B".
    amount = size if isinstance(size, (int, float)) else 0
    suffix = suffixes.pop(0)
    exponent = 0
    # Climb unit sizes until the value is no longer above one unit step.
    while amount > base and suffixes:
        exponent += 1
        amount = float(size) / (base ** exponent)
        suffix = suffixes.pop(0)
    # Binary units use the IEC "i" infix, e.g. "KB" -> "KiB".
    if base == 1024 and len(suffix) > 1:
        suffix = "{}i{}".format(*suffix)
    return "".join([str(round(amount, digits)), suffix])
import pathlib
def parent(path: str) -> str:
    """Get path's parent

    e.g
        j.sals.fs.parent("/home/rafy/testing_make_dir/test1") -> '/home/rafy/testing_make_dir'

    Args:
        path (str): path to get its parent

    Returns:
        str: parent path.
    """
    parent_path = pathlib.Path(path).parent
    return str(parent_path)
def _getText(nodelist):
""" returns collected and stripped text of textnodes among nodes in nodelist """
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc.strip() | 37548ebf34f0f26cc4166e95621ee1ec0f3a3f71 | 702,490 |
def from_json(json_data, key):
    """Extract values from JSON data.

    :arg dict json_data: The JSON data
    :arg str key: Key to get data for.

    :Returns: The value of `key` from `json_data`, or None if `json_data`
        does not contain `key`.
    """
    # dict.get collapses "missing key" to None, matching the old
    # membership-test-then-index logic.
    return json_data.get(key)
def project(a):
    """ De-homogenize vector: divide all but the last component by the last. """
    scale = float(a[-1])
    return a[:-1] / scale
from numpy.linalg import inv
def n_clinic_to_unit_cube(x_array, box):
    """
    ---Inputs---
    x_array : {2 mode numpy array}
        array defining the "real" coordinate(s) at which to calculate values of
        basis functions
        shape (n_points, d)
        dimension of first mode gives number of points
        dimension of second mode gives number of spatial dimensions (d)
    box : {2 mode numpy array}
        square array of shape (d, d) defining the region in which points x_array live,
        each row is a vector
    ---Outputs---
    u_array : {numpy array}
        coordinates transformed into unit n-cube, shape (n_points, d)
    """
    if (len(box.shape) != 2):
        print('ERROR: box must be 2 mode numpy array')
        return
    elif (box.shape[0] != box.shape[1]):
        print('ERROR: box array must be square')
        return
    # given box vectors in columns of V, let L be the mapping from unit n-cube to box
    # L I = V --> L = V
    # then L^{-1} maps from "real" box coordinates to the unit n-cube
    # L^{-1} V = I
    V = box.T  # box has cell vectors in rows; the formulation uses columns of V
    L = V
    # BUG FIX: this module previously did `from operator import inv`, which
    # is the *bitwise NOT* operator and fails on float arrays; the math
    # above requires the matrix inverse from numpy.linalg.
    L_inv = inv(L)
    # transform every coordinate into the unit n-cube
    u_array = (L_inv @ x_array.T).T  # shape (n_points, n_dim)
    return u_array
def remote_shortname(socket):
    """
    Obtains remote hostname of the socket and cuts off the domain part
    of its FQDN.
    """
    fqdn = socket.gethostname()
    # Only the first dot matters: everything after it is the domain.
    return fqdn.split('.', 1)[0]
def is_fragment(href):
    """Return True if href is a fragment (starts with '#') else False.

    The empty string is not a fragment; startswith handles it directly,
    replacing the old IndexError-based check.
    """
    return href.startswith('#')
async def challenge(websocket, user):
    """Challenges a user.
    """
    message = f'|/challenge {user}, gen8metronomebattle'
    return await websocket.send(message)
def air2vacMortonIAU(wl_air):
    """Take an input air wavelength in Angstroms and return the vacuum
    wavelength.
    Formula taken from
    https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
    """
    s2 = (1e4 / wl_air) ** 2
    refractive_index = (1
                        + 0.00008336624212083
                        + (0.02408926869968 / (130.1065924522 - s2))
                        + (0.0001599740894897 / (38.92568793293 - s2)))
    return wl_air * refractive_index
def get_max_node(candidates, gr_values, column_prefix=""):
    """Given a set of candidate nodes, and return the one with the
    highest Gain Ratio.
    Args:
        candidates (list): List of candidate nodes.
        gr_values (dict): Dictionary with column names as keys and
          the corresponding Gain Ratio values as values.
        column_prefix (str): Prefix of the columns generated by the generator
          (e.g. "new_link_type_"). Defaults to "".
    Returns:
        str: Name of the node with the highest Gain Ratio in the candidate set,
        or None when no candidate beats a Gain Ratio of 0 (except for the
        single-candidate special case below).
    """
    max_gr = 0
    max_gr_node = None
    for node in candidates:
        # Prefer the prefixed column; fall back to the bare node name.
        # (Narrowed from a bare `except:` which also hid TypeError etc.)
        try:
            gr = gr_values[column_prefix + node]
        except KeyError:
            gr = gr_values[node]
        # A lone candidate is returned even with a Gain Ratio of exactly 0.
        if gr > max_gr or (gr == 0 and len(candidates) == 1):
            max_gr = gr
            max_gr_node = node
    return max_gr_node
def tablefy(data, column=None, gutter=1, width=79):
    """
    Lay out a list of strings as a table read left-to-right then
    top-to-bottom. Values are not sorted.
    :param data: list of strings
    :param column: width of each column; widest entry when None
    :param gutter: width of the gutter between columns
    :param width: total width the table should fill
    :returns: newline separated string
    """
    if not data:
        return ""
    if column is None:
        column = max(len(entry) for entry in data)
    per_line = max(int(width / column), 1)
    sep = " " * gutter
    padded = [entry.ljust(column) for entry in data]
    rows = [sep.join(padded[i:i + per_line])
            for i in range(0, len(padded), per_line)]
    return "\n".join(rows)
import functools
def once(func):
    """
    Decorate func so it's only ever called the first time.
    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.
    >>> func = once(lambda a: a+3)
    >>> func(3)
    6
    >>> func(9)
    6
    >>> func('12')
    6
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The first call's result is cached as an attribute on func itself;
        # every later call short-circuits to the cached value.
        if not hasattr(func, 'always_returns'):
            func.always_returns = func(*args, **kwargs)
        return func.always_returns
    return wrapper
def _element_basis(string: str):
"""
Parse element and basis from string
Args: str
Returns: element, basis
"""
cut_list = string.split(".")
element = cut_list[0]
basis = " ".join(cut_list[1:])
return element, basis | c6be1c2a05832bb3bbc4577015b541136c724a08 | 702,520 |
def proportion_linear(time_right: float,
                      time_between: float,
                      time_step: float) -> tuple:
    """
    Compute linear interpolation weights for a point between two times.

    :param time_right: right border in time
    :param time_between: time between borders
    :param time_step: step in time between borders
    :return: (alpha, beta) with alpha + beta == 1

    Typical usage example:
        time_between = 1.98
        time_step = time_2 - time_1
        alpha, beta = proportion_linear(time_2, time_between, time_step)
        assert alpha + beta == 1
        assert alpha > beta
    """
    beta = (time_between - time_right) / time_step
    alpha = 1 - beta
    return alpha, beta
def check_negation(text, NEGATION_MAP):
    """
    Utility function to check negation of an emotion
    :param text: text chunk with the emotion term
    :param NEGATION_MAP: iterable of negation words/phrases
    :return: boolean value for negation
    """
    # any() short-circuits on the first match instead of scanning the
    # whole map as the original flag-variable loop did.
    return any(neg_word.strip() in text for neg_word in NEGATION_MAP)
def find_bean_by_name(jsn, nme):
    """
    Extracts a bean of the given name from jmx metrics json object.
    """
    if 'beans' not in jsn:
        return None
    for bean in jsn['beans']:
        if bean['name'] == nme:
            return bean
    return None
def convert_datetime_to_timestamp(dt):
    """Convert pandas datetime to unix timestamp"""
    seconds_since_epoch = dt.timestamp()
    return int(seconds_since_epoch)
def check_who_queued(user):
    """
    Returns a function that checks if the song was requested by user
    """
    def pred(song):
        requester = song.requested_by
        return bool(requester and requester.id == user.id)
    return pred
def _get_lines(filename):
"""Returns a list of lines from 'filename', joining any line ending in \\
with the following line."""
with open(filename, "r") as f:
lines = []
accum = ""
for line in f:
if line.endswith("\\\n"):
accum += line[:-2]
else:
lines.append(accum + line)
accum = ""
return lines | 83b2184eedfb21d27f310f9f2229d05d69ac8b92 | 702,540 |
import re
# Compiled once at import time (the original recompiled per call): matches a
# single punctuation character other than . - _ (anything not word/space/'-'/'.')
# or a run of consecutive whitespace.
_PUNCT_OR_WS_RE = re.compile(r"[^\w\s\-.]|\s+")
def replace_punctuation_and_whitespace(text):
    """Replace occurrences of punctuation (other than . - _) and any consecutive white space with ."""
    return _PUNCT_OR_WS_RE.sub(".", text)
def filter_out_length_of_one(word):
    """
    Predicate that rejects words of length 1 (or empty strings).

    The previous docstring claimed a None/word return; the function has
    always returned a boolean, which is what filter() callers expect.

    :param word: Input word
    :return: True if the word is longer than one character, False otherwise
    """
    return len(word) > 1
import json
def read_json(jsonfile):
    """Read a json file into a dictionary
    Args:
        jsonfile: the name of the json file to read
    Returns:
        the contents of the JSON file as a dictionary
    """
    with open(jsonfile, 'r') as fp:
        return json.load(fp)
def zero_float(string):
    """Try to make a string into a floating point number and make it zero if
    it cannot be cast. This function is useful because python will throw an
    error if you try to cast a string to a float and it cannot be.
    """
    try:
        return float(string)
    # Narrowed from a bare `except:` (which also caught KeyboardInterrupt):
    # float() only raises ValueError (bad text) or TypeError (bad type).
    except (TypeError, ValueError):
        return 0
def checksum(data) -> int:
    """
    Compute the 16-bit one's-complement Internet checksum of *data*
    (RFC 792): sum the bytes as little-endian 16-bit words, fold the
    carries back into the low 16 bits, and complement the result.
    Found on: http://www.binarytides.com/raw-socket-programming-in-python-linux/. Modified to work in python 3.
    :param data: data to built checksum from.
    :return: 16-bit int checksum
    """
    total = 0
    for lo in range(0, len(data), 2):
        word = data[lo]
        hi = lo + 1
        if hi < len(data):
            word += data[hi] << 8  # second byte is the high octet
        total += word
    # fold any carry out of the low 16 bits back in (twice covers all cases)
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return ~total & 0xffff
def _number_convert(match):
"""
Convert number with an explicit base
to a decimal integer value:
- 0x0000 -> hexadecimal
- 16'h0000 -> hexadecimal
- 0b0000 -> binary
- 3'b000 -> binary
- otherwise -> decimal
"""
prefix, base, number = match.groups()
if prefix is not None:
return str(match.group(0))
if base in "xh":
return str(int(number, 16))
if base == "b":
return str(int(number, 2))
return str(int(number, 10)) | adef8f8f80342fbcd79c461068eb04f99427f88c | 702,549 |
def run_episode(environment, agent, is_training=False):
    """Run a single episode and return its accumulated return."""
    timestep = environment.reset()
    while not timestep.last():
        action = agent.step(timestep, is_training)
        next_timestep = environment.step(action)
        if is_training:
            agent.update(timestep, action, next_timestep)
        timestep = next_timestep
    return environment.episode_return
def pandas_df_to_temporary_csv(tmp_path):
    """Provides a function to write a pandas dataframe to a temporary csv file with function scope."""
    def _write_csv(pandas_df, sep=",", filename="temp.csv"):
        csv_path = tmp_path / filename
        pandas_df.to_csv(csv_path, sep=sep, header=True, index=False, na_rep="")
        return csv_path
    return _write_csv
def df(n):
    """Gives the double factorial of *n* (as a float; 1.0 for n <= 0)."""
    result = 1.0
    k = n
    while k > 0:
        result *= k
        k -= 2
    return result
import re
def track_num_to_int(track_num_str):
    """ Convert a track number tag value to an int.
    This function exists because the track number may be
    something like 01/12, i.e. first of 12 tracks,
    so we need to strip off the / and everything after.
    If the string can't be parsed as a number, -1 is returned. """
    if track_num_str == '':
        return -1
    cleaned = track_num_str
    if '/' in cleaned:
        # drop the "/total" suffix, e.g. "01/12" -> "01"
        cleaned = re.sub('(/(.*)$)', '', cleaned)
    try:
        return int(cleaned)
    except ValueError:
        return -1
def sort_list(player_data_list):
    """Sort player data rows by their final element (the qualifier).
    Args:
        player_data_list: player data list
    Returns:
        player data list properly sorted
    """
    def qualifier(row):
        return row[-1]
    return sorted(player_data_list, key=qualifier)
def sort(array: list[int]) -> list[int]:
    """Naive bubble sort implementation.

    Sorts *array* in place (and returns it) by repeatedly bubbling the
    smallest remaining element toward the front.
    """
    n = len(array)
    for i in range(n):
        j = n - 1
        while j > i:
            if array[j] < array[j - 1]:
                array[j - 1], array[j] = array[j], array[j - 1]
            j -= 1
    return array
def _call_bool_filter(context, value):
"""Pass a value through the 'bool' filter.
:param context: Jinja2 Context object.
:param value: Value to pass through bool filter.
:returns: A boolean.
"""
return context.environment.call_filter("bool", value, context=context) | 181a51d7d436cf0eaf2c0fe9d2f04ab4030f010c | 702,564 |
def get_wbo(offmol, dihedrals):
    """
    Return the Wiberg bond order of a dihedral's central bond.
    Parameters
    ----------
    offmol: openforcefield molecule
    dihedrals: list of atom indices in dihedral
    Returns
    -------
    bond.fractional_bond_order: Wiberg bond order computed with the
        openforcefield toolkit for the dihedral's central bond
    """
    offmol.assign_fractional_bond_orders(bond_order_model="am1-wiberg-elf10")
    central_bond = offmol.get_bond_between(dihedrals[1], dihedrals[2])
    return central_bond.fractional_bond_order
import string
import random
def random_string() -> str:
    """Return a random string of 32 ASCII letters."""
    alphabet = string.ascii_letters
    return "".join(random.choice(alphabet) for _ in range(32))
from typing import Pattern
def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
    """Replace `regex` with `replacement` twice on `original`.
    This is used by string normalization to perform replaces on
    overlapping matches.
    """
    first_pass = regex.sub(replacement, original)
    return regex.sub(replacement, first_pass)
def get_pdm_terms(site_index, n, adj_sites=4, shift=0):
    """
    Collect site indices for a fragment around a central lattice site.
    inputs:
        site_index (int): center site index
        n (int): number of sites in lattice (periodic boundary conditions)
        adj_sites (int): how many adjacent sites on each side to collect
        shift (int): offset added to every returned row index (for compound-
            indexed matrices holding multiple samples)
    returns:
        ind_list: central site plus wrapped neighbor indices
        shift_ind_list: same indices shifted by `shift`
        coh_list: neighbor indices only (coherences with the central site)
        shift_coh_list: neighbor indices shifted by `shift`
    """
    ind_list = [site_index]
    shift_ind_list = [site_index + shift]
    coh_list = []
    shift_coh_list = []
    for raw in range(site_index - adj_sites, site_index + adj_sites + 1):
        if raw == site_index:
            continue  # central site already recorded above
        # single wrap (not modulo) to stay identical for adj_sites < n
        wrapped = raw
        if wrapped < 0:
            wrapped += n
        elif wrapped >= n:
            wrapped -= n
        ind_list.append(wrapped)
        shift_ind_list.append(wrapped + shift)
        coh_list.append(wrapped)
        shift_coh_list.append(wrapped + shift)
    return ind_list, shift_ind_list, coh_list, shift_coh_list
import ast
def ast_name_node(**props):
    """
    Build an ast.Name node whose attributes are set from `props`.
    """
    node = ast.Name()
    for attr, val in props.items():
        setattr(node, attr, val)
    return node
def buildX(traj_file, t, X):
    """
    Builds the node attribute matrix for a given time step.
    Inputs:
        traj_file : string indicating the location of the ground truth trajectory data
        t : scalar indicating current time step
        X : empty node attribute matrix in shape [n_nodes, n_features]
    Outputs:
        X : node attribute matrix with values added from traj_file in shape [n_nodes, n_features]
    """
    # read through the file to find the current time step from "t = 100" etc.
    with open(traj_file) as f:
        lines = f.readlines()
    i = -1
    # find the line that contains the time step information
    for line in lines:
        i += 1
        if line[0:4] == "t = " and int(line[4:]) == t:
            # print("found time {0} in trajectory data".format(t))
            count = 0
            # walk forward from the "t = ..." line through this time step's block
            for line in lines[i:]:
                # print("count", count)
                # extract these lines and make a graph
                if count < 3:
                    # the first 3 lines (the "t = ..." line plus two more) are
                    # skipped as header lines -- presumably oxDNA metadata such
                    # as "b = ..." / "E = ..."; TODO confirm against the format
                    count += 1
                    continue
                # data rows: one per node, exactly X.shape[0] of them
                if ((count >= 3) and (count < (X.shape[0] + 3))):
                    # X(i,1:-1) = all data in the current row of .oxdna file
                    # column 0 of X is never written here; values start at j = 1
                    j = 1
                    my_str = ""
                    # manual tokenizer: accumulate characters until a space or
                    # newline, then parse the token as a float.
                    # NOTE(review): consecutive spaces would call float("") and
                    # raise -- assumes single-space-separated columns; confirm.
                    for k in range(len(line)):
                        if line[k] != " " and line[k] != "\n":
                            my_str += line[k]
                        if line[k] == " " or line[k] == "\n":
                            X[(count-3),j] = float(my_str)
                            j += 1
                            my_str = ""
                    count += 1
                else:
                    # past the last node row for this time step; stop scanning
                    break
    return X
def move_left(board, row):
    """Rotate the given row one position to the left (first cell wraps to the end)."""
    first, rest = board[row][:1], board[row][1:]
    board[row] = rest + first
    return board
def get_quadrant(x, y):
    """
    Return the quadrant of (x, y) in a mathematically positive system:
    1 => first quadrant
    2 => second quadrant
    3 => third quadrant
    4 => fourth quadrant
    None => either one or both coordinates are zero
    Parameters
    ----------
    x : float
        x coordinate.
    y : float
        y coordinate.
    Returns
    -------
    int or None
        Quadrant number.
    """
    # Map the sign pair directly to a quadrant; any zero (or NaN, whose
    # comparisons are all False) yields a sign of 0 and therefore None.
    quadrant_by_signs = {(1, 1): 1, (-1, 1): 2, (-1, -1): 3, (1, -1): 4}
    sx = 1 if x > 0 else (-1 if x < 0 else 0)
    sy = 1 if y > 0 else (-1 if y < 0 else 0)
    return quadrant_by_signs.get((sx, sy))
def days_in_month(year, month):
    """
    Inputs:
      year - an integer between datetime.MINYEAR and datetime.MAXYEAR
              representing the year
      month - an integer between 1 and 12 representing the month
    Returns:
      The number of days in the input month, or None (after printing a
      message) when month is outside 1..12 — the original fallback
      behavior is preserved.
    """
    # Gregorian leap year: divisible by 4, except centuries not divisible by 400.
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    days = (31, 29 if leap else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    if 1 <= month <= 12:
        return days[month - 1]
    print("Please Enter a valid month and Year")
def repeat(s, n):
    """ (str, int) -> str
    Return s repeated n times; if n is negative, return empty string.
    >>> repeat('yes', 4)
    'yesyesyesyes'
    >>> repeat('no', 0)
    ''
    """
    # sequence repetition with a non-positive count yields an empty list
    return "".join([s] * n)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.