content (string, 39-14.9k chars) | sha1 (40-char string) | id (int64, 0-710k)
---|---|---|
def convert_proxy_to_string(proxy):
""" This function convert a requests proxy format to a string format """
return proxy['http'].split('//')[1] | 7e4fbb7b075fb2139fda83b52f562699c85a6b32 | 28,692 |
def ReadFile(path, mode='r'):
"""Read a given file on disk. Primarily useful for one off small files."""
with open(path, mode) as f:
return f.read() | cf007c6fcf826eccde7f42b87542794f1d4d8cb0 | 28,693 |
from typing import List
def scale_row(row: List[float], scalar: float) -> List[float]:
"""
Return the row scaled by scalar.
"""
return [scalar * el for el in row] | d44901244199b9d39529a3e3bccc7a9eab9d332e | 28,697 |
def get_action_value(mdp, state_values, state, action, gamma):
""" Computes Q(s,a) as in formula above """
result = 0
for to_state in mdp.get_all_states():
transition_probability = mdp.get_transition_prob(state, action, to_state)
reward = mdp.get_reward(state, action, to_state)
result += transition_probability * (reward + gamma * state_values[to_state])
return result | 226d8e01054552ae1108d3d83e0e438ddc821df9 | 28,702 |
def convert_shell_env(env):
"""Convert shell_env dict to string of env variables
"""
env_str = ""
for key in env.keys():
env_str += "export {key}={value};".format(
key=key, value=str(env.get(key)))
return env_str | 4f777dbeb2534529dbf76e2b5a203e4b2de7ed63 | 28,704 |
import requests
def get_identity_token(scopes='https://www.googleapis.com/auth/cloud-platform'):
"""
Getting an identity token from a google authorization service.
:param scopes: https://cloud.google.com/deployment-manager/docs/reference/latest/authorization
:return: bearer token
"""
host = 'http://metadata.google.internal'
url = f'{host}/computeMetadata/v1/instance/service-accounts/default/token?scopes={scopes}'
response = requests.get(url=url, headers={'Metadata-Flavor': 'Google'})
response.raise_for_status()
    # we are always quicker than the lifetime of the token and therefore skip checking expires_in and token_type
return response.json()['access_token'] | 70acf41bd322b70ce7d258688698d511e0c2211b | 28,707 |
def split_comma_separated(text):
"""Return list of split and stripped strings."""
return [t.strip() for t in text.split(',') if t.strip()] | 5030ff3dac88de0ef82f929cfcc5adf913b124a0 | 28,711 |
import pickle
def read_pickle(name):
"""
Reads a pickle file
:param name: Path to the pickled file to read
:return: The deserialized pickled file
"""
with open(name, "rb") as input_file:
return pickle.load(input_file) | 4021e5f3aeba9824d07998658d88f9971843585f | 28,717 |
from typing import List
from typing import Dict
def build_final_outputs(outputs: List[Dict], old_new_dict: Dict) -> List[Dict]:
"""
Receives outputs, or a single output, and a dict containing mapping of old key names to new key names.
Returns a list of outputs containing the new names contained in old_new_dict.
Args:
outputs (Dict): Outputs to replace its keys.
old_new_dict (Dict): Old key name mapped to new key name.
Returns:
(Dict): The dictionary with the transformed keys and their values.
"""
return [{old_new_dict.get(k): v for k, v in output.items() if k in old_new_dict} for output in outputs] | 930d9026ad731c8689110a2fd1bef0b3b13e79d9 | 28,718 |
def transform_box_format_gt(box):
"""x1,y1,x2,y2 to x1, y1, w, h"""
x1, y1, x2, y2 = box.x1, box.y1, box.x2, box.y2
return [x1, y1, x2 - x1, y2 - y1] | 10b32ec5f51a2ee5a558bf74345df4528c65b0ec | 28,719 |
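A usage sketch; Box here is a hypothetical stand-in for any object with x1, y1, x2, y2 attributes:

from collections import namedtuple

Box = namedtuple('Box', 'x1 y1 x2 y2')
print(transform_box_format_gt(Box(10, 20, 50, 80)))  # [10, 20, 40, 60]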
def calc_corr_matrix(wallet_df):
"""Calculates the Pearson correlation coefficient between cryptocurrency pairs
Args:
wallet_df (DataFrame): Transformed DF containing historical price data for cryptocurrencies
"""
corrmat_df = wallet_df.corr()
return corrmat_df | 7d76f496783f129749888d7913a93919a5570273 | 28,722 |
import re
def parse_tags(s):
"""
Return a list of tags (e.g. {tag_a}, {tag_b}) found in string s
"""
    return re.findall(r'\{(\w+)\}', s) | 28849f326ff6019b9e41ee6fa0f48cfebff0811e | 28,724 |
def count_parameters(model) -> int:
"""Count parameters in a torch model.
Parameters
----------
model : torch.nn.module
The model from which you want to count parameters.
Returns
-------
int
Total number of parameters in the model.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad) | a3f398bb5969cd4d81c1702089698a2ed9d79d31 | 28,727 |
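A usage sketch, assuming PyTorch is installed:

import torch.nn as nn

model = nn.Linear(10, 2)  # 10*2 weights + 2 biases
print(count_parameters(model))  # 22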
import csv
def get_csv_log_file_data(folder_list):
""" The Function takes a list of folders and returns combined list of entries
and the folder of the entry, taken from from the driving_log.csv file."""
csv_lines = []
# For the driving_log.csv file from imput list of folders:
# In this case ['training_data_middle', 'training_data_opposite', 'training_data_recover']
# The first folder has training samples to train the network to drive car in the middle of the road
# The second folder has data by driving the car in the clock wise direction on track one
# The third folder has samples to teach car to recover to middle of road from sides.
for val in folder_list:
print('./{}/driving_log.csv'.format(val))
with open('./{}/driving_log.csv'.format(val)) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
csv_lines.append([line, './{}/'.format(val)])
return csv_lines | 1945fae6695116052864c1a93f19e2198b24662f | 28,730 |
import logging
def clean_geolocation_data(geolocation_data, attr_to_remove=None):
"""Remove attributes from geolocation data.
If no attributes are provided, return a copy of the same data.
:param geolocation_data: Full geolocation data
:type: dict
:param attr_to_remove: List of attributes to remove
:type: list
:return: Geolocation data (cleaned or copy)
:rtype: dict
"""
geolocation_copy = geolocation_data.copy()
if attr_to_remove is None:
return geolocation_copy
for attr in attr_to_remove:
try:
del geolocation_copy[attr]
except KeyError:
logging.info('Key not found, continuing ...')
return geolocation_copy | 9ef7a5c2777b556b81c1d21ec775ae6963c857ca | 28,734 |
def h3(text):
"""h3 tag
>>> h3('my subsubheading')
'<h3>my subsubheading</h3>'
"""
return '<h3>{}</h3>'.format(text) | 196d30c7b3b0e6219ef4ce3a2edfd42a3bb13f46 | 28,738 |
def get_skin_cluster_influence_objects(skincluster):
"""
Wrapper around pymel that wrap OpenMaya.MFnSkinCluster.influenceObjects() which crash when
a skinCluster have zero influences.
:param skincluster: A pymel.nodetypes.SkinCluster instance.
:return: A list in pymel.PyNode instances.
"""
try:
return skincluster.influenceObjects()
except RuntimeError:
return [] | ebb686bc4ca718db104fccb08b4332de1df9d3d3 | 28,739 |
def find_sum_of_arithmetic_sequence(requested_terms: int, first_term: int, common_difference: int) -> int:
"""
Finds the sum of an arithmetic sequence
:param requested_terms:
:param first_term:
:param common_difference:
:return: the sum of an arithmetic sequence
"""
return int((requested_terms / 2) * (2 * first_term + (requested_terms - 1) * common_difference)) | fc4f3fec94737674096ff9e0a22c6001690c6101 | 28,742 |
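For example, the first ten terms of 2, 5, 8, ... sum to (10 / 2) * (2 * 2 + 9 * 3) = 155:

assert find_sum_of_arithmetic_sequence(10, 2, 3) == 155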
def render_compiled(compiled, variables):
"""Render from compiled template with interpolated variables."""
template, partials = compiled
return template(variables, partials=partials) | 3c13acf96ac3b59bcd5c2a1f3f3dbc19fd210c80 | 28,753 |
def requirements(section=None):
"""Helper for loading dependencies from requirements files."""
if section is None:
filename = "requirements.txt"
else:
filename = f"requirements-{section}.txt"
with open(filename) as file:
return [line.strip() for line in file] | 2e4b7f9f4d8c8d0cd3cfa749f03785d2bba6a26b | 28,754 |
def route(rule, **options):
"""Like :meth:`Flask.route` but for nereid.
.. versionadded:: 3.0.7.0
    Unlike the implementations in flask and flask.blueprint, this route decorator
    does not require an existing nereid application or a blueprint instance.
    Instead, the decorator adds an attribute called `_url_rules` to the method.
.. code-block:: python
:emphasize-lines: 1,7
from nereid import route
class Product:
__name__ = 'product.product'
@classmethod
@route('/product/<uri>')
def render_product(cls, uri):
...
return 'Product Information'
"""
def decorator(f):
if not hasattr(f, '_url_rules'):
f._url_rules = []
f._url_rules.append((rule, options))
return f
return decorator | c7f33af4e8fa10090e5b6a90532707fd59688885 | 28,755 |
def remap_column_names(data_names, name_map):
""" Remap data array column names using dictionary map.
For each column name that matches a key in name_map, the column name is replaced with
that key's value.
Args:
data_names (str, nx1 tuple): list of column names taken from structured np array
name_map (dict): dictionary with keys matching history file column names to be
replaced by the corresponding values
Returns:
(str, nx1 tuple): New list of column names
"""
return tuple(name_map.get(name, name) for name in data_names)
# get(name, name) means it will keep the current name if not found in dictionary | 4863d8b9ce1986df4bd85f543014208428ea85cb | 28,756 |
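A quick example with made-up column names:

old_names = ('time', 'temp', 'pressure')
print(remap_column_names(old_names, {'temp': 'temperature_C'}))
# ('time', 'temperature_C', 'pressure')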
def radical(n, *args, **kwds):
"""
Return the product of the prime divisors of n.
This calls ``n.radical(*args, **kwds)``. If that doesn't work, it
does ``n.factor(*args, **kwds)`` and returns the product of the prime
factors in the resulting factorization.
EXAMPLES::
sage: radical(2 * 3^2 * 5^5)
30
sage: radical(0)
Traceback (most recent call last):
...
ArithmeticError: Radical of 0 not defined.
sage: K.<i> = QuadraticField(-1)
sage: radical(K(2))
i + 1
The next example shows how to compute the radical of a number,
assuming no prime > 100000 has exponent > 1 in the factorization::
sage: n = 2^1000-1; n / radical(n, limit=100000)
125
"""
try:
return n.radical(*args, **kwds)
except AttributeError:
return n.factor(*args, **kwds).radical_value() | 846549ba03bcc38cd25a49cb5a2bd51c16cc2e54 | 28,762 |
def findPeakCluster(index, build_list, df, peak_gap):
"""
Recursively finds members of a peak cluster starting from the peak with
the smallest size.
Parameters
----------
index : TYPE Integer
DESCRIPTION. The index of df that corresponds to the
rows (i.e. peaks) that are clustered (within peak_gap
of each other) and awaiting to be processed to give
fewer peaks.
build_list : TYPE List
DESCRIPTION. List of index of peaks in peak clusters
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
peak_gap : TYPE Integer
DESCRIPTION. User-supplied. A pair of peaks within peak_gap of
each other will be processed to give one peak.
Returns
-------
TYPE List
A list of index corresponding to peaks in a peak cluster.
"""
# Return build_list if we reach the end of dataframe
if index == max(df.index):
return list(set(build_list))
# Stop recursion when next peak is not within peak_gap of current peak
elif df.loc[index + 1, "Size"] - df.loc[index, "Size"] > peak_gap:
return list(set(build_list))
# Recursion to next peak
else:
build_list += [index, index + 1]
return findPeakCluster(index + 1, build_list, df, peak_gap) | f808d67a234df45f117b653ca16890ce9c4e982e | 28,769 |
def clamp(low, high, x):
"""
Clamps a number to the interval [low, high]
"""
return low if x < low else (high if x > high else x) | 1393af569f83369a7aa0c22cfaded9ed8e9d112a | 28,771 |
def valid(neighbor, rows, columns):
"""Find out if neighbor cell is valid
Args:
neighbor (List of integers): Neighboring cell position
rows (int): Number of rows on the board
columns (int): Number of columns on the board
Returns:
[boolean]: True if valid, False otherwise
"""
if neighbor[0] < 0 or neighbor[1] < 0:
return False
if neighbor[0] >= rows or neighbor[1] >= columns:
return False
return True | 5f832c2a8b06aa98e378c1614078e36f8a9fc2e5 | 28,773 |
def get_sample_name(filename):
"""Extract sample name from filename."""
return filename.split('/')[-1].split('.')[0] | 378dd429f0796930bfeb24e3a8fa51bcde32fb60 | 28,777 |
import re
def getTags(text):
""" Grep the tags in text and return them as a dict """
# 'Name' 'Version' 'Type' 'Author' 'Origin' 'Category' 'ID'
# 'URL' 'Desc' 'Date' 'Flags' 'RefCount' 'Signature' 'MASFile'
# 'BaseSignature' 'MinVersion'
# Name=134_JUDD
tags = {}
for line in text:
m = re.match(r'(.*) *= *(.*)', line)
if m:
tags[m.group(1)] = m.group(2)
#print(m.group(1), m.group(2))
return tags | 92a536b36e0c9ea78bef1ffd97ff69d4e448a0ac | 28,778 |
def _filter_none_elems_from_dict(dict_: dict):
""" Given a dict (call it m), returns a new dict which contains all the
non-null (non-none) elements of m.
Args:
dict_: The dict to return the non-null elements of.
Returns:
A new dict with all the non-null elements of <dict_>.
"""
return {k: v for k, v in dict_.items() if v is not None} | a8b95a1e9f36e90b5c96a4e95b05fcba069d4a93 | 28,779 |
def check_layout_layers(layout, layers):
"""
Check the layer widget order matches the layers order in the layout
Parameters
----------
layout : QLayout
Layout to test
layers : napari.components.LayerList
LayersList to compare to
Returns
----------
match : bool
Boolean if layout matches layers
"""
layers_layout = [
layout.itemAt(2 * i - 1).widget().layer
for i in range(len(layers), 0, -1)
]
return layers_layout == list(layers) | 7d5c3ed65e0588f430341345d6e0fb0856aacaeb | 28,784 |
def isSignedOff(message):
"""
Check whether a commit message contains Signed-off-by tag
"""
for line in message.splitlines():
if line.startswith('Signed-off-by'):
return True
return False | 79248d9438ac1fc1cbce18ae8af236f0960d42e2 | 28,785 |
def read_file(file_path):
"""
Reads input file.
Args:
file_path (str): path of input file.
Returns:
list: content of the file.
"""
with open(file_path, 'r') as file:
return file.read().strip().split('\n') | 27008dcbd9bd9336240e68c9514ae6170c18df78 | 28,795 |
import json
def read_json_file(filename):
"""Read JSON file
Read JSON file as dictionary
Args:
filename(str): Filename
Returns:
dict: Dictionary with file content
"""
with open(filename, 'r') as json_file:
json_str = json_file.read()
try:
parsed_json = json.loads(json_str)
except json.JSONDecodeError as err:
raise Exception(f"Could not read: {filename}; "
f"Error: {err}") from err
return parsed_json | dbc7360d44bb964f1d59186806f552c844d311e1 | 28,798 |
def FindFieldDefByID(field_id, config):
"""Find the specified field, or return None."""
for fd in config.field_defs:
if fd.field_id == field_id:
return fd
return None | b49766575864dac1d65a8a245827d00fcd345be1 | 28,802 |
def as_an_int(num1):
"""Returns the integer value of a number passed in."""
return int(num1) | 32971d8def38efacb150ff511e400700f78c0907 | 28,808 |
import json
def dump_json(obj, format = 'readable'):
"""Dump json in readable or parseable format"""
# Parseable format has no indentation
indentation = None
sep = ':'
if format == 'readable':
indentation = 4
sep += ' '
return json.dumps(obj, indent = indentation, separators = (',', sep),
sort_keys = True) | 2b6efb9202def6651bf4deb1ebc5e34f44c6c438 | 28,810 |
def format_element(eseq):
"""Format a sequence element using FASTA format (split in lines of 80 chr).
Args:
eseq (string): element sequence.
Return:
string: lines of 80 chr
"""
k = 80
    eseq = [eseq[i:i + k] for i in range(0, len(eseq), k)]  # slicing clamps to len(eseq)
    return "\n".join(eseq) | 1e492b341d5ecea3f44ac9fe7246964a98f016a7 | 28,811 |
from typing import List
def _calculate_german_iban_checksum(
iban: str, iban_fields: str = "DEkkbbbbbbbbcccccccccc"
) -> str:
"""
Calculate the checksum of the German IBAN format.
Examples
--------
>>> iban = 'DE41500105170123456789'
>>> _calculate_german_iban_checksum(iban)
'41'
"""
numbers: List[str] = [
value
for field_type, value in zip(iban_fields, iban)
if field_type in ["b", "c"]
]
translate = {
"0": "0",
"1": "1",
"2": "2",
"3": "3",
"4": "4",
"5": "5",
"6": "6",
"7": "7",
"8": "8",
"9": "9",
}
for i in range(ord("A"), ord("Z") + 1):
translate[chr(i)] = str(i - ord("A") + 10)
for val in "DE00":
translated = translate[val]
for char in translated:
numbers.append(char)
number = sum(int(value) * 10 ** i for i, value in enumerate(numbers[::-1]))
checksum = 98 - (number % 97)
return str(checksum) | dd9edcc1047caae8822d7a70f02d934db67504db | 28,813 |
def w_acoustic_vel(T,S,Z,lat):
""" Calculate acoustic velocity of water dependent on water depth,
temperature, salinity and latitude. After Leroy et al. (2008)
J. Acoust. Soc. Am. 124(5). """
    w_ac_vel = (1402.5 + 5 * T - 5.44e-2 * T**2 + 2.1e-4 * T**3
                + 1.33 * S - 1.23e-2 * S * T + 8.7e-5 * S * T**2
                + 1.56e-2 * Z + 2.55e-7 * Z**2 - 7.3e-12 * Z**3
                + 1.2e-6 * Z * (lat - 45) - 9.5e-13 * T * Z**3
                + 3e-7 * T**2 * Z + 1.43e-5 * S * Z)
return w_ac_vel | d1b8cac0c2bb65d76eb0125faf981a5b1ad1e31e | 28,817 |
def find_service_by_type(cluster, service_type):
"""
Finds and returns service of the given type
@type cluster: ApiCluster
@param cluster: The cluster whose services are checked
@type service_type: str
@param service_type: the service type to look for
@return ApiService or None if not found
"""
for service in cluster.get_all_services():
if service.type == service_type:
return service
return None | fd04adce95c71499e17a143d7c94c0cf1aa603c9 | 28,818 |
def resolve_stack_name(source_stack_name, destination_stack_path):
"""
Returns a stack's full name.
    A dependency stack's name can be provided as either a full stack name, or
    as the file base name of a stack from the same environment.
    resolve_stack_name calculates the dependency stack's full name from this.
    :param source_stack_name: The name of the stack with the parameter to be \
        resolved.
    :type source_stack_name: str
    :param destination_stack_path: The full or short name of the dependency \
        stack.
:type destination_stack_path: str
:returns: The stack's full name.
:rtype: str
"""
if "/" in destination_stack_path:
return destination_stack_path
else:
source_stack_base_name = source_stack_name.rsplit("/", 1)[0]
return "/".join([source_stack_base_name, destination_stack_path]) | ffee297a7ce1f25cecd1832ced3c8dc9fd729e90 | 28,823 |
def get_induced_subgraph(graph, nodes):
"""Get the nodes-induced subgraph G[S] for a graph G and a subset of nodes S"""
return {node: graph[node].intersection(nodes) for node in nodes} | 58955db6d38dae86f24b756a6bfc67886300eaf5 | 28,826 |
def merge_two_lists(list_one, list_two):
"""
    Merge two sorted lists into a single sorted list and return it.
    The input lists are not modified.
:rtype: list
:return: sorted list
"""
# Copy lists by value
temp_list_one = list_one[:]
temp_list_two = list_two[:]
mergedlist = []
while temp_list_one and temp_list_two:
if temp_list_one[0] <= temp_list_two[0]:
mergedlist.append(temp_list_one.pop(0))
else:
mergedlist.append(temp_list_two.pop(0))
while temp_list_one:
mergedlist.append(temp_list_one.pop(0))
while temp_list_two:
mergedlist.append(temp_list_two.pop(0))
return mergedlist | bd0bae58ad54725b55da64404b2635e71881907f | 28,828 |
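For example, merging two already-sorted lists:

print(merge_two_lists([1, 3, 5], [2, 4, 6]))  # [1, 2, 3, 4, 5, 6]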
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
    lists = []
    for i in stories:
        for triggers in triggerlist:
            if triggers.evaluate(i):
                lists.append(i)
                break  # stop after the first firing trigger so stories are not duplicated
    return lists | a91aa78452fb0a75753a0687a7938a565b2b87f0 | 28,829 |
def remove_namespace(tree):
"""
Namespace can make Splunk output ugly. This function removes namespace from all elements
e.g element.tag = '{http://schemas.microsoft.com/win/2004/08/events/event}System'
:param tree: xml ElementTree Element
:return: xml ElementTree Element with namespace removed
"""
    # Remove namespace
    for element in tree.iter():  # getiterator() was removed in Python 3.9
        try:
            if element.tag.startswith('{'):
                element.tag = element.tag.split('}')[1]
        except AttributeError:
            pass  # element.tag is not a string for comments/processing instructions
return tree | b2f4431ffcd33b26321271ea55da24386c10adac | 28,836 |
def elide_sequence(s, flank=5, elision="..."):
"""Trims the middle of the sequence, leaving the right and left flanks.
Args:
s (str): A sequence.
flank (int, optional): The length of each flank. Defaults to five.
elision (str, optional): The symbol used to represent the part trimmed. Defaults to '...'.
Returns:
str: The sequence with the middle replaced by ``elision``.
Examples:
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
'ABCDE...VWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=3)
'ABC...XYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", elision="..")
'ABCDE..VWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=12)
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=12, elision=".")
'ABCDEFGHIJKL.OPQRSTUVWXYZ'
"""
elided_sequence_len = flank + flank + len(elision)
if len(s) <= elided_sequence_len:
return s
return s[:flank] + elision + s[-flank:] | df318fec488dec46e0f99a0c035b0a962be59844 | 28,840 |
from typing import Mapping
def find_path(g: Mapping, src, dst, path=None):
"""find a path from src to dst nodes in graph
>>> g = dict(a='c', b='ce', c='abde', d='c', e=['c', 'z'], f={})
>>> find_path(g, 'a', 'c')
['a', 'c']
>>> find_path(g, 'a', 'b')
['a', 'c', 'b']
>>> find_path(g, 'a', 'z')
['a', 'c', 'b', 'e', 'z']
>>> assert find_path(g, 'a', 'f') == None
"""
    if path is None:
        path = []
path = path + [src]
if src == dst:
return path
if src not in g:
return None
for node in g[src]:
if node not in path:
extended_path = find_path(g, node, dst, path)
if extended_path:
return extended_path
return None | ea3c48ef552b1393448c36579b11c5bc09c5cced | 28,841 |
from typing import Any
from typing import Dict
def metadata(user_model: Any) -> Dict:
"""
Call the user model to get the model metadata
Parameters
----------
user_model
User defined class instance
Returns
-------
Model Metadata
"""
if hasattr(user_model, "metadata"):
return user_model.metadata()
else:
return {} | 6fa8df5a8d842c8fbccfa6d8447732da4263a124 | 28,848 |
def rgb_clamp(colour_value):
"""
Clamp a value to integers on the RGB 0-255 range
"""
return int(min(255, max(0, colour_value))) | f4dce9746fecd32cb432f03a056451a34d6f265a | 28,852 |
import pyarrow
def _is_column_extension_type(ca: "pyarrow.ChunkedArray") -> bool:
"""Whether the provided Arrow Table column is an extension array, using an Arrow
extension type.
"""
return isinstance(ca.type, pyarrow.ExtensionType) | 606c2fad0486df8f4995925021111eb1cb78f3c4 | 28,853 |
import string
import random
def generate_password(length: int) -> str:
"""Return random password of specified length."""
    choice = string.ascii_letters + string.digits
    return "".join(random.choices(choice, k=length)) | 3ef64b60ea893fe37aad5ca41cad6f1363f48412 | 28,864 |
def wrap_it_in_a_link(html, url):
""" Wrap a link around some arbitrary html
Parameters:
html - the html around which to wrap the link
url - the URL to link to
Returns:
The same html but with a link around it
"""
return "<a href='" + url + "'>" + html + "</a>" | 606ca401982a4f5474e03063e26e402e690557fc | 28,871 |
def is_even(number: int):
"""Return True if the number is even and false otherwise"""
return number % 2 == 0 | 0fe6ff55e5a84caedda3523bad02b8d144ab0657 | 28,873 |
def get_frame_IDs(objects_archive, start, end, every):
"""
Returns list of ID numbers of the objects identified in each frame.
Parameters
----------
objects_archive : dictionary
Dictionary of objects identified in a video labeled by ID number
start, end, every : ints
start = index of first frame to analyze; end = index of last frame
to analyze; every = analyze every `every` frame (e.g., if every = 3,
analyzes every 3rd frame)
Returns
-------
frame_IDs : dictionary
Dictionary indexed by frame number in the video. Each entry is
a list of the ID numbers of the objects identified in that frame.
"""
# initializes dictionary of IDs for each frame
frame_IDs = {}
for f in range(start, end, every):
frame_IDs[f] = []
# loads IDs of objects found in each frame
for ID in objects_archive.keys():
obj = objects_archive[ID]
frames = obj.get_props('frame')
for f in frames:
frame_IDs[f] += [ID]
return frame_IDs | 4826a4d226460e07fbd015e59a052d9792e8ac6a | 28,875 |
def plan_exists(plan):
"""This function can be used to check if a plan trajectory was computed.
Parameters
----------
plan : :py:obj:`!moveit_msgs.msg.RobotTrajectory`
The computed robot trajectory.
Returns
-------
:py:obj:`bool`
Bool specifying if a trajectory is present
"""
    # A trajectory is present if either trajectory field has at least one point
    return (
        len(plan.joint_trajectory.points) >= 1
        or len(plan.multi_dof_joint_trajectory.points) >= 1
    ) | e2eb384119ed55cb3561e9485bddc3a221a03f92 | 28,876 |
def suggest_patience(epochs: int) -> int:
"""Current implementation: 10% of total epochs, but can't be less than 5."""
assert isinstance(epochs, int)
return max(5, round(.1 * epochs)) | e1631576d63dc3b62df636fb555739158035a25a | 28,882 |
def measure_diff(fakes, preds):
"""Measures difference between ground truth and prediction
fakes (float array): generated "true" global scores
preds (list list): list of [video_id: int, criteria_name: str,
score: float, uncertainty: float]
in same order
Returns:
(float): 100 times mean squared distance
between ground truth and predicted score
"""
diff = 0
for fake, pred in zip(fakes, preds):
f, p = round(fake, 2), pred[2]
diff += 100 * abs(f - p) ** 2
return diff / len(preds) | 824052778fe02620d52ad0ff5987e1f62363d7fc | 28,884 |
def stringify(vals):
"""Return a string version of vals (a list of object implementing __str__)
Args:
vals (List[any]): List of object that implements __str__
Returns:
str: A string representation
"""
    if isinstance(vals, list):
        return '_'.join(str(e) for e in vals)
    else:
        return str(vals) | cbd681b2435a919a91a7eeb905ca3279a9baeea6 | 28,885 |
import logging
def get_stream_handler(
formatter: logging.Formatter, level: int
) -> logging.StreamHandler:
"""
Create a ready-to-go stream handler for a Logger.
Parameters
----------
formatter : logging.Formater
Formatter to apply to the handler.
level : int
Level to apply to the stream handler.
Returns
-------
logging.StreamHandler
"""
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(level)
return handler | 2c6de5bec9fbcb3f7f76c1c21c0db2091b649c96 | 28,886 |
def print_to_log(message, log_file):
"""Append a line to a log file
:param message: The message to be appended.
:type message: ``str``
:param log_file: The log file to write the message to.
:type log_file: ``Path``
"""
with open(log_file, "a") as file_handle:
        message = message.rstrip()  # str.rstrip() returns a new string; the result must be kept
        file_handle.write(message + "\n")
return 0 | cc79449ac082e3e1053e43bfe6d87c7b617c1923 | 28,887 |
def get_prefix_repr(prefix, activities):
"""
Gets the numeric representation (as vector) of a prefix
Parameters
-------------
prefix
Prefix
activities
Activities
Returns
-------------
prefix_repr
Representation of a prefix
"""
this_pref_repr = [0] * len(activities)
for act in prefix:
i = activities.index(act)
this_pref_repr[i] = this_pref_repr[i] + 1
return tuple(this_pref_repr) | 3a194ba988ed5d1889e64df011cd7437e0354ce7 | 28,892 |
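A short example; the tuple counts how often each activity occurs in the prefix:

activities = ['a', 'b', 'c']
print(get_prefix_repr(['a', 'c', 'a'], activities))  # (2, 0, 1)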
def sanitizeFilename(filename: str) -> str:
"""
Remove invalid characters <>:"/\\|?* from the filename.
"""
result = ''
for c in filename:
if c not in '<>:"/\\|?*':
result += c
return result | 90068166ee5ca26cbe4713f2d4edb96be92b961c | 28,893 |
def format_time_delta(delta):
"""
Given a time delta object, convert it into a nice, human readable string.
For example, given 290 seconds, return 4m 50s.
"""
d = delta.days
    h = delta.seconds // 3600
    m = (delta.seconds % 3600) // 60
    s = delta.seconds % 60
if d > 0:
return '%sd %sh' % (d, h)
elif h > 0:
return '%sh %sm' % (h, m)
else:
return '%sm %ss' % (m, s) | 8b795cdb716f78dc7c0be5b7454a9d63cd703463 | 28,899 |
import re
def check_url(url):
""" Check if url is actually a valid YouTube link using regexp.
:param url: string url that is to be checked by regexp
:return: boolean True if the url matches the regexp (it is a valid YouTube page), otherwise False
"""
    youtube = re.compile(r'(https?://)?(w{3}\.)?(youtube\.com/watch\?v=|youtu\.be/).{11}')
    return bool(youtube.match(url)) | ab0874af82ebe4d7e9435a57a96feb8339da9e70 | 28,901 |
def termcap_distance(ucs, cap, unit, term):
"""termcap_distance(S, cap, unit, term) -> int
Match horizontal distance by simple ``cap`` capability name, ``cub1`` or
``cuf1``, with string matching the sequences identified by Terminal
instance ``term`` and a distance of ``unit`` *1* or *-1*, for right and
left, respectively.
Otherwise, by regular expression (using dynamic regular expressions built
using ``cub(n)`` and ``cuf(n)``. Failing that, any of the standard SGR
sequences (``\033[C``, ``\033[D``, ``\033[nC``, ``\033[nD``).
Returns 0 if unmatched.
"""
assert cap in ('cuf', 'cub')
# match cub1(left), cuf1(right)
one = getattr(term, '_%s1' % (cap,))
if one and ucs.startswith(one):
return unit
# match cub(n), cuf(n) using regular expressions
re_pattern = getattr(term, '_re_%s' % (cap,))
_dist = re_pattern and re_pattern.match(ucs)
if _dist:
return unit * int(_dist.group(1))
return 0 | 731c4e02378a626c6a3c3dd7ee30848d88cc2f8a | 28,905 |
def _bytes_to_string(value: bytes) -> str:
"""Decode bytes to a UTF-8 string.
Args:
value (bytes): The bytes to decode
Returns:
str: UTF-8 representation of bytes
Raises:
UnicodeDecodeError
"""
return value.decode(encoding="utf-8") | b33508db0184f21854c734bbef126ec39d95c7b1 | 28,907 |
def merge(intervals):
""" Turns `intervals` [[0,2],[1,5],[7,8]] to [[0,5],[7,8]].
"""
out = []
for i in sorted(intervals, key=lambda i: i[0]):
        if out and i[0] <= out[-1][1]:
            out[-1][1] = max(out[-1][1], i[1])
        else:
            out.append(i)
return out | b0a9eb30f81132f0af568f442465bdd9ce19ab83 | 28,908 |
def date_day_of_week(date):
"""Return the day of the week on which the given date occurred."""
day_of_week = date.strftime('%A')
return day_of_week | 14e5a60b3089f2ec1aed18f71929c73ea1b50731 | 28,909 |
import torch
def scatter_embs(input_embs, inputs):
"""
For inputs that have 'input_embs' field passed in, replace the entry in input_embs[i] with the entry
from inputs[i]['input_embs']. This is useful for the Integrated Gradients - for which the predict is
called with inputs with 'input_embs' field which is an interpolation between the baseline and the real calculated
input embeddings for the sample.
:param input_embs: tensor of shape B x S x h of input embeddings according to the input sentences.
:param inputs: list of dictionaries (smaples), for which the 'input_embs' field might be specified
:return: tensor of shape B x S x h with embeddings (if passed) from inputs inserted to input_embs
"""
interp_embeds = [(ind, ex.get('input_embs')) for ind, ex in enumerate(inputs)]
for ind, embed in interp_embeds:
if embed is not None:
input_embs[ind] = torch.tensor(embed)
return input_embs | d0966c15f51b1b692995cd25076e455bea8a2b8a | 28,910 |
def sanitizeStr(data):
"""
    Escape all characters that would break a quoted string (double quotes and backslashes).
Parameters
----------
data: str
the str to sanitize
Returns
-------
str
The sanitized data.
"""
data = " ".join(data.split())
new_msg = []
for letter in data:
if letter in ['"',"\\"]:
new_msg.append("\\")
new_msg.append(letter)
return "".join(new_msg) | 6fcd1455a01997d526cfd178d98ee3e9eca3c888 | 28,915 |
from typing import OrderedDict
def group_unique_values(items):
"""group items (pairs) into dict of lists.
Values in each group stay in the original order and must be unique
Args:
items: iterable of key-value pairs
Returns:
dict of key -> lists (of unique values)
Raises:
ValueError if value in a group is a duplicate
"""
result_lists = OrderedDict()
result_sets = OrderedDict()
for key, val in items:
if key not in result_lists:
result_lists[key] = []
result_sets[key] = set()
if val in result_sets[key]:
raise ValueError("Duplicate value: %s" % val)
result_sets[key].add(val)
result_lists[key].append(val)
return result_lists | cd25d657117b34fe408c27149ddf034f3956383d | 28,917 |
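A short example; a duplicate value within the same key would raise ValueError:

pairs = [('x', 1), ('y', 2), ('x', 3)]
print(dict(group_unique_values(pairs)))  # {'x': [1, 3], 'y': [2]}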
import io
import csv
def scanlist_csv() -> io.StringIO:
"""
Generates a placeholder ScanList.csv
"""
header = [
"No.",
"Scan List Name",
"Scan Channel Member",
"Scan Channel Member RX Frequency",
"Scan Channel Member TX Frequency",
"Scan Mode",
"Priority Channel Select",
"Priority Channel 1",
"Priority Channel 1 RX Frequency",
"Priority Channel 1 TX Frequency",
"Priority Channel 2",
"Priority Channel 2 RX Frequency",
"Priority Channel 2 TX Frequency",
"Revert Channel",
"Look Back Time A[s]",
"Look Back Time B[s]",
"Dropout Delay Time[s]",
"Dwell Time[s]",
]
sio = io.StringIO()
writer = csv.writer(sio, dialect="d878uvii")
writer.writerow(header)
return sio | c5c4a8e6d860c3984c7c4af4ab869ca521b1c8cd | 28,918 |
def getTableRADecKeys(tab):
"""Returns the column names in the table in which RA, dec coords are stored, after trying a few possible
name variations.
Args:
tab (:obj:`astropy.table.Table`): The table to search.
Returns:
Name of the RA column, name of the dec. column
"""
RAKeysToTry=['ra', 'RA', 'RADeg']
decKeysToTry=['dec', 'DEC', 'decDeg', 'Dec']
RAKey, decKey=None, None
for key in RAKeysToTry:
if key in tab.keys():
RAKey=key
break
for key in decKeysToTry:
if key in tab.keys():
decKey=key
break
if RAKey is None or decKey is None:
raise Exception("Couldn't identify RA, dec columns in the supplied table.")
return RAKey, decKey | 93807fe56826ac680605b0494af60ddc6a5014a8 | 28,920 |
def format_channel_link(name: str, channel_id: str):
"""
Formats a channel name and ID as a channel link using slack control sequences
https://api.slack.com/docs/message-formatting#linking_to_channels_and_users
>>> format_channel_link('general', 'C024BE7LR')
'<#C024BE7LR|general>'
"""
return '<#{}|{}>'.format(channel_id, name) | 2745a21960c7a2200e07e28bc5644a09f609b8ba | 28,927 |
import pickle
def load_pickle(filepath_str):
"""
Load pickled results.
Inputs:
filepath_str: path to the pickle file to load
Returns:
loaded pickle file
"""
with open(filepath_str, 'rb') as pickle_file:
return pickle.load(pickle_file) | a7cd817327e928a8d1f3ff3290923e9b878d2a06 | 28,928 |
def local_ij_delta_to_class(local_ij_delta):
"""
    :param local_ij_delta: tuple (i, j) returned from local_ij_delta
    :return: a value 0-5 for each of the possible adjacent hexagons, or -1 if
             the (i, j) tuple represents a non-adjacent hexagon coordinate
"""
    return {
        (0, 1): 0,
        (1, 0): 1,
        (0, -1): 2,
        (-1, 0): 3,
        (-1, -1): 4,
        (1, 1): 5,
    }.get(local_ij_delta, -1) | ed16645346353304c62b4c6bd7e73ee7b3fb2ead | 28,930 |
def prior_creator(vector, priors_lowbounds, priors_highbounds):
"""
Generates flat priors between *priors_lowbounds and *priors_highbounds for parameters in *vector
:param vector: array containing parameters optimized within flat priors
:param priors_lowbounds: array containing lower bound of flat priors
:param priors_highbounds: array containing higher bound of flat priors
:return: selection. selection = True if all *vector entries are within their flat prior. Otherwise selection = False
"""
    selection = True
    for i, entry in enumerate(vector):
        selection = selection and (priors_lowbounds[i] < entry < priors_highbounds[i])
    return selection | 6351b1946daf2f956b45dc181d7192aa2e70fbbf | 28,934 |
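For example, checking a two-parameter vector against its flat priors:

print(prior_creator([0.5, 2.0], [0.0, 1.0], [1.0, 3.0]))  # True
print(prior_creator([1.5, 2.0], [0.0, 1.0], [1.0, 3.0]))  # False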
def to_string(pkt):
"""Pretty-prints a packet."""
name = pkt._name
detail = ''
if name == 'AppleMIDIExchangePacket':
detail = '[command={} ssrc={} name={}]'.format(
pkt.command.decode('utf-8'), pkt.ssrc, pkt.name
)
elif name == 'MIDIPacket':
items = []
for entry in pkt.command.midi_list:
command = entry.command
if command in ('note_on', 'note_off'):
items.append('{} {} {}'.format(command, entry.params.key, entry.params.velocity))
elif command == 'control_mode_change':
items.append(
'{} {} {}'.format(command, entry.params.controller, entry.params.value)
)
else:
items.append(command)
detail = ' '.join(('[{}]'.format(i) for i in items))
return '{} {}'.format(name, detail) | ecdd0dfe3dd9bb2cb24351567520fd821619e19c | 28,939 |
def delete_after(list, key):
"""
Return a list with the item after the first occurrence of the key
(if any) deleted.
"""
if list == ():
return ()
else:
head1, tail1 = list
if head1 == key:
# Leave out the next item, if any
if tail1 == ():
return list
else:
head2, tail2 = tail1
return (head1, tail2)
else:
return (head1, delete_after(tail1, key)) | 047ad50c25c1c6a2f2d25c5fba1d813b45bd1e17 | 28,945 |
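Note that `list` here is a cons-style nested tuple, not a Python list. A short example:

cons = ('a', ('b', ('c', ())))  # represents [a, b, c]
print(delete_after(cons, 'a'))  # ('a', ('c', ())) -- 'b' was deleted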
def repeat(a, repeats, axis=None):
"""Repeat arrays along an axis.
Args:
a (cupy.ndarray): Array to transform.
repeats (int, list or tuple): The number of repeats.
axis (int): The axis to repeat.
Returns:
cupy.ndarray: Transformed array with repeats.
.. seealso:: :func:`numpy.repeat`
"""
return a.repeat(repeats, axis) | 4a7a9382b9aa125dc66c2aaf8da0567c49d9aaf3 | 28,948 |
def getsupportedcommands(qhlp, dostrip=True):
"""Parse qHLP answer and return list of available command names.
@param qhlp : Answer of qHLP() as string.
@param dostrip : If True strip first and last line from 'qhlp'.
@return : List of supported command names (not function names).
"""
qhlp = qhlp.splitlines()
if dostrip:
qhlp = qhlp[1:-1]
cmds = []
for line in qhlp:
line = line.upper()
cmds.append(line.split()[0].strip())
return cmds | 90cce0dd689f836b57788632681d33f3f717d239 | 28,955 |
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp) | 3dccd2acc324d8b748fe95290c49d961f1c636e1 | 28,957 |
def int2ascii(i: int) -> str:
"""Convert an integer to an ASCII character.
Args:
i (int): Integer value to be converted to ASCII text.
Note:
The passed integer value must be <= 127.
Raises:
ValueError: If the passed integer is > 127.
Returns:
str: The ASCII character associated to the passed integer.
"""
if i > 127:
raise ValueError('The passed integer value must be <= 127.')
return chr(i) | f46ed05d425f9277ea6c97a0f8bafb070b15091c | 28,958 |
def get_from_dict_or_default(key, dict, default):
"""Returns value for `key` in `dict` otherwise returns `default`"""
if key in dict:
return dict[key]
else:
return default | 14ad53c4cac7f554cfa537edeaf7a11e1e8ecac3 | 28,963 |
import math
def distance_formula(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Distance between two points is defined as the square root of (x2 - x1)^2 + (y2 - y1) ^ 2
:raises TypeError: Any of the values are non-numeric or None.
"""
return math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2)) | 5c1a4706365a2347bc23d7efcc74caa003405c0e | 28,965 |
def validate_asn(asn):
"""
Validate the format of a 2-byte or 4-byte autonomous system number
:param asn: User input of AS number
:return: Boolean: True if valid format, False if invalid format
"""
try:
if "." in str(asn):
left_asn, right_asn = str(asn).split(".")
asn_ok = (0 <= int(left_asn) <= 65535) and \
(0 <= int(right_asn) <= 65535)
else:
asn_ok = 0 <= int(asn) <= 4294967295
except ValueError:
asn_ok = False
return asn_ok | 14806fc04132c06dbb75abb2ceefd0340b7845e6 | 28,967 |
import math
def get_points_distance(point1, point2):
"""
Gets the distance between two points
:param point1: tuple with point 1
:param point2: tuple with point 2
:return: int distance
"""
return int(math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)) | 40af84836715b49ba531fe5113e40230110d49b9 | 28,971 |
from typing import Tuple
import unicodedata
def validate_ae(value: str) -> Tuple[bool, str]:
"""Return ``True`` if `value` is a conformant **AE** value.
An **AE** value:
* Must be no more than 16 characters
* Leading and trailing spaces are not significant
* May only use ASCII characters, excluding ``0x5C`` (backslash) and all
control characters
Parameters
----------
value : str
The **AE** value to check.
Returns
-------
Tuple[bool, str]
A tuple of (bool, str), with the first item being ``True`` if the
value is conformant to the DICOM Standard and ``False`` otherwise and
the second item being a short description of why the validation failed
or ``''`` if validation was successful.
"""
if not isinstance(value, str):
return False, "must be str"
if len(value) > 16:
return False, "must not exceed 16 characters"
# All characters use ASCII
if not value.isascii():
return False, "must only contain ASCII characters"
# Unicode category: 'Cc' is control characters
invalid = [c for c in value if unicodedata.category(c)[0] == 'C']
if invalid or '\\' in value:
return False, "must not contain control characters or backslashes"
return True, '' | aee3ec59ea1965bd745f1527368053c5c04e5c4b | 28,972 |
def state2bin(s, num_bins, limits):
"""
:param s: a state. (possibly multidimensional) ndarray, with dimension d =
dimensionality of state space.
:param num_bins: the total number of bins in the discretization
:param limits: 2 x d ndarray, where row[0] is a row vector of the lower
limit of each discrete dimension, and row[1] are corresponding upper
limits.
Returns the bin number (index) corresponding to state s given a
discretization num_bins between each column of limits[0] and limits[1].
The return value has same dimensionality as ``s``. \n
Note that ``s`` may be continuous. \n
\n
Examples: \n
s = 0, limits = [-1,5], num_bins = 6 => 1 \n
s = .001, limits = [-1,5], num_bins = 6 => 1 \n
s = .4, limits = [-.5,.5], num_bins = 3 => 2 \n
"""
if s == limits[1]:
return num_bins - 1
width = limits[1] - limits[0]
if s > limits[1]:
print(
"Tools.py: WARNING: ", s, " > ", limits[1], ". Using the chopped value of s"
)
print("Ignoring", limits[1] - s)
s = limits[1]
elif s < limits[0]:
print(
"Tools.py: WARNING: ", s, " < ", limits[0], ". Using the chopped value of s"
)
s = limits[0]
return int((s - limits[0]) * num_bins / (width * 1.0)) | c2a0cb48864e58481681e8c72074e618dda7ddb8 | 28,973 |
import pickle
def load_db(pathname):
"""
returns the stored database from a pickle file
Parameters
----------
pathname: string
Returns
-------
database: dictionary mapping names to profiles
"""
with open(pathname, mode="rb") as opened_file:
database = pickle.load(opened_file)
return database | 2ca73cfaf500fd6a841cfb9dc12c3b21d320ac69 | 28,974 |
def mkcol_mock(url, request):
"""Simulate collection creation."""
return {"status_code": 201} | 313fb40302c282d102d6846980ce09ea3600c50c | 28,989 |
def dict2bibtex(d):
""" Simple function to return a bibtex entry from a python dictionary """
outstring = '@' + d.get('TYPE','UNKNOWN') + '{' + d.get('KEY','') + ',\n'
kill_keys = ['TYPE','KEY','ORDER']
top_keys = ['AUTHOR','TITLE','YEAR']
top_items = []
rest_items = []
for k in d.keys():
if k in top_keys:
top_items = top_items + [ ( k , d[k] )]
elif not k in kill_keys:
rest_items = rest_items + [ ( k , d[k] )]
rest_items.sort()
for k in top_items + rest_items:
outstring = outstring + '\t' + k[0] + ' = {' + k[1] + '},\n'
outstring = outstring + '}\n\n'
return outstring | 1ffbc9ec9754acf9904c959be05e0585302435a3 | 28,990 |
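A usage sketch with a minimal, made-up entry:

entry = {'TYPE': 'article', 'KEY': 'doe2020', 'AUTHOR': 'J. Doe',
         'TITLE': 'A Title', 'YEAR': '2020', 'JOURNAL': 'J. of Examples'}
print(dict2bibtex(entry))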
def generate_system_redaction_list_entry(newValue):
"""Create an entry for the redaction list for a redaction performed by the system."""
return {
'value': newValue,
'reason': 'System Redacted',
} | 22d7831106c6dd5350a3c86b51facc273835a1e6 | 28,994 |
from typing import Union
def _print_result(
start_quantity: Union[int, float],
start_unit: str,
end_unit: str,
end_quantity: Union[int, float],
) -> str:
"""
Function to create final output string for conversion.
:param start_quantity: Integer or float starting quantity which needed conversion.
:param start_unit: Initial unit type of integer or float starting quantity.
:param end_unit: Ending unit type of integer or float quantity.
:param end_quantity: Integer or float of converted starting quantity from start unit to end unit.
:return: String of values concatenated in user friendly message.
"""
if end_quantity < 0.000001:
output = "Value smaller than decimal precision 6. Cannot output."
else:
output = f"```{start_quantity} {start_unit} = {end_quantity} {end_unit}```"
return output | 26228ea4c3064894748e5352ea117393031ee79b | 28,995 |
import importlib
def get_version(version_module_name):
"""Load currently declared package version."""
version_module = importlib.import_module(version_module_name)
# always reload
importlib.reload(version_module)
version = f"{version_module.__version__}"
print(f"version is {version}")
return version | 156d7e18e96ea0716011e0ca0536d38eaa078b9e | 28,996 |
def _expand_task_collection(factory):
"""Parse task collection task factory object, and return task list.
:param dict factory: A loaded JSON task factory object.
"""
return factory.tasks | 1d2c0b2a763b9e3c78b4bea8d012575038a5804c | 28,999 |
from textwrap import dedent
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return "" | ce9d89bbba7ef6784e707fcae0ea6b127ee3cdcd | 29,003 |
def _split_path(loc):
""" Split S3 path into bucket and prefix strings """
bucket = loc.split("s3://")[1].split("/")[0]
prefix = "/".join(loc.split("s3://")[1].split("/")[1:])
return bucket, prefix | 46ace1084e7688847d60fe34e4b3958c89cfbd31 | 29,004 |
def GetStepStartAndEndTime(build, full_step_name):
"""Gets a step's start_time and end_time from Build.
Returns:
(start_time, end_time)
"""
for step in build.steps or []:
if step.name == full_step_name:
return step.start_time.ToDatetime(), step.end_time.ToDatetime()
return None, None | ef3d2a017ad6aa1e0b5c9c974a64d715ae62d1c1 | 29,008 |
def compute_pascal(n):
"""
Compute the nth row of Pascal’s triangle
Parameters
----------
n : integer
        which row to compute
Returns
-------
pascal_n : a list of integers
The nth row of Pascal’s triangle.
"""
pascal_n = [1]
prev = 1
for k in range(1,n+1):
cur = prev * (n+1-k)/k
pascal_n.append(int(cur))
prev = cur
return(pascal_n) | 54b2d5ca80412d2da4da4e9f09dff25026205d3d | 29,012 |
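For example, the fourth row:

print(compute_pascal(4))  # [1, 4, 6, 4, 1]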